author:    Len Brown <len.brown@intel.com>  2009-04-05 02:14:15 -0400
committer: Len Brown <len.brown@intel.com>  2009-04-05 02:14:15 -0400
commit:    478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree:      a7f7952099da60d33032aed6de9c0c56c9f8779e /arch/x86/include/asm
parent:    8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent:    6bb597507f9839b13498781e481f5458aea33620 (diff)

Merge branch 'linus' into release

Conflicts:
	arch/x86/kernel/cpu/cpufreq/longhaul.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/a.out-core.h | 2
-rw-r--r--  arch/x86/include/asm/acpi.h | 3
-rw-r--r--  arch/x86/include/asm/aes.h | 11
-rw-r--r--  arch/x86/include/asm/apic.h | 455
-rw-r--r--  arch/x86/include/asm/apicdef.h | 1
-rw-r--r--  arch/x86/include/asm/apicnum.h | 12
-rw-r--r--  arch/x86/include/asm/apm.h (renamed from arch/x86/include/asm/mach-default/apm.h) | 0
-rw-r--r--  arch/x86/include/asm/arch_hooks.h | 26
-rw-r--r--  arch/x86/include/asm/bigsmp/apic.h | 155
-rw-r--r--  arch/x86/include/asm/bigsmp/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/bigsmp/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/boot.h | 20
-rw-r--r--  arch/x86/include/asm/cacheflush.h | 58
-rw-r--r--  arch/x86/include/asm/calling.h | 56
-rw-r--r--  arch/x86/include/asm/cpu.h | 17
-rwxr-xr-x  arch/x86/include/asm/cpu_debug.h | 226
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 1
-rw-r--r--  arch/x86/include/asm/cpumask.h | 32
-rw-r--r--  arch/x86/include/asm/current.h | 24
-rw-r--r--  arch/x86/include/asm/desc.h | 3
-rw-r--r--  arch/x86/include/asm/device.h | 2
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 188
-rw-r--r--  arch/x86/include/asm/dmi.h | 19
-rw-r--r--  arch/x86/include/asm/do_timer.h (renamed from arch/x86/include/asm/mach-default/do_timer.h) | 0
-rw-r--r--  arch/x86/include/asm/e820.h | 2
-rw-r--r--  arch/x86/include/asm/elf.h | 15
-rw-r--r--  arch/x86/include/asm/entry_arch.h (renamed from arch/x86/include/asm/mach-default/entry_arch.h) | 27
-rw-r--r--  arch/x86/include/asm/es7000/apic.h | 242
-rw-r--r--  arch/x86/include/asm/es7000/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/es7000/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/es7000/mpparse.h | 29
-rw-r--r--  arch/x86/include/asm/es7000/wakecpu.h | 37
-rw-r--r--  arch/x86/include/asm/fixmap.h | 139
-rw-r--r--  arch/x86/include/asm/fixmap_32.h | 119
-rw-r--r--  arch/x86/include/asm/fixmap_64.h | 79
-rw-r--r--  arch/x86/include/asm/ftrace.h | 25
-rw-r--r--  arch/x86/include/asm/genapic.h | 6
-rw-r--r--  arch/x86/include/asm/genapic_32.h | 148
-rw-r--r--  arch/x86/include/asm/genapic_64.h | 66
-rw-r--r--  arch/x86/include/asm/hardirq.h | 50
-rw-r--r--  arch/x86/include/asm/hardirq_32.h | 30
-rw-r--r--  arch/x86/include/asm/hardirq_64.h | 25
-rw-r--r--  arch/x86/include/asm/highmem.h | 1
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 25
-rw-r--r--  arch/x86/include/asm/i8259.h | 4
-rw-r--r--  arch/x86/include/asm/ia32.h | 7
-rw-r--r--  arch/x86/include/asm/init.h | 18
-rw-r--r--  arch/x86/include/asm/io.h | 97
-rw-r--r--  arch/x86/include/asm/io_32.h | 88
-rw-r--r--  arch/x86/include/asm/io_64.h | 61
-rw-r--r--  arch/x86/include/asm/io_apic.h | 44
-rw-r--r--  arch/x86/include/asm/iommu.h | 2
-rw-r--r--  arch/x86/include/asm/ipi.h | 75
-rw-r--r--  arch/x86/include/asm/irq.h | 5
-rw-r--r--  arch/x86/include/asm/irq_regs.h | 36
-rw-r--r--  arch/x86/include/asm/irq_regs_32.h | 31
-rw-r--r--  arch/x86/include/asm/irq_regs_64.h | 1
-rw-r--r--  arch/x86/include/asm/irq_remapping.h | 2
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 214
-rw-r--r--  arch/x86/include/asm/kexec.h | 34
-rw-r--r--  arch/x86/include/asm/kvm.h | 24
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 61
-rw-r--r--  arch/x86/include/asm/lguest_hcall.h | 24
-rw-r--r--  arch/x86/include/asm/linkage.h | 77
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apic.h | 168
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apicdef.h | 24
-rw-r--r--  arch/x86/include/asm/mach-default/mach_ipi.h | 64
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpparse.h | 17
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpspec.h | 12
-rw-r--r--  arch/x86/include/asm/mach-default/mach_wakecpu.h | 41
-rw-r--r--  arch/x86/include/asm/mach-generic/gpio.h | 15
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apic.h | 35
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apicdef.h | 11
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_ipi.h | 10
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpparse.h | 9
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpspec.h | 12
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_wakecpu.h | 12
-rw-r--r--  arch/x86/include/asm/mach-rdc321x/gpio.h | 60
-rw-r--r--  arch/x86/include/asm/mach-voyager/do_timer.h | 17
-rw-r--r--  arch/x86/include/asm/mach-voyager/entry_arch.h | 26
-rw-r--r--  arch/x86/include/asm/mach-voyager/setup_arch.h | 12
-rw-r--r--  arch/x86/include/asm/mach_timer.h (renamed from arch/x86/include/asm/mach-default/mach_timer.h) | 0
-rw-r--r--  arch/x86/include/asm/mach_traps.h (renamed from arch/x86/include/asm/mach-default/mach_traps.h) | 0
-rw-r--r--  arch/x86/include/asm/mce.h | 35
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 63
-rw-r--r--  arch/x86/include/asm/mmu_context_32.h | 55
-rw-r--r--  arch/x86/include/asm/mmu_context_64.h | 54
-rw-r--r--  arch/x86/include/asm/mmzone_32.h | 43
-rw-r--r--  arch/x86/include/asm/mpspec.h | 33
-rw-r--r--  arch/x86/include/asm/mpspec_def.h | 23
-rw-r--r--  arch/x86/include/asm/msidef.h | 1
-rw-r--r--  arch/x86/include/asm/msr-index.h | 14
-rw-r--r--  arch/x86/include/asm/numa_32.h | 6
-rw-r--r--  arch/x86/include/asm/numaq.h | 2
-rw-r--r--  arch/x86/include/asm/numaq/apic.h | 142
-rw-r--r--  arch/x86/include/asm/numaq/apicdef.h | 14
-rw-r--r--  arch/x86/include/asm/numaq/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/numaq/mpparse.h | 6
-rw-r--r--  arch/x86/include/asm/numaq/wakecpu.h | 45
-rw-r--r--  arch/x86/include/asm/page.h | 152
-rw-r--r--  arch/x86/include/asm/page_32.h | 87
-rw-r--r--  arch/x86/include/asm/page_32_types.h | 65
-rw-r--r--  arch/x86/include/asm/page_64.h | 101
-rw-r--r--  arch/x86/include/asm/page_64_types.h | 89
-rw-r--r--  arch/x86/include/asm/page_types.h | 51
-rw-r--r--  arch/x86/include/asm/paravirt.h | 484
-rw-r--r--  arch/x86/include/asm/pat.h | 10
-rw-r--r--  arch/x86/include/asm/pci-functions.h (renamed from arch/x86/include/asm/mach-default/pci-functions.h) | 0
-rw-r--r--  arch/x86/include/asm/pci.h | 39
-rw-r--r--  arch/x86/include/asm/pci_32.h | 34
-rw-r--r--  arch/x86/include/asm/pci_64.h | 22
-rw-r--r--  arch/x86/include/asm/pda.h | 137
-rw-r--r--  arch/x86/include/asm/percpu.h | 169
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 9
-rw-r--r--  arch/x86/include/asm/pgtable-2level_types.h (renamed from arch/x86/include/asm/pgtable-2level-defs.h) | 17
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 52
-rw-r--r--  arch/x86/include/asm/pgtable-3level_types.h (renamed from arch/x86/include/asm/pgtable-3level-defs.h) | 20
-rw-r--r--  arch/x86/include/asm/pgtable.h | 507
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 91
-rw-r--r--  arch/x86/include/asm/pgtable_32_types.h | 51
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 113
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h | 63
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 329
-rw-r--r--  arch/x86/include/asm/prctl.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 50
-rw-r--r--  arch/x86/include/asm/proto.h | 4
-rw-r--r--  arch/x86/include/asm/ptrace.h | 4
-rw-r--r--  arch/x86/include/asm/rdc321x_defs.h (renamed from arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h) | 0
-rw-r--r--  arch/x86/include/asm/sections.h | 7
-rw-r--r--  arch/x86/include/asm/segment.h | 9
-rw-r--r--  arch/x86/include/asm/setup.h | 99
-rw-r--r--  arch/x86/include/asm/setup_arch.h (renamed from arch/x86/include/asm/mach-default/setup_arch.h) | 0
-rw-r--r--  arch/x86/include/asm/smp.h | 69
-rw-r--r--  arch/x86/include/asm/smpboot_hooks.h (renamed from arch/x86/include/asm/mach-default/smpboot_hooks.h) | 6
-rw-r--r--  arch/x86/include/asm/socket.h | 3
-rw-r--r--  arch/x86/include/asm/spinlock.h | 72
-rw-r--r--  arch/x86/include/asm/stackprotector.h | 124
-rw-r--r--  arch/x86/include/asm/summit/apic.h | 202
-rw-r--r--  arch/x86/include/asm/summit/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/summit/ipi.h | 26
-rw-r--r--  arch/x86/include/asm/summit/mpparse.h | 109
-rw-r--r--  arch/x86/include/asm/suspend_32.h | 24
-rw-r--r--  arch/x86/include/asm/svm.h | 4
-rw-r--r--  arch/x86/include/asm/sys_ia32.h | 2
-rw-r--r--  arch/x86/include/asm/syscalls.h | 23
-rw-r--r--  arch/x86/include/asm/system.h | 70
-rw-r--r--  arch/x86/include/asm/thread_info.h | 21
-rw-r--r--  arch/x86/include/asm/timer.h | 4
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 17
-rw-r--r--  arch/x86/include/asm/topology.h | 43
-rw-r--r--  arch/x86/include/asm/trampoline.h | 1
-rw-r--r--  arch/x86/include/asm/traps.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 138
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 10
-rw-r--r--  arch/x86/include/asm/unistd_32.h | 2
-rw-r--r--  arch/x86/include/asm/unistd_64.h | 4
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 33
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 1
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 18
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 153
-rw-r--r--  arch/x86/include/asm/vic.h | 61
-rw-r--r--  arch/x86/include/asm/virtext.h | 2
-rw-r--r--  arch/x86/include/asm/vmx.h | 5
-rw-r--r--  arch/x86/include/asm/voyager.h | 529
-rw-r--r--  arch/x86/include/asm/xen/events.h | 6
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h | 2
-rw-r--r--  arch/x86/include/asm/xen/hypervisor.h | 28
-rw-r--r--  arch/x86/include/asm/xen/page.h | 1
168 files changed, 3855 insertions, 5296 deletions
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h
index 3c601f8224be..bb70e397aa84 100644
--- a/arch/x86/include/asm/a.out-core.h
+++ b/arch/x86/include/asm/a.out-core.h
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->regs.ds = (u16)regs->ds;
 	dump->regs.es = (u16)regs->es;
 	dump->regs.fs = (u16)regs->fs;
-	savesegment(gs, dump->regs.gs);
+	dump->regs.gs = get_user_gs(regs);
 	dump->regs.orig_ax = regs->orig_ax;
 	dump->regs.ip = regs->ip;
 	dump->regs.cs = (u16)regs->cs;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 9830681446ad..4518dc500903 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
 	acpi_noirq = 1;
 }
 
-/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
-#define FIX_ACPI_PAGES 4
-
 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
 
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/aes.h
new file mode 100644
index 000000000000..80545a1cbe39
--- /dev/null
+++ b/arch/x86/include/asm/aes.h
@@ -0,0 +1,11 @@
+#ifndef ASM_X86_AES_H
+#define ASM_X86_AES_H
+
+#include <linux/crypto.h>
+#include <crypto/aes.h>
+
+void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
+			    const u8 *src);
+void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
+			    const u8 *src);
+#endif
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ab1d51a8855e..df8a300dfe6c 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -1,15 +1,18 @@
 #ifndef _ASM_X86_APIC_H
 #define _ASM_X86_APIC_H
 
-#include <linux/pm.h>
+#include <linux/cpumask.h>
 #include <linux/delay.h>
+#include <linux/pm.h>
 
 #include <asm/alternative.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
+#include <asm/cpufeature.h>
 #include <asm/processor.h>
+#include <asm/apicdef.h>
+#include <asm/atomic.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
 #include <asm/system.h>
-#include <asm/cpufeature.h>
 #include <asm/msr.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -33,7 +36,13 @@
 	} while (0)
 
 
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
 extern void generic_apic_probe(void);
+#else
+static inline void generic_apic_probe(void)
+{
+}
+#endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
@@ -41,6 +50,21 @@ extern unsigned int apic_verbosity
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
+
+#ifdef CONFIG_SMP
+extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
+
+static inline void default_inquire_remote_apic(int apicid)
+{
+	if (apic_verbosity >= APIC_DEBUG)
+		__inquire_remote_apic(apicid);
+}
+
 /*
  * Basic functions accessing APICs.
  */
@@ -51,7 +75,14 @@ extern int disable_apic;
 #define setup_secondary_clock setup_secondary_APIC_clock
 #endif
 
+#ifdef CONFIG_X86_64
 extern int is_vsmp_box(void);
+#else
+static inline int is_vsmp_box(void)
+{
+	return 0;
+}
+#endif
 extern void xapic_wait_icr_idle(void);
 extern u32 safe_xapic_wait_icr_idle(void);
 extern void xapic_icr_write(u32, u32);
@@ -71,6 +102,22 @@ static inline u32 native_apic_mem_read(u32 reg)
 	return *((volatile u32 *)(APIC_BASE + reg));
 }
 
+extern void native_apic_wait_icr_idle(void);
+extern u32 native_safe_apic_wait_icr_idle(void);
+extern void native_apic_icr_write(u32 low, u32 id);
+extern u64 native_apic_icr_read(void);
+
+#ifdef CONFIG_X86_X2APIC
+/*
+ * Make previous memory operations globally visible before
+ * sending the IPI through x2apic wrmsr. We need a serializing instruction or
+ * mfence for this.
+ */
+static inline void x2apic_wrmsr_fence(void)
+{
+	asm volatile("mfence" : : : "memory");
+}
+
 static inline void native_apic_msr_write(u32 reg, u32 v)
 {
 	if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
@@ -91,8 +138,32 @@ static inline u32 native_apic_msr_read(u32 reg)
 	return low;
 }
 
-#ifndef CONFIG_X86_32
-extern int x2apic;
+static inline void native_x2apic_wait_icr_idle(void)
+{
+	/* no need to wait for icr idle in x2apic */
+	return;
+}
+
+static inline u32 native_safe_x2apic_wait_icr_idle(void)
+{
+	/* no need to wait for icr idle in x2apic */
+	return 0;
+}
+
+static inline void native_x2apic_icr_write(u32 low, u32 id)
+{
+	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+}
+
+static inline u64 native_x2apic_icr_read(void)
+{
+	unsigned long val;
+
+	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+	return val;
+}
+
+extern int x2apic, x2apic_phys;
 extern void check_x2apic(void);
 extern void enable_x2apic(void);
 extern void enable_IR_x2apic(void);
@@ -110,30 +181,27 @@ static inline int x2apic_enabled(void)
 	return 0;
 }
 #else
-#define x2apic_enabled() 0
-#endif
-
-struct apic_ops {
-	u32 (*read)(u32 reg);
-	void (*write)(u32 reg, u32 v);
-	u64 (*icr_read)(void);
-	void (*icr_write)(u32 low, u32 high);
-	void (*wait_icr_idle)(void);
-	u32 (*safe_wait_icr_idle)(void);
-};
+static inline void check_x2apic(void)
+{
+}
+static inline void enable_x2apic(void)
+{
+}
+static inline void enable_IR_x2apic(void)
+{
+}
+static inline int x2apic_enabled(void)
+{
+	return 0;
+}
 
-extern struct apic_ops *apic_ops;
+#define x2apic 0
 
-#define apic_read (apic_ops->read)
-#define apic_write (apic_ops->write)
-#define apic_icr_read (apic_ops->icr_read)
-#define apic_icr_write (apic_ops->icr_write)
-#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
-#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
+#endif
 
 extern int get_physical_broadcast(void);
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_X2APIC
 static inline void ack_x2APIC_irq(void)
 {
 	/* Docs say use 0 for future compatibility */
@@ -141,18 +209,6 @@ static inline void ack_x2APIC_irq(void)
 }
 #endif
 
-
-static inline void ack_APIC_irq(void)
-{
-	/*
-	 * ack_APIC_irq() actually gets compiled as a single instruction
-	 * ... yummie.
-	 */
-
-	/* Docs say use 0 for future compatibility */
-	apic_write(APIC_EOI, 0);
-}
-
 extern int lapic_get_maxlvt(void);
 extern void clear_local_APIC(void);
 extern void connect_bsp_APIC(void);
@@ -196,4 +252,329 @@ static inline void disable_local_APIC(void) { }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
+#ifdef CONFIG_X86_64
+#define SET_APIC_ID(x) (apic->set_apic_id(x))
+#else
+
+#endif
+
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch data struct.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+struct apic {
+	char *name;
+
+	int (*probe)(void);
+	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+	int (*apic_id_registered)(void);
+
+	u32 irq_delivery_mode;
+	u32 irq_dest_mode;
+
+	const struct cpumask *(*target_cpus)(void);
+
+	int disable_esr;
+
+	int dest_logical;
+	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+	unsigned long (*check_apicid_present)(int apicid);
+
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	void (*init_apic_ldr)(void);
+
+	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+
+	void (*setup_apic_routing)(void);
+	int (*multi_timer_check)(int apic, int irq);
+	int (*apicid_to_node)(int logical_apicid);
+	int (*cpu_to_logical_apicid)(int cpu);
+	int (*cpu_present_to_apicid)(int mps_cpu);
+	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+	void (*setup_portio_remap)(void);
+	int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
+	void (*enable_apic_mode)(void);
+	int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+
+	/*
+	 * When one of the next two hooks returns 1 the apic
+	 * is switched to this. Essentially they are additional
+	 * probe functions:
+	 */
+	int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
+
+	unsigned int (*get_apic_id)(unsigned long x);
+	unsigned long (*set_apic_id)(unsigned int id);
+	unsigned long apic_id_mask;
+
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
+
+	/* ipi */
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
+	void (*send_IPI_allbutself)(int vector);
+	void (*send_IPI_all)(int vector);
+	void (*send_IPI_self)(int vector);
+
+	/* wakeup_secondary_cpu */
+	int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
+
+	int trampoline_phys_low;
+	int trampoline_phys_high;
+
+	void (*wait_for_init_deassert)(atomic_t *deassert);
+	void (*smp_callin_clear_local_apic)(void);
+	void (*inquire_remote_apic)(int apicid);
+
+	/* apic ops */
+	u32 (*read)(u32 reg);
+	void (*write)(u32 reg, u32 v);
+	u64 (*icr_read)(void);
+	void (*icr_write)(u32 low, u32 high);
+	void (*wait_icr_idle)(void);
+	u32 (*safe_wait_icr_idle)(void);
+};
+
+/*
+ * Pointer to the local APIC driver in use on this system (there's
+ * always just one such driver in use - the kernel decides via an
+ * early probing process which one it picks - and then sticks to it):
+ */
+extern struct apic *apic;
+
+/*
+ * APIC functionality to boot other CPUs - only used on SMP:
+ */
+#ifdef CONFIG_SMP
+extern atomic_t init_deasserted;
+extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
+#endif
+
+static inline u32 apic_read(u32 reg)
+{
+	return apic->read(reg);
+}
+
+static inline void apic_write(u32 reg, u32 val)
+{
+	apic->write(reg, val);
+}
+
+static inline u64 apic_icr_read(void)
+{
+	return apic->icr_read();
+}
+
+static inline void apic_icr_write(u32 low, u32 high)
+{
+	apic->icr_write(low, high);
+}
+
+static inline void apic_wait_icr_idle(void)
+{
+	apic->wait_icr_idle();
+}
+
+static inline u32 safe_apic_wait_icr_idle(void)
+{
+	return apic->safe_wait_icr_idle();
+}
+
+
+static inline void ack_APIC_irq(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+	/*
+	 * ack_APIC_irq() actually gets compiled as a single instruction
+	 * ... yummie.
+	 */
+
+	/* Docs say use 0 for future compatibility */
+	apic_write(APIC_EOI, 0);
+#endif
+}
+
+static inline unsigned default_get_apic_id(unsigned long x)
+{
+	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+	if (APIC_XAPIC(ver))
+		return (x >> 24) & 0xFF;
+	else
+		return (x >> 24) & 0x0F;
+}
+
+/*
+ * Warm reset vector default position:
+ */
+#define DEFAULT_TRAMPOLINE_PHYS_LOW	0x467
+#define DEFAULT_TRAMPOLINE_PHYS_HIGH	0x469
+
+#ifdef CONFIG_X86_64
+extern struct apic apic_flat;
+extern struct apic apic_physflat;
+extern struct apic apic_x2apic_cluster;
+extern struct apic apic_x2apic_phys;
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void apic_send_IPI_self(int vector);
+
+extern struct apic apic_x2apic_uv_x;
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline void default_wait_for_init_deassert(atomic_t *deassert)
+{
+	while (!atomic_read(deassert))
+		cpu_relax();
+	return;
+}
+
+extern void generic_bigsmp_probe(void);
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE	(APIC_DFR_FLAT)
+
+static inline const struct cpumask *default_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+	return cpu_online_mask;
+#else
+	return cpumask_of(0);
+#endif
+}
+
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+
+
+static inline unsigned int read_apic_id(void)
+{
+	unsigned int reg;
+
+	reg = apic_read(APIC_ID);
+
+	return apic->get_apic_id(reg);
+}
+
+extern void default_setup_apic_routing(void);
+
+#ifdef CONFIG_X86_32
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+extern void default_init_apic_ldr(void);
+
+static inline int default_apic_id_registered(void)
+{
+	return physid_isset(read_apic_id(), phys_cpu_present_map);
+}
+
+static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+extern int default_apicid_to_node(int logical_apicid);
+
+#endif
+
+static inline unsigned int
+default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			       const struct cpumask *andmask)
+{
+	unsigned long mask1 = cpumask_bits(cpumask)[0];
+	unsigned long mask2 = cpumask_bits(andmask)[0];
+	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+	return (unsigned int)(mask1 & mask2 & mask3);
+}
+
+static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+	return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long default_check_apicid_present(int bit)
+{
+	return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+	return phys_map;
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int default_cpu_to_logical_apicid(int cpu)
+{
+	return 1 << cpu;
+}
+
+static inline int __default_cpu_present_to_apicid(int mps_cpu)
+{
+	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
+		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+	else
+		return BAD_APICID;
+}
+
+static inline int
+__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+}
+
+#ifdef CONFIG_X86_32
+static inline int default_cpu_present_to_apicid(int mps_cpu)
+{
+	return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+static inline int
+default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+	return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#else
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
+{
+	return physid_mask_of_physid(phys_apicid);
+}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_X86_32
+extern u8 cpu_2_logical_apicid[NR_CPUS];
+#endif
+
 #endif /* _ASM_X86_APIC_H */
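
The hunks above retire the old apic_ops indirection in favor of a single 'struct apic' driver pointer, and add native x2APIC accessors that reach the ICR through an MSR instead of xAPIC MMIO. As an illustration of the MSR index arithmetic used by native_x2apic_icr_write()/native_x2apic_icr_read() above, a standalone userspace sketch (not part of the commit; the command word and destination ID are example values):

#include <stdint.h>
#include <stdio.h>

#define APIC_BASE_MSR	0x800
#define APIC_ICR	0x300

int main(void)
{
	/* xAPIC MMIO offset 'reg' maps to MSR 0x800 + (reg >> 4) in x2APIC mode */
	uint64_t msr = APIC_BASE_MSR + (APIC_ICR >> 4);	/* 0x830 */
	uint32_t low = 0x000C4620;			/* example ICR command word */
	uint32_t id  = 3;				/* example destination x2APIC ID */
	uint64_t val = ((uint64_t)id << 32) | low;	/* both halves in one wrmsr */

	printf("wrmsr 0x%llx <- 0x%016llx\n",
	       (unsigned long long)msr, (unsigned long long)val);
	return 0;
}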
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 63134e31e8b9..bc9514fb3b13 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -53,6 +53,7 @@
 #define APIC_ESR_SENDILL	0x00020
 #define APIC_ESR_RECVILL	0x00040
 #define APIC_ESR_ILLREGA	0x00080
+#define APIC_LVTCMCI	0x2f0
 #define APIC_ICR	0x300
 #define APIC_DEST_SELF	0x40000
 #define APIC_DEST_ALLINC	0x80000
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
new file mode 100644
index 000000000000..82f613c607ce
--- /dev/null
+++ b/arch/x86/include/asm/apicnum.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_APICNUM_H
+#define _ASM_X86_APICNUM_H
+
+/* define MAX_IO_APICS */
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/mach-default/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..20370c6db74b 100644
--- a/arch/x86/include/asm/mach-default/apm.h
+++ b/arch/x86/include/asm/apm.h
diff --git a/arch/x86/include/asm/arch_hooks.h b/arch/x86/include/asm/arch_hooks.h
deleted file mode 100644
index cbd4957838a6..000000000000
--- a/arch/x86/include/asm/arch_hooks.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _ASM_X86_ARCH_HOOKS_H
-#define _ASM_X86_ARCH_HOOKS_H
-
-#include <linux/interrupt.h>
-
-/*
- * linux/include/asm/arch_hooks.h
- *
- * define the architecture specific hooks
- */
-
-/* these aren't arch hooks, they are generic routines
- * that can be used by the hooks */
-extern void init_ISA_irqs(void);
-extern irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-/* these are the defined hooks */
-extern void intr_init_hook(void);
-extern void pre_intr_init_hook(void);
-extern void pre_setup_arch_hook(void);
-extern void trap_init_hook(void);
-extern void pre_time_init_hook(void);
-extern void time_init_hook(void);
-extern void mca_nmi_hook(void);
-
-#endif /* _ASM_X86_ARCH_HOOKS_H */
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
deleted file mode 100644
index d8dd9f537911..000000000000
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
-	return (1);
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-#ifdef CONFIG_SMP
-	return &cpu_online_map;
-#else
-	return &cpumask_of_cpu(0);
-#endif
-}
-
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE (dest_Fixed)
-#define INT_DEST_MODE (0)    /* phys delivery to target proc */
-#define NO_BALANCE_IRQ (0)
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-	return (0);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
-	return (1);
-}
-
-static inline unsigned long calculate_ldr(int cpu)
-{
-	unsigned long val, id;
-	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-	id = xapic_phys_to_log_apicid(cpu);
-	val |= SET_APIC_LOGICAL_ID(id);
-	return val;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
-	unsigned long val;
-	int cpu = smp_processor_id();
-
-	apic_write(APIC_DFR, APIC_DFR_VALUE);
-	val = calculate_ldr(cpu);
-	apic_write(APIC_LDR, val);
-}
-
-static inline void setup_apic_routing(void)
-{
-	printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
-		"Physflat", nr_ioapics);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-	return (0);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-	return apicid_2_node[hard_smp_processor_id()];
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-	if (mps_cpu < nr_cpu_ids)
-		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-
-	return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-	return physid_mask_of_physid(phys_apicid);
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-	if (cpu >= nr_cpu_ids)
-		return BAD_APICID;
-	return cpu_physical_id(cpu);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-	/* For clustered we don't have a good way to do this yet - hack */
-	return physids_promote(0xFFL);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-	return (1);
-}
-
-/* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-	int cpu;
-	int apicid;
-
-	cpu = first_cpu(*cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-						  const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask)
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	if (cpu < nr_cpu_ids)
-		return cpu_to_logical_apicid(cpu);
-
-	return BAD_APICID;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-	return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_MACH_APIC_H */
diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h
deleted file mode 100644
index 392c3f5ef2fe..000000000000
--- a/arch/x86/include/asm/bigsmp/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MACH_APICDEF_H
-#define __ASM_MACH_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-	return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
deleted file mode 100644
index 27fcd01b3ae6..000000000000
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_MACH_IPI_H
-#define __ASM_MACH_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-	send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-	send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-	send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_MACH_IPI_H */
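
The three bigsmp headers deleted above supplied per-subarch APIC and IPI helpers as free-standing inlines; after this merge the same hooks live behind the single 'struct apic' table shown in the apic.h hunks earlier. A standalone userspace sketch of that driver-pointer pattern (names and output are illustrative, not the kernel's):

#include <stdio.h>

struct ipi_ops {
	const char *name;
	void (*send_ipi_all)(int vector);
};

static void flat_send_ipi_all(int vector)
{
	printf("flat: IPI vector 0x%x to all CPUs\n", vector);
}

static struct ipi_ops flat_ops = { "flat", flat_send_ipi_all };
static struct ipi_ops *ipi = &flat_ops;	/* selected once, at probe time */

int main(void)
{
	ipi->send_ipi_all(0xfd);	/* every caller dispatches via the pointer */
	return 0;
}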
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index dd61616cb73d..6ba23dd9fc92 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -1,26 +1,36 @@
 #ifndef _ASM_X86_BOOT_H
 #define _ASM_X86_BOOT_H
 
-/* Don't touch these, unless you really know what you're doing. */
-#define DEF_SYSSEG	0x1000
-#define DEF_SYSSIZE	0x7F00
-
 /* Internal svga startup constants */
 #define NORMAL_VGA	0xffff		/* 80x25 mode */
 #define EXTENDED_VGA	0xfffe		/* 80x50 mode */
 #define ASK_VGA		0xfffd		/* ask for it at bootup */
 
+#ifdef __KERNEL__
+
 /* Physical address where kernel should be loaded. */
 #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
 				+ (CONFIG_PHYSICAL_ALIGN - 1)) \
 				& ~(CONFIG_PHYSICAL_ALIGN - 1))
 
+#ifdef CONFIG_KERNEL_BZIP2
+#define BOOT_HEAP_SIZE	0x400000
+#else /* !CONFIG_KERNEL_BZIP2 */
+
 #ifdef CONFIG_X86_64
 #define BOOT_HEAP_SIZE	0x7000
-#define BOOT_STACK_SIZE	0x4000
 #else
 #define BOOT_HEAP_SIZE	0x4000
+#endif
+
+#endif /* !CONFIG_KERNEL_BZIP2 */
+
+#ifdef CONFIG_X86_64
+#define BOOT_STACK_SIZE	0x4000
+#else
 #define BOOT_STACK_SIZE	0x1000
 #endif
 
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_X86_BOOT_H */
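
The LOAD_PHYSICAL_ADDR macro kept above rounds CONFIG_PHYSICAL_START up to the next CONFIG_PHYSICAL_ALIGN boundary: adding (align - 1) and masking with ~(align - 1) is the standard power-of-two round-up idiom. A standalone worked example (the two config values are examples only):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x100000;	/* e.g. CONFIG_PHYSICAL_START */
	unsigned long align = 0x200000;	/* e.g. CONFIG_PHYSICAL_ALIGN, a power of two */
	unsigned long load  = (start + (align - 1)) & ~(align - 1);

	printf("0x%lx\n", load);	/* prints 0x200000 */
	return 0;
}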
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 2f8466540fb5..b3894bf52fcd 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -5,24 +5,43 @@
 #include <linux/mm.h>
 
 /* Caches aren't brain-dead on the intel. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma, pg)		do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy((dst), (src), (len))
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy((dst), (src), (len))
+static inline void flush_cache_all(void) { }
+static inline void flush_cache_mm(struct mm_struct *mm) { }
+static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end) { }
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr, unsigned long pfn) { }
+static inline void flush_dcache_page(struct page *page) { }
+static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
+static inline void flush_icache_range(unsigned long start,
+				      unsigned long end) { }
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page) { }
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+					   struct page *page,
+					   unsigned long addr,
+					   unsigned long len) { }
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
+static inline void flush_cache_vunmap(unsigned long start,
+				      unsigned long end) { }
+
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+				     struct page *page, unsigned long vaddr,
+				     void *dst, const void *src,
+				     unsigned long len)
+{
+	memcpy(dst, src, len);
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+				       struct page *page, unsigned long vaddr,
+				       void *dst, const void *src,
+				       unsigned long len)
+{
+	memcpy(dst, src, len);
+}
 
 #define PG_non_WB				PG_arch_1
 PAGEFLAG(NonWB, non_WB)
@@ -71,6 +90,9 @@ int set_memory_4k(unsigned long addr, int numpages);
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+
 /*
  * For legacy compatibility with the old APIs, a few functions
  * are provided that work on a "struct page".
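 
The first cacheflush.h hunk converts the empty flush macros into empty static inline functions. Behavior is unchanged (everything still compiles away to nothing), but the compiler now type-checks every call site's arguments. A standalone sketch of the difference (names here are illustrative stand-ins):

struct mm_struct;

#define flush_cache_mm_macro(mm)	do { } while (0)
static inline void flush_cache_mm_inline(struct mm_struct *mm) { }

void example(struct mm_struct *mm, int bogus)
{
	flush_cache_mm_macro(bogus);	/* accepted: the macro checks nothing */
	flush_cache_mm_inline(mm);	/* accepted: correct pointer type */
	/* flush_cache_mm_inline(bogus) would now draw a compiler diagnostic */
}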
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 2bc162e0ec6e..0e63c9a2a8d0 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -1,5 +1,55 @@
 /*
- * Some macros to handle stack frames in assembly.
+
+ x86 function call convention, 64-bit:
+ -------------------------------------
+  arguments           |  callee-saved      | extra caller-saved | return
+ [callee-clobbered]   |                    | [callee-clobbered] |
+ ---------------------------------------------------------------------------
+ rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
+
+ ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
+   functions when it sees tail-call optimization possibilities) rflags is
+   clobbered. Leftover arguments are passed over the stack frame.)
+
+ [*]  In the frame-pointers case rbp is fixed to the stack frame.
+
+ [**] for struct return values wider than 64 bits the return convention is a
+      bit more complex: up to 128 bits width we return small structures
+      straight in rax, rdx. For structures larger than that (3 words or
+      larger) the caller puts a pointer to an on-stack return struct
+      [allocated in the caller's stack frame] into the first argument - i.e.
+      into rdi. All other arguments shift up by one in this case.
+      Fortunately this case is rare in the kernel.
+
+For 32-bit we have the following conventions - kernel is built with
+-mregparm=3 and -freg-struct-return:
+
+ x86 function calling convention, 32-bit:
+ ----------------------------------------
+  arguments         | callee-saved        | extra caller-saved | return
+ [callee-clobbered] |                     | [callee-clobbered] |
+ -------------------------------------------------------------------------
+ eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]
+
+ ( here too esp is obviously invariant across normal function calls. eflags
+   is clobbered. Leftover arguments are passed over the stack frame. )
+
+ [*]  In the frame-pointers case ebp is fixed to the stack frame.
+
+ [**] We build with -freg-struct-return, which on 32-bit means similar
+      semantics as on 64-bit: edx can be used for a second return value
+      (i.e. covering integer and structure sizes up to 64 bits) - after that
+      it gets more complex and more expensive: 3-word or larger struct returns
+      get done in the caller's frame and the pointer to the return struct goes
+      into regparm0, i.e. eax - the other arguments shift up and the
+      function's register parameters degenerate to regparm=2 in essence.
+
+*/
+
+
+/*
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
  */
 
 #define R15 0
@@ -9,7 +59,7 @@
 #define RBP 32
 #define RBX 40
 
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here: */
 #define R11 48
 #define R10 56
 #define R9 64
@@ -22,7 +72,7 @@
 #define ORIG_RAX 120	/* + error_code */
 /* end of arguments */
 
-/* cpu exception frame or undefined in case of fast syscall. */
+/* cpu exception frame or undefined in case of fast syscall: */
 #define RIP 128
 #define CS 136
 #define EFLAGS 144
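
Per the 64-bit convention documented in the comment block added above, a three-integer-argument function receives its arguments in rdi, rsi, rdx and returns in rax, touching none of the callee-saved registers. A small sketch; the assembly shown is typical gcc output, not guaranteed codegen:

long add3(long a, long b, long c)	/* a: %rdi, b: %rsi, c: %rdx */
{
	return a + b + c;		/* result in %rax */
	/*
	 * typical codegen:
	 *	leaq	(%rdi,%rsi), %rax
	 *	addq	%rdx, %rax
	 *	ret
	 */
}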
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bae482df6039..b185091bf19c 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -7,6 +7,20 @@
 #include <linux/nodemask.h>
 #include <linux/percpu.h>
 
+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
+#define safe_smp_processor_id()		0
+#define stack_smp_processor_id()	0
+
+#endif /* CONFIG_SMP */
+
 struct x86_cpu {
 	struct cpu cpu;
 };
@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
 #endif
 
 DECLARE_PER_CPU(int, cpu_state);
+
+extern unsigned int boot_cpu_id;
+
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
new file mode 100755
index 000000000000..222802029fa6
--- /dev/null
+++ b/arch/x86/include/asm/cpu_debug.h
@@ -0,0 +1,226 @@
+#ifndef _ASM_X86_CPU_DEBUG_H
+#define _ASM_X86_CPU_DEBUG_H
+
+/*
+ * CPU x86 architecture debug
+ *
+ * Copyright(C) 2009 Jaswinder Singh Rajput
+ */
+
+/* Register flags */
+enum cpu_debug_bit {
+/* Model Specific Registers (MSRs) */
+	CPU_MC_BIT,		/* Machine Check */
+	CPU_MONITOR_BIT,	/* Monitor */
+	CPU_TIME_BIT,		/* Time */
+	CPU_PMC_BIT,		/* Performance Monitor */
+	CPU_PLATFORM_BIT,	/* Platform */
+	CPU_APIC_BIT,		/* APIC */
+	CPU_POWERON_BIT,	/* Power-on */
+	CPU_CONTROL_BIT,	/* Control */
+	CPU_FEATURES_BIT,	/* Features control */
+	CPU_LBRANCH_BIT,	/* Last Branch */
+	CPU_BIOS_BIT,		/* BIOS */
+	CPU_FREQ_BIT,		/* Frequency */
+	CPU_MTTR_BIT,		/* MTRR */
+	CPU_PERF_BIT,		/* Performance */
+	CPU_CACHE_BIT,		/* Cache */
+	CPU_SYSENTER_BIT,	/* Sysenter */
+	CPU_THERM_BIT,		/* Thermal */
+	CPU_MISC_BIT,		/* Miscellaneous */
+	CPU_DEBUG_BIT,		/* Debug */
+	CPU_PAT_BIT,		/* PAT */
+	CPU_VMX_BIT,		/* VMX */
+	CPU_CALL_BIT,		/* System Call */
+	CPU_BASE_BIT,		/* BASE Address */
+	CPU_VER_BIT,		/* Version ID */
+	CPU_CONF_BIT,		/* Configuration */
+	CPU_SMM_BIT,		/* System mgmt mode */
+	CPU_SVM_BIT,		/* Secure Virtual Machine */
+	CPU_OSVM_BIT,		/* OS-Visible Workaround */
+/* Standard Registers */
+	CPU_TSS_BIT,		/* Task Stack Segment */
+	CPU_CR_BIT,		/* Control Registers */
+	CPU_DT_BIT,		/* Descriptor Table */
+/* End of Registers flags */
+	CPU_REG_ALL_BIT,	/* Select all Registers */
+};
+
+#define CPU_REG_ALL	(~0)	/* Select all Registers */
+
+#define CPU_MC		(1 << CPU_MC_BIT)
+#define CPU_MONITOR	(1 << CPU_MONITOR_BIT)
+#define CPU_TIME	(1 << CPU_TIME_BIT)
+#define CPU_PMC		(1 << CPU_PMC_BIT)
+#define CPU_PLATFORM	(1 << CPU_PLATFORM_BIT)
+#define CPU_APIC	(1 << CPU_APIC_BIT)
+#define CPU_POWERON	(1 << CPU_POWERON_BIT)
+#define CPU_CONTROL	(1 << CPU_CONTROL_BIT)
+#define CPU_FEATURES	(1 << CPU_FEATURES_BIT)
+#define CPU_LBRANCH	(1 << CPU_LBRANCH_BIT)
+#define CPU_BIOS	(1 << CPU_BIOS_BIT)
+#define CPU_FREQ	(1 << CPU_FREQ_BIT)
+#define CPU_MTRR	(1 << CPU_MTTR_BIT)
+#define CPU_PERF	(1 << CPU_PERF_BIT)
+#define CPU_CACHE	(1 << CPU_CACHE_BIT)
+#define CPU_SYSENTER	(1 << CPU_SYSENTER_BIT)
+#define CPU_THERM	(1 << CPU_THERM_BIT)
+#define CPU_MISC	(1 << CPU_MISC_BIT)
+#define CPU_DEBUG	(1 << CPU_DEBUG_BIT)
+#define CPU_PAT		(1 << CPU_PAT_BIT)
+#define CPU_VMX		(1 << CPU_VMX_BIT)
+#define CPU_CALL	(1 << CPU_CALL_BIT)
+#define CPU_BASE	(1 << CPU_BASE_BIT)
+#define CPU_VER		(1 << CPU_VER_BIT)
+#define CPU_CONF	(1 << CPU_CONF_BIT)
+#define CPU_SMM		(1 << CPU_SMM_BIT)
+#define CPU_SVM		(1 << CPU_SVM_BIT)
+#define CPU_OSVM	(1 << CPU_OSVM_BIT)
+#define CPU_TSS		(1 << CPU_TSS_BIT)
+#define CPU_CR		(1 << CPU_CR_BIT)
+#define CPU_DT		(1 << CPU_DT_BIT)
+
+/* Register file flags */
+enum cpu_file_bit {
+	CPU_INDEX_BIT,	/* index */
+	CPU_VALUE_BIT,	/* value */
+};
+
+#define CPU_FILE_VALUE	(1 << CPU_VALUE_BIT)
+
+/*
+ * DisplayFamily_DisplayModel	Processor Families/Processor Number Series
+ * --------------------------	------------------------------------------
+ * 05_01, 05_02, 05_04		Pentium, Pentium with MMX
+ *
+ * 06_01			Pentium Pro
+ * 06_03, 06_05			Pentium II Xeon, Pentium II
+ * 06_07, 06_08, 06_0A, 06_0B	Pentium III Xeon, Pentum III
+ *
+ * 06_09, 060D			Pentium M
+ *
+ * 06_0E			Core Duo, Core Solo
+ *
+ * 06_0F			Xeon 3000, 3200, 5100, 5300, 7300 series,
+ *				Core 2 Quad, Core 2 Extreme, Core 2 Duo,
+ *				Pentium dual-core
+ * 06_17			Xeon 5200, 5400 series, Core 2 Quad Q9650
+ *
+ * 06_1C			Atom
+ *
+ * 0F_00, 0F_01, 0F_02		Xeon, Xeon MP, Pentium 4
+ * 0F_03, 0F_04			Xeon, Xeon MP, Pentium 4, Pentium D
+ *
+ * 0F_06			Xeon 7100, 5000 Series, Xeon MP,
+ *				Pentium 4, Pentium D
+ */
+
+/* Register processors bits */
+enum cpu_processor_bit {
+	CPU_NONE,
+/* Intel */
+	CPU_INTEL_PENTIUM_BIT,
+	CPU_INTEL_P6_BIT,
+	CPU_INTEL_PENTIUM_M_BIT,
+	CPU_INTEL_CORE_BIT,
+	CPU_INTEL_CORE2_BIT,
+	CPU_INTEL_ATOM_BIT,
+	CPU_INTEL_XEON_P4_BIT,
+	CPU_INTEL_XEON_MP_BIT,
+/* AMD */
+	CPU_AMD_K6_BIT,
+	CPU_AMD_K7_BIT,
+	CPU_AMD_K8_BIT,
+	CPU_AMD_0F_BIT,
+	CPU_AMD_10_BIT,
+	CPU_AMD_11_BIT,
+};
+
+#define CPU_INTEL_PENTIUM	(1 << CPU_INTEL_PENTIUM_BIT)
+#define CPU_INTEL_P6		(1 << CPU_INTEL_P6_BIT)
+#define CPU_INTEL_PENTIUM_M	(1 << CPU_INTEL_PENTIUM_M_BIT)
+#define CPU_INTEL_CORE		(1 << CPU_INTEL_CORE_BIT)
+#define CPU_INTEL_CORE2		(1 << CPU_INTEL_CORE2_BIT)
+#define CPU_INTEL_ATOM		(1 << CPU_INTEL_ATOM_BIT)
+#define CPU_INTEL_XEON_P4	(1 << CPU_INTEL_XEON_P4_BIT)
+#define CPU_INTEL_XEON_MP	(1 << CPU_INTEL_XEON_MP_BIT)
+
+#define CPU_INTEL_PX		(CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
+#define CPU_INTEL_COREX		(CPU_INTEL_CORE | CPU_INTEL_CORE2)
+#define CPU_INTEL_XEON		(CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
+#define CPU_CO_AT		(CPU_INTEL_CORE | CPU_INTEL_ATOM)
+#define CPU_C2_AT		(CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
+#define CPU_CX_AT		(CPU_INTEL_COREX | CPU_INTEL_ATOM)
+#define CPU_CX_XE		(CPU_INTEL_COREX | CPU_INTEL_XEON)
+#define CPU_P6_XE		(CPU_INTEL_P6 | CPU_INTEL_XEON)
+#define CPU_PM_CO_AT		(CPU_INTEL_PENTIUM_M | CPU_CO_AT)
+#define CPU_C2_AT_XE		(CPU_C2_AT | CPU_INTEL_XEON)
+#define CPU_CX_AT_XE		(CPU_CX_AT | CPU_INTEL_XEON)
+#define CPU_P6_CX_AT		(CPU_INTEL_P6 | CPU_CX_AT)
+#define CPU_P6_CX_XE		(CPU_P6_XE | CPU_INTEL_COREX)
+#define CPU_P6_CX_AT_XE		(CPU_INTEL_P6 | CPU_CX_AT_XE)
+#define CPU_PM_CX_AT_XE		(CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
+#define CPU_PM_CX_AT		(CPU_INTEL_PENTIUM_M | CPU_CX_AT)
+#define CPU_PM_CX_XE		(CPU_INTEL_PENTIUM_M | CPU_CX_XE)
+#define CPU_PX_CX_AT		(CPU_INTEL_PX | CPU_CX_AT)
+#define CPU_PX_CX_AT_XE		(CPU_INTEL_PX | CPU_CX_AT_XE)
+
+/* Select all supported Intel CPUs */
+#define CPU_INTEL_ALL		(CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)
+
+#define CPU_AMD_K6		(1 << CPU_AMD_K6_BIT)
+#define CPU_AMD_K7		(1 << CPU_AMD_K7_BIT)
+#define CPU_AMD_K8		(1 << CPU_AMD_K8_BIT)
+#define CPU_AMD_0F		(1 << CPU_AMD_0F_BIT)
+#define CPU_AMD_10		(1 << CPU_AMD_10_BIT)
+#define CPU_AMD_11		(1 << CPU_AMD_11_BIT)
+
+#define CPU_K10_PLUS		(CPU_AMD_10 | CPU_AMD_11)
+#define CPU_K0F_PLUS		(CPU_AMD_0F | CPU_K10_PLUS)
+#define CPU_K8_PLUS		(CPU_AMD_K8 | CPU_K0F_PLUS)
+#define CPU_K7_PLUS		(CPU_AMD_K7 | CPU_K8_PLUS)
+
+/* Select all supported AMD CPUs */
+#define CPU_AMD_ALL		(CPU_AMD_K6 | CPU_K7_PLUS)
+
+/* Select all supported CPUs */
+#define CPU_ALL			(CPU_INTEL_ALL | CPU_AMD_ALL)
+
+#define MAX_CPU_FILES		512
+
+struct cpu_private {
+	unsigned		cpu;
+	unsigned		type;
+	unsigned		reg;
+	unsigned		file;
+};
+
+struct cpu_debug_base {
+	char			*name;		/* Register name */
+	unsigned		flag;		/* Register flag */
+	unsigned		write;		/* Register write flag */
+};
+
+/*
+ * Currently it looks similar to cpu_debug_base but once we add more files
+ * cpu_file_base will go in different direction
+ */
+struct cpu_file_base {
+	char			*name;		/* Register file name */
+	unsigned		flag;		/* Register file flag */
+	unsigned		write;		/* Register write flag */
+};
+
+struct cpu_cpuX_base {
+	struct dentry		*dentry;	/* Register dentry */
+	int			init;		/* Register index file */
+};
+
+struct cpu_debug_range {
+	unsigned		min;		/* Register range min */
+	unsigned		max;		/* Register range max */
+	unsigned		flag;		/* Supported flags */
+	unsigned		model;		/* Supported models */
+};
+
+#endif /* _ASM_X86_CPU_DEBUG_H */
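
The CPU_* selectors in the new header above are single-bit masks derived from enum cpu_debug_bit, so the grouped selectors (CPU_INTEL_COREX, CPU_K8_PLUS, and so on) are plain bitwise ORs, and "does this register apply to this CPU" is a bitwise AND. A standalone sketch of the scheme (names shortened for illustration, not the kernel's):

#include <stdio.h>

enum { CORE_BIT, CORE2_BIT, ATOM_BIT };

#define CORE	(1 << CORE_BIT)
#define CORE2	(1 << CORE2_BIT)
#define ATOM	(1 << ATOM_BIT)
#define COREX	(CORE | CORE2)	/* mirrors CPU_INTEL_COREX */

int main(void)
{
	unsigned int supported = COREX;	/* models a register's support mask */
	unsigned int this_cpu  = CORE2;	/* models the detected CPU flag */

	if (supported & this_cpu)
		printf("register applies to this CPU\n");
	return 0;
}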
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 7301e60dc4a8..0beba0d1468d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -213,6 +213,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm	boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2	boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_xmm3	boot_cpu_has(X86_FEATURE_XMM3)
+#define cpu_has_aes	boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_ht	boot_cpu_has(X86_FEATURE_HT)
 #define cpu_has_mp	boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx	boot_cpu_has(X86_FEATURE_NX)
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
new file mode 100644
index 000000000000..a7f3c75f8ad7
--- /dev/null
+++ b/arch/x86/include/asm/cpumask.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+extern void setup_cpu_local_masks(void);
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
+
+static inline void setup_cpu_local_masks(void) { }
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 0930b4f8d672..c68c361697e1 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -1,39 +1,21 @@
 #ifndef _ASM_X86_CURRENT_H
 #define _ASM_X86_CURRENT_H
 
-#ifdef CONFIG_X86_32
 #include <linux/compiler.h>
 #include <asm/percpu.h>
 
+#ifndef __ASSEMBLY__
 struct task_struct;
 
 DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
-	return x86_read_percpu(current_task);
-}
-
-#else /* X86_32 */
-
-#ifndef __ASSEMBLY__
-#include <asm/pda.h>
-
-struct task_struct;
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return read_pda(pcurrent);
+	return percpu_read(current_task);
 }
 
-#else /* __ASSEMBLY__ */
-
-#include <asm/asm-offsets.h>
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+#define current get_current()
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* X86_32 */
-
-#define current get_current()
-
 #endif /* _ASM_X86_CURRENT_H */
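
After this hunk, both 32-bit and 64-bit read 'current' from the per-CPU variable current_task via percpu_read(), retiring the 64-bit PDA field. A loose userspace analogy using thread-local storage (an analogy only; the kernel uses segment-relative per-CPU loads, not TLS):

#include <stdio.h>

struct task { const char *comm; };

static __thread struct task *current_task;	/* stands in for the per-CPU slot */

static inline struct task *get_current(void)
{
	return current_task;
}
#define current get_current()

int main(void)
{
	struct task t = { "swapper" };

	current_task = &t;
	printf("current->comm = %s\n", current->comm);
	return 0;
}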
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index dc27705f5443..5623c50d67b2 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -91,7 +91,6 @@ static inline int desc_empty(const void *ptr)
 #define store_gdt(dtr)	native_store_gdt(dtr)
 #define store_idt(dtr)	native_store_idt(dtr)
 #define store_tr(tr)	(tr = native_store_tr())
-#define store_ldt(ldt)	asm("sldt %0":"=m" (ldt))
 
 #define load_TLS(t, cpu)	native_load_tls(t, cpu)
 #define set_ldt	native_set_ldt
@@ -112,6 +111,8 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 }
 #endif	/* CONFIG_PARAVIRT */
 
+#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
+
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
 					  const gate_desc *gate)
 {
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 3c034f48fdb0..4994a20acbcb 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -6,7 +6,7 @@ struct dev_archdata {
 	void	*acpi_handle;
 #endif
 #ifdef CONFIG_X86_64
-	struct dma_mapping_ops *dma_ops;
+	struct dma_map_ops *dma_ops;
 #endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 132a134d12f2..cea7b74963e9 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,6 +7,8 @@
  */
 
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
@@ -16,47 +18,9 @@ extern int iommu_merge;
16extern struct device x86_dma_fallback_dev; 18extern struct device x86_dma_fallback_dev;
17extern int panic_on_overflow; 19extern int panic_on_overflow;
18 20
19struct dma_mapping_ops { 21extern struct dma_map_ops *dma_ops;
20 int (*mapping_error)(struct device *dev, 22
21 dma_addr_t dma_addr); 23static inline struct dma_map_ops *get_dma_ops(struct device *dev)
22 void* (*alloc_coherent)(struct device *dev, size_t size,
23 dma_addr_t *dma_handle, gfp_t gfp);
24 void (*free_coherent)(struct device *dev, size_t size,
25 void *vaddr, dma_addr_t dma_handle);
26 dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
27 size_t size, int direction);
28 void (*unmap_single)(struct device *dev, dma_addr_t addr,
29 size_t size, int direction);
30 void (*sync_single_for_cpu)(struct device *hwdev,
31 dma_addr_t dma_handle, size_t size,
32 int direction);
33 void (*sync_single_for_device)(struct device *hwdev,
34 dma_addr_t dma_handle, size_t size,
35 int direction);
36 void (*sync_single_range_for_cpu)(struct device *hwdev,
37 dma_addr_t dma_handle, unsigned long offset,
38 size_t size, int direction);
39 void (*sync_single_range_for_device)(struct device *hwdev,
40 dma_addr_t dma_handle, unsigned long offset,
41 size_t size, int direction);
42 void (*sync_sg_for_cpu)(struct device *hwdev,
43 struct scatterlist *sg, int nelems,
44 int direction);
45 void (*sync_sg_for_device)(struct device *hwdev,
46 struct scatterlist *sg, int nelems,
47 int direction);
48 int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
49 int nents, int direction);
50 void (*unmap_sg)(struct device *hwdev,
51 struct scatterlist *sg, int nents,
52 int direction);
53 int (*dma_supported)(struct device *hwdev, u64 mask);
54 int is_phys;
55};
56
57extern struct dma_mapping_ops *dma_ops;
58
59static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
60{ 24{
61#ifdef CONFIG_X86_32 25#ifdef CONFIG_X86_32
62 return dma_ops; 26 return dma_ops;
@@ -71,7 +35,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
71/* Make sure we keep the same behaviour */ 35/* Make sure we keep the same behaviour */
72static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 36static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
73{ 37{
74 struct dma_mapping_ops *ops = get_dma_ops(dev); 38 struct dma_map_ops *ops = get_dma_ops(dev);
75 if (ops->mapping_error) 39 if (ops->mapping_error)
76 return ops->mapping_error(dev, dma_addr); 40 return ops->mapping_error(dev, dma_addr);
77 41
@@ -90,137 +54,167 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
90 54
91static inline dma_addr_t 55static inline dma_addr_t
92dma_map_single(struct device *hwdev, void *ptr, size_t size, 56dma_map_single(struct device *hwdev, void *ptr, size_t size,
93 int direction) 57 enum dma_data_direction dir)
94{ 58{
95 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 59 struct dma_map_ops *ops = get_dma_ops(hwdev);
96 60 dma_addr_t addr;
97 BUG_ON(!valid_dma_direction(direction)); 61
98 return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); 62 BUG_ON(!valid_dma_direction(dir));
63 addr = ops->map_page(hwdev, virt_to_page(ptr),
64 (unsigned long)ptr & ~PAGE_MASK, size,
65 dir, NULL);
66 debug_dma_map_page(hwdev, virt_to_page(ptr),
67 (unsigned long)ptr & ~PAGE_MASK, size,
68 dir, addr, true);
69 return addr;
99} 70}
100 71
101static inline void 72static inline void
102dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, 73dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
103 int direction) 74 enum dma_data_direction dir)
104{ 75{
105 struct dma_mapping_ops *ops = get_dma_ops(dev); 76 struct dma_map_ops *ops = get_dma_ops(dev);
106 77
107 BUG_ON(!valid_dma_direction(direction)); 78 BUG_ON(!valid_dma_direction(dir));
108 if (ops->unmap_single) 79 if (ops->unmap_page)
109 ops->unmap_single(dev, addr, size, direction); 80 ops->unmap_page(dev, addr, size, dir, NULL);
81 debug_dma_unmap_page(dev, addr, size, dir, true);
110} 82}
111 83
112static inline int 84static inline int
113dma_map_sg(struct device *hwdev, struct scatterlist *sg, 85dma_map_sg(struct device *hwdev, struct scatterlist *sg,
114 int nents, int direction) 86 int nents, enum dma_data_direction dir)
115{ 87{
116 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 88 struct dma_map_ops *ops = get_dma_ops(hwdev);
89 int ents;
90
91 BUG_ON(!valid_dma_direction(dir));
92 ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
93 debug_dma_map_sg(hwdev, sg, nents, ents, dir);
117 94
118 BUG_ON(!valid_dma_direction(direction)); 95 return ents;
119 return ops->map_sg(hwdev, sg, nents, direction);
120} 96}
121 97
122static inline void 98static inline void
123dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, 99dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
124 int direction) 100 enum dma_data_direction dir)
125{ 101{
126 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 102 struct dma_map_ops *ops = get_dma_ops(hwdev);
127 103
128 BUG_ON(!valid_dma_direction(direction)); 104 BUG_ON(!valid_dma_direction(dir));
105 debug_dma_unmap_sg(hwdev, sg, nents, dir);
129 if (ops->unmap_sg) 106 if (ops->unmap_sg)
130 ops->unmap_sg(hwdev, sg, nents, direction); 107 ops->unmap_sg(hwdev, sg, nents, dir, NULL);
131} 108}
132 109
133static inline void 110static inline void
134dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 111dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
135 size_t size, int direction) 112 size_t size, enum dma_data_direction dir)
136{ 113{
137 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 114 struct dma_map_ops *ops = get_dma_ops(hwdev);
138 115
139 BUG_ON(!valid_dma_direction(direction)); 116 BUG_ON(!valid_dma_direction(dir));
140 if (ops->sync_single_for_cpu) 117 if (ops->sync_single_for_cpu)
141 ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); 118 ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
119 debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
142 flush_write_buffers(); 120 flush_write_buffers();
143} 121}
144 122
145static inline void 123static inline void
146dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, 124dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
147 size_t size, int direction) 125 size_t size, enum dma_data_direction dir)
148{ 126{
149 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 127 struct dma_map_ops *ops = get_dma_ops(hwdev);
150 128
151 BUG_ON(!valid_dma_direction(direction)); 129 BUG_ON(!valid_dma_direction(dir));
152 if (ops->sync_single_for_device) 130 if (ops->sync_single_for_device)
153 ops->sync_single_for_device(hwdev, dma_handle, size, direction); 131 ops->sync_single_for_device(hwdev, dma_handle, size, dir);
132 debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
154 flush_write_buffers(); 133 flush_write_buffers();
155} 134}
156 135
157static inline void 136static inline void
158dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 137dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
159 unsigned long offset, size_t size, int direction) 138 unsigned long offset, size_t size,
139 enum dma_data_direction dir)
160{ 140{
161 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 141 struct dma_map_ops *ops = get_dma_ops(hwdev);
162 142
163 BUG_ON(!valid_dma_direction(direction)); 143 BUG_ON(!valid_dma_direction(dir));
164 if (ops->sync_single_range_for_cpu) 144 if (ops->sync_single_range_for_cpu)
165 ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, 145 ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
166 size, direction); 146 size, dir);
147 debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
148 offset, size, dir);
167 flush_write_buffers(); 149 flush_write_buffers();
168} 150}
169 151
170static inline void 152static inline void
171dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, 153dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
172 unsigned long offset, size_t size, 154 unsigned long offset, size_t size,
173 int direction) 155 enum dma_data_direction dir)
174{ 156{
175 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 157 struct dma_map_ops *ops = get_dma_ops(hwdev);
176 158
177 BUG_ON(!valid_dma_direction(direction)); 159 BUG_ON(!valid_dma_direction(dir));
178 if (ops->sync_single_range_for_device) 160 if (ops->sync_single_range_for_device)
179 ops->sync_single_range_for_device(hwdev, dma_handle, 161 ops->sync_single_range_for_device(hwdev, dma_handle,
180 offset, size, direction); 162 offset, size, dir);
163 debug_dma_sync_single_range_for_device(hwdev, dma_handle,
164 offset, size, dir);
181 flush_write_buffers(); 165 flush_write_buffers();
182} 166}
183 167
184static inline void 168static inline void
185dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 169dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
186 int nelems, int direction) 170 int nelems, enum dma_data_direction dir)
187{ 171{
188 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 172 struct dma_map_ops *ops = get_dma_ops(hwdev);
189 173
190 BUG_ON(!valid_dma_direction(direction)); 174 BUG_ON(!valid_dma_direction(dir));
191 if (ops->sync_sg_for_cpu) 175 if (ops->sync_sg_for_cpu)
192 ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); 176 ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
177 debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
193 flush_write_buffers(); 178 flush_write_buffers();
194} 179}
195 180
196static inline void 181static inline void
197dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 182dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
198 int nelems, int direction) 183 int nelems, enum dma_data_direction dir)
199{ 184{
200 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 185 struct dma_map_ops *ops = get_dma_ops(hwdev);
201 186
202 BUG_ON(!valid_dma_direction(direction)); 187 BUG_ON(!valid_dma_direction(dir));
203 if (ops->sync_sg_for_device) 188 if (ops->sync_sg_for_device)
204 ops->sync_sg_for_device(hwdev, sg, nelems, direction); 189 ops->sync_sg_for_device(hwdev, sg, nelems, dir);
190 debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
205 191
206 flush_write_buffers(); 192 flush_write_buffers();
207} 193}
208 194
209static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, 195static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
210 size_t offset, size_t size, 196 size_t offset, size_t size,
211 int direction) 197 enum dma_data_direction dir)
212{ 198{
213 struct dma_mapping_ops *ops = get_dma_ops(dev); 199 struct dma_map_ops *ops = get_dma_ops(dev);
200 dma_addr_t addr;
214 201
215 BUG_ON(!valid_dma_direction(direction)); 202 BUG_ON(!valid_dma_direction(dir));
216 return ops->map_single(dev, page_to_phys(page) + offset, 203 addr = ops->map_page(dev, page, offset, size, dir, NULL);
217 size, direction); 204 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
205
206 return addr;
218} 207}
219 208
220static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, 209static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
221 size_t size, int direction) 210 size_t size, enum dma_data_direction dir)
222{ 211{
223 dma_unmap_single(dev, addr, size, direction); 212 struct dma_map_ops *ops = get_dma_ops(dev);
213
214 BUG_ON(!valid_dma_direction(dir));
215 if (ops->unmap_page)
216 ops->unmap_page(dev, addr, size, dir, NULL);
217 debug_dma_unmap_page(dev, addr, size, dir, false);
224} 218}
225 219
226static inline void 220static inline void
@@ -266,7 +260,7 @@ static inline void *
266dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 260dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
267 gfp_t gfp) 261 gfp_t gfp)
268{ 262{
269 struct dma_mapping_ops *ops = get_dma_ops(dev); 263 struct dma_map_ops *ops = get_dma_ops(dev);
270 void *memory; 264 void *memory;
271 265
272 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 266 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -285,20 +279,24 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
285 if (!ops->alloc_coherent) 279 if (!ops->alloc_coherent)
286 return NULL; 280 return NULL;
287 281
288 return ops->alloc_coherent(dev, size, dma_handle, 282 memory = ops->alloc_coherent(dev, size, dma_handle,
289 dma_alloc_coherent_gfp_flags(dev, gfp)); 283 dma_alloc_coherent_gfp_flags(dev, gfp));
284 debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
285
286 return memory;
290} 287}
291 288
292static inline void dma_free_coherent(struct device *dev, size_t size, 289static inline void dma_free_coherent(struct device *dev, size_t size,
293 void *vaddr, dma_addr_t bus) 290 void *vaddr, dma_addr_t bus)
294{ 291{
295 struct dma_mapping_ops *ops = get_dma_ops(dev); 292 struct dma_map_ops *ops = get_dma_ops(dev);
296 293
297 WARN_ON(irqs_disabled()); /* for portability */ 294 WARN_ON(irqs_disabled()); /* for portability */
298 295
299 if (dma_release_from_coherent(dev, get_order(size), vaddr)) 296 if (dma_release_from_coherent(dev, get_order(size), vaddr))
300 return; 297 return;
301 298
299 debug_dma_free_coherent(dev, size, vaddr, bus);
302 if (ops->free_coherent) 300 if (ops->free_coherent)
303 ops->free_coherent(dev, size, vaddr, bus); 301 ops->free_coherent(dev, size, vaddr, bus);
304} 302}
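Taken together, the dma-mapping.h changes switch every direction argument to the typed enum dma_data_direction and mirror each operation with a debug_dma_* hook (active under CONFIG_DMA_API_DEBUG). A minimal, assumption-flagged sketch of the driver-side view -- my_dev and buf are hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int example_tx(struct device *my_dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(my_dev, handle))
			return -ENOMEM;

		/* ... point the hardware at "handle", wait for completion ... */

		dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}

Passing a bare int still compiles in C, but the enum documents intent and matches the struct dma_map_ops callbacks one-for-one.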
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index bc68212c6bc0..fd8f9e2ca35f 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -1,22 +1,15 @@
1#ifndef _ASM_X86_DMI_H 1#ifndef _ASM_X86_DMI_H
2#define _ASM_X86_DMI_H 2#define _ASM_X86_DMI_H
3 3
4#include <asm/io.h> 4#include <linux/compiler.h>
5 5#include <linux/init.h>
6#define DMI_MAX_DATA 2048
7 6
8extern int dmi_alloc_index; 7#include <asm/io.h>
9extern char dmi_alloc_data[DMI_MAX_DATA]; 8#include <asm/setup.h>
10 9
11/* This is so early that there is no good way to allocate dynamic memory. 10static __always_inline __init void *dmi_alloc(unsigned len)
12 Allocate data in a BSS array. */
13static inline void *dmi_alloc(unsigned len)
14{ 11{
15 int idx = dmi_alloc_index; 12 return extend_brk(len, sizeof(int));
16 if ((dmi_alloc_index + len) > DMI_MAX_DATA)
17 return NULL;
18 dmi_alloc_index += len;
19 return dmi_alloc_data + idx;
20} 13}
21 14
22/* Use early IO mappings for DMI because it's initialized early */ 15/* Use early IO mappings for DMI because it's initialized early */
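The dmi.h rewrite replaces the fixed 2048-byte BSS pool with boot-time brk allocations, so the old DMI_MAX_DATA ceiling disappears. A hedged caller sketch (example_dmi_save is a made-up name):

	#include <linux/string.h>
	#include <asm/dmi.h>

	static void __init example_dmi_save(const char *s, unsigned len)
	{
		char *p = dmi_alloc(len + 1);	/* brk-backed, sizeof(int)-aligned */

		if (p) {
			memcpy(p, s, len);
			p[len] = '\0';
		}
	}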
diff --git a/arch/x86/include/asm/mach-default/do_timer.h b/arch/x86/include/asm/do_timer.h
index 23ecda0b28a0..23ecda0b28a0 100644
--- a/arch/x86/include/asm/mach-default/do_timer.h
+++ b/arch/x86/include/asm/do_timer.h
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 00d41ce4c844..7ecba4d85089 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -72,7 +72,7 @@ extern int e820_all_mapped(u64 start, u64 end, unsigned type);
72extern void e820_add_region(u64 start, u64 size, int type); 72extern void e820_add_region(u64 start, u64 size, int type);
73extern void e820_print_map(char *who); 73extern void e820_print_map(char *who);
74extern int 74extern int
75sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, int *pnr_map); 75sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
76extern u64 e820_update_range(u64 start, u64 size, unsigned old_type, 76extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
77 unsigned new_type); 77 unsigned new_type);
78extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type, 78extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index f51a3ddde01a..83c1bc8d2e8a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
112 * now struct_user_regs, they are different) 112 * now struct_user_regs, they are different)
113 */ 113 */
114 114
115#define ELF_CORE_COPY_REGS(pr_reg, regs) \ 115#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
116do { \ 116do { \
117 pr_reg[0] = regs->bx; \ 117 pr_reg[0] = regs->bx; \
118 pr_reg[1] = regs->cx; \ 118 pr_reg[1] = regs->cx; \
@@ -124,7 +124,6 @@ do { \
124 pr_reg[7] = regs->ds & 0xffff; \ 124 pr_reg[7] = regs->ds & 0xffff; \
125 pr_reg[8] = regs->es & 0xffff; \ 125 pr_reg[8] = regs->es & 0xffff; \
126 pr_reg[9] = regs->fs & 0xffff; \ 126 pr_reg[9] = regs->fs & 0xffff; \
127 savesegment(gs, pr_reg[10]); \
128 pr_reg[11] = regs->orig_ax; \ 127 pr_reg[11] = regs->orig_ax; \
129 pr_reg[12] = regs->ip; \ 128 pr_reg[12] = regs->ip; \
130 pr_reg[13] = regs->cs & 0xffff; \ 129 pr_reg[13] = regs->cs & 0xffff; \
@@ -133,6 +132,18 @@ do { \
133 pr_reg[16] = regs->ss & 0xffff; \ 132 pr_reg[16] = regs->ss & 0xffff; \
134} while (0); 133} while (0);
135 134
135#define ELF_CORE_COPY_REGS(pr_reg, regs) \
136do { \
137 ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
138 pr_reg[10] = get_user_gs(regs); \
139} while (0);
140
141#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
142do { \
143 ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
144 savesegment(gs, pr_reg[10]); \
145} while (0);
146
136#define ELF_PLATFORM (utsname()->machine) 147#define ELF_PLATFORM (utsname()->machine)
137#define set_personality_64bit() do { } while (0) 148#define set_personality_64bit() do { } while (0)
138 149
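The elf.h refactor pulls the shared register copy into ELF_CORE_COPY_REGS_COMMON; the two wrappers then differ only in slot 10 (gs): user core dumps take the task's saved user gs, kernel-side dumps sample the live segment register. A hedged sketch (task_pt_regs is the stock helper; the function name is invented):

	static void example_fill_gregs(elf_gregset_t gr)
	{
		struct pt_regs *regs = task_pt_regs(current);

		ELF_CORE_COPY_REGS(gr, regs);		/* gr[10] = get_user_gs(regs) */
		ELF_CORE_COPY_KERNEL_REGS(gr, regs);	/* gr[10] via savesegment(gs, ...) */
	}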
diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 6b1add8e31dd..c2e6bedaf258 100644
--- a/arch/x86/include/asm/mach-default/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -9,14 +9,32 @@
9 * is no hardware IRQ pin equivalent for them, they are triggered 9 * is no hardware IRQ pin equivalent for them, they are triggered
10 * through the ICC by us (IPIs) 10 * through the ICC by us (IPIs)
11 */ 11 */
12#ifdef CONFIG_X86_SMP 12#ifdef CONFIG_SMP
13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) 13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
14BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
15BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) 14BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
16BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) 15BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
17BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) 16BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
17
18BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
19 smp_invalidate_interrupt)
20BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
21 smp_invalidate_interrupt)
22BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
23 smp_invalidate_interrupt)
24BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
25 smp_invalidate_interrupt)
26BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
27 smp_invalidate_interrupt)
28BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
29 smp_invalidate_interrupt)
30BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
31 smp_invalidate_interrupt)
32BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
33 smp_invalidate_interrupt)
18#endif 34#endif
19 35
36BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
37
20/* 38/*
21 * every pentium local APIC has two 'local interrupts', with a 39 * every pentium local APIC has two 'local interrupts', with a
22 * soft-definable vector attached to both interrupts, one of 40 * soft-definable vector attached to both interrupts, one of
@@ -25,10 +43,15 @@ BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
25 * a much simpler SMP time architecture: 43 * a much simpler SMP time architecture:
26 */ 44 */
27#ifdef CONFIG_X86_LOCAL_APIC 45#ifdef CONFIG_X86_LOCAL_APIC
46
28BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) 47BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
29BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) 48BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
30BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) 49BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
31 50
51#ifdef CONFIG_PERF_COUNTERS
52BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
53#endif
54
32#ifdef CONFIG_X86_MCE_P4THERMAL 55#ifdef CONFIG_X86_MCE_P4THERMAL
33BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) 56BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
34#endif 57#endif
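The single invalidate stub becomes eight BUILD_INTERRUPT3 stubs on consecutive vectors, all landing in smp_invalidate_interrupt(). The payoff is parallel TLB shootdown: a sender can derive its slot from its own CPU id, so up to eight flushes proceed concurrently. A hedged sketch of that slot choice (the helper name is invented; the modulus matches the eight stubs above):

	#include <linux/smp.h>
	#include <asm/irq_vectors.h>

	static int example_pick_invalidate_vector(void)
	{
		unsigned int sender = smp_processor_id() % 8;

		return INVALIDATE_TLB_VECTOR_START + sender;
	}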
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
deleted file mode 100644
index c58b9cc74465..000000000000
--- a/arch/x86/include/asm/es7000/apic.h
+++ /dev/null
@@ -1,242 +0,0 @@
1#ifndef __ASM_ES7000_APIC_H
2#define __ASM_ES7000_APIC_H
3
4#include <linux/gfp.h>
5
6#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
7#define esr_disable (1)
8
9static inline int apic_id_registered(void)
10{
11 return (1);
12}
13
14static inline const cpumask_t *target_cpus_cluster(void)
15{
16 return &CPU_MASK_ALL;
17}
18
19static inline const cpumask_t *target_cpus(void)
20{
21 return &cpumask_of_cpu(smp_processor_id());
22}
23
24#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
25#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
26#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
27#define NO_BALANCE_IRQ_CLUSTER (1)
28
29#define APIC_DFR_VALUE (APIC_DFR_FLAT)
30#define INT_DELIVERY_MODE (dest_Fixed)
31#define INT_DEST_MODE (0) /* phys delivery to target procs */
32#define NO_BALANCE_IRQ (0)
33#undef APIC_DEST_LOGICAL
34#define APIC_DEST_LOGICAL 0x0
35
36static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
37{
38 return 0;
39}
40static inline unsigned long check_apicid_present(int bit)
41{
42 return physid_isset(bit, phys_cpu_present_map);
43}
44
45#define apicid_cluster(apicid) (apicid & 0xF0)
46
47static inline unsigned long calculate_ldr(int cpu)
48{
49 unsigned long id;
50 id = xapic_phys_to_log_apicid(cpu);
51 return (SET_APIC_LOGICAL_ID(id));
52}
53
54/*
55 * Set up the logical destination ID.
56 *
57 * Intel recommends to set DFR, LdR and TPR before enabling
58 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
59 * document number 292116). So here it goes...
60 */
61static inline void init_apic_ldr_cluster(void)
62{
63 unsigned long val;
64 int cpu = smp_processor_id();
65
66 apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
67 val = calculate_ldr(cpu);
68 apic_write(APIC_LDR, val);
69}
70
71static inline void init_apic_ldr(void)
72{
73 unsigned long val;
74 int cpu = smp_processor_id();
75
76 apic_write(APIC_DFR, APIC_DFR_VALUE);
77 val = calculate_ldr(cpu);
78 apic_write(APIC_LDR, val);
79}
80
81extern int apic_version [MAX_APICS];
82static inline void setup_apic_routing(void)
83{
84 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
85 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
86 (apic_version[apic] == 0x14) ?
87 "Physical Cluster" : "Logical Cluster",
88 nr_ioapics, cpus_addr(*target_cpus())[0]);
89}
90
91static inline int multi_timer_check(int apic, int irq)
92{
93 return 0;
94}
95
96static inline int apicid_to_node(int logical_apicid)
97{
98 return 0;
99}
100
101
102static inline int cpu_present_to_apicid(int mps_cpu)
103{
104 if (!mps_cpu)
105 return boot_cpu_physical_apicid;
106 else if (mps_cpu < nr_cpu_ids)
107 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
108 else
109 return BAD_APICID;
110}
111
112static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
113{
114 static int id = 0;
115 physid_mask_t mask;
116 mask = physid_mask_of_physid(id);
117 ++id;
118 return mask;
119}
120
121extern u8 cpu_2_logical_apicid[];
122/* Mapping from cpu number to logical apicid */
123static inline int cpu_to_logical_apicid(int cpu)
124{
125#ifdef CONFIG_SMP
126 if (cpu >= nr_cpu_ids)
127 return BAD_APICID;
128 return (int)cpu_2_logical_apicid[cpu];
129#else
130 return logical_smp_processor_id();
131#endif
132}
133
134static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
135{
136 /* For clustered we don't have a good way to do this yet - hack */
137 return physids_promote(0xff);
138}
139
140
141static inline void setup_portio_remap(void)
142{
143}
144
145extern unsigned int boot_cpu_physical_apicid;
146static inline int check_phys_apicid_present(int cpu_physical_apicid)
147{
148 boot_cpu_physical_apicid = read_apic_id();
149 return (1);
150}
151
152static inline unsigned int
153cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
154{
155 int num_bits_set;
156 int cpus_found = 0;
157 int cpu;
158 int apicid;
159
160 num_bits_set = cpumask_weight(cpumask);
161 /* Return id to all */
162 if (num_bits_set == nr_cpu_ids)
163 return 0xFF;
164 /*
165 * The cpus in the mask must all be in the same apic cluster. If they
166 * are not, return the default value of TARGET_CPUS.
167 */
168 cpu = cpumask_first(cpumask);
169 apicid = cpu_to_logical_apicid(cpu);
170 while (cpus_found < num_bits_set) {
171 if (cpumask_test_cpu(cpu, cpumask)) {
172 int new_apicid = cpu_to_logical_apicid(cpu);
173 if (apicid_cluster(apicid) !=
174 apicid_cluster(new_apicid)){
175 printk ("%s: Not a valid mask!\n", __func__);
176 return 0xFF;
177 }
178 apicid = new_apicid;
179 cpus_found++;
180 }
181 cpu++;
182 }
183 return apicid;
184}
185
186static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
187{
188 int num_bits_set;
189 int cpus_found = 0;
190 int cpu;
191 int apicid;
192
193 num_bits_set = cpus_weight(*cpumask);
194 /* Return id to all */
195 if (num_bits_set == nr_cpu_ids)
196 return cpu_to_logical_apicid(0);
197 /*
198 * The cpus in the mask must all be in the same apic cluster. If they
199 * are not, return the default value of TARGET_CPUS.
200 */
201 cpu = first_cpu(*cpumask);
202 apicid = cpu_to_logical_apicid(cpu);
203 while (cpus_found < num_bits_set) {
204 if (cpu_isset(cpu, *cpumask)) {
205 int new_apicid = cpu_to_logical_apicid(cpu);
206 if (apicid_cluster(apicid) !=
207 apicid_cluster(new_apicid)){
208 printk ("%s: Not a valid mask!\n", __func__);
209 return cpu_to_logical_apicid(0);
210 }
211 apicid = new_apicid;
212 cpus_found++;
213 }
214 cpu++;
215 }
216 return apicid;
217}
218
219
220static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
221 const struct cpumask *andmask)
222{
223 int apicid = cpu_to_logical_apicid(0);
224 cpumask_var_t cpumask;
225
226 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
227 return apicid;
228
229 cpumask_and(cpumask, inmask, andmask);
230 cpumask_and(cpumask, cpumask, cpu_online_mask);
231 apicid = cpu_mask_to_apicid(cpumask);
232
233 free_cpumask_var(cpumask);
234 return apicid;
235}
236
237static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
238{
239 return cpuid_apic >> index_msb;
240}
241
242#endif /* __ASM_ES7000_APIC_H */
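Both mask-to-apicid helpers in the deleted header enforce that every cpu in the mask shares one logical APIC cluster, where the cluster is the high nibble of the logical id. A worked sketch of that check, reusing the macro from the file above:

	#define apicid_cluster(apicid)	((apicid) & 0xF0)

	int same    = apicid_cluster(0x31) == apicid_cluster(0x3C);	/* 1: both cluster 0x30 */
	int differs = apicid_cluster(0x31) == apicid_cluster(0x41);	/* 0: 0x30 vs 0x40      */

A mask mixing 0x31 and 0x41 would trip the "Not a valid mask!" path and fall back to the default id.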
diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h
deleted file mode 100644
index 8b234a3cb851..000000000000
--- a/arch/x86/include/asm/es7000/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_ES7000_APICDEF_H
2#define __ASM_ES7000_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
deleted file mode 100644
index 7e8ed24d4b8a..000000000000
--- a/arch/x86/include/asm/es7000/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_ES7000_IPI_H
2#define __ASM_ES7000_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
deleted file mode 100644
index c1629b090ec2..000000000000
--- a/arch/x86/include/asm/es7000/mpparse.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __ASM_ES7000_MPPARSE_H
2#define __ASM_ES7000_MPPARSE_H
3
4#include <linux/acpi.h>
5
6extern int parse_unisys_oem (char *oemptr);
7extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
8extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
9extern void setup_unisys(void);
10
11#ifndef CONFIG_X86_GENERICARCH
12extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
13extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
14#endif
15
16#ifdef CONFIG_ACPI
17
18static inline int es7000_check_dsdt(void)
19{
20 struct acpi_table_header header;
21
22 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
23 !strncmp(header.oem_id, "UNISYS", 6))
24 return 1;
25 return 0;
26}
27#endif
28
29#endif /* __ASM_ES7000_MPPARSE_H */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
deleted file mode 100644
index 78f0daaee436..000000000000
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef __ASM_ES7000_WAKECPU_H
2#define __ASM_ES7000_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW 0x467
5#define TRAMPOLINE_PHYS_HIGH 0x469
6
7static inline void wait_for_init_deassert(atomic_t *deassert)
8{
9#ifndef CONFIG_ES7000_CLUSTERED_APIC
10 while (!atomic_read(deassert))
11 cpu_relax();
12#endif
13 return;
14}
15
16/* Nothing to do for most platforms, since cleared by the INIT cycle */
17static inline void smp_callin_clear_local_apic(void)
18{
19}
20
21static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
22{
23}
24
25static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
26{
27}
28
29extern void __inquire_remote_apic(int apicid);
30
31static inline void inquire_remote_apic(int apicid)
32{
33 if (apic_verbosity >= APIC_DEBUG)
34 __inquire_remote_apic(apicid);
35}
36
37#endif /* __ASM_ES7000_WAKECPU_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 23696d44a0af..63a79c77d220 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -1,11 +1,145 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
12 */
13
1#ifndef _ASM_X86_FIXMAP_H 14#ifndef _ASM_X86_FIXMAP_H
2#define _ASM_X86_FIXMAP_H 15#define _ASM_X86_FIXMAP_H
3 16
17#ifndef __ASSEMBLY__
18#include <linux/kernel.h>
19#include <asm/acpi.h>
20#include <asm/apicdef.h>
21#include <asm/page.h>
22#ifdef CONFIG_X86_32
23#include <linux/threads.h>
24#include <asm/kmap_types.h>
25#else
26#include <asm/vsyscall.h>
27#endif
28
29/*
30 * We can't declare FIXADDR_TOP as a variable for x86_64 because vsyscall
31 * uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
32 * Because of this, FIXADDR_TOP x86 integration was left for later work.
33 */
34#ifdef CONFIG_X86_32
35/* used by vmalloc.c, vsyscall.lds.S.
36 *
37 * Leave one empty page between vmalloc'ed areas and
38 * the start of the fixmap.
39 */
40extern unsigned long __FIXADDR_TOP;
41#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
42
43#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
44#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
45#else
46#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
47
48/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
49#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
50#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
51#endif
52
53
54/*
55 * Here we define all the compile-time 'special' virtual
56 * addresses. The point is to have a constant address at
57 * compile time, but to set the physical address only
58 * in the boot process.
59 * for x86_32: We allocate these special addresses
60 * from the end of virtual memory (0xfffff000) backwards.
61 * Also this lets us do fail-safe vmalloc(), we
62 * can guarantee that these special addresses and
63 * vmalloc()-ed addresses never overlap.
64 *
65 * These 'compile-time allocated' memory buffers are
66 * fixed-size 4k pages (or larger if used with an increment
67 * higher than 1). Use set_fixmap(idx,phys) to associate
68 * physical memory with fixmap indices.
69 *
70 * TLB entries of such buffers will not be flushed across
71 * task switches.
72 */
73enum fixed_addresses {
4#ifdef CONFIG_X86_32 74#ifdef CONFIG_X86_32
5# include "fixmap_32.h" 75 FIX_HOLE,
76 FIX_VDSO,
6#else 77#else
7# include "fixmap_64.h" 78 VSYSCALL_LAST_PAGE,
79 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
80 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
81 VSYSCALL_HPET,
8#endif 82#endif
83 FIX_DBGP_BASE,
84 FIX_EARLYCON_MEM_BASE,
85#ifdef CONFIG_X86_LOCAL_APIC
86 FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
87#endif
88#ifdef CONFIG_X86_IO_APIC
89 FIX_IO_APIC_BASE_0,
90 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
91#endif
92#ifdef CONFIG_X86_VISWS_APIC
93 FIX_CO_CPU, /* Cobalt timer */
94 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
95 FIX_LI_PCIA, /* Lithium PCI Bridge A */
96 FIX_LI_PCIB, /* Lithium PCI Bridge B */
97#endif
98#ifdef CONFIG_X86_F00F_BUG
99 FIX_F00F_IDT, /* Virtual mapping for IDT */
100#endif
101#ifdef CONFIG_X86_CYCLONE_TIMER
102 FIX_CYCLONE_TIMER, /*cyclone timer register*/
103#endif
104#ifdef CONFIG_X86_32
105 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
106 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
107#ifdef CONFIG_PCI_MMCONFIG
108 FIX_PCIE_MCFG,
109#endif
110#endif
111#ifdef CONFIG_PARAVIRT
112 FIX_PARAVIRT_BOOTMAP,
113#endif
114 __end_of_permanent_fixed_addresses,
115#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
116 FIX_OHCI1394_BASE,
117#endif
118 /*
119 * 256 temporary boot-time mappings, used by early_ioremap(),
120 * before ioremap() is functional.
121 *
122 * We round it up to the next 256 pages boundary so that we
123 * can have a single pgd entry and a single pte table:
124 */
125#define NR_FIX_BTMAPS 64
126#define FIX_BTMAPS_SLOTS 4
127 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
128 (__end_of_permanent_fixed_addresses & 255),
129 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
130#ifdef CONFIG_X86_32
131 FIX_WP_TEST,
132#endif
133 __end_of_fixed_addresses
134};
135
136
137extern void reserve_top_address(unsigned long reserve);
138
139#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
140#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
141#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
142#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE)
9 143
10extern int fixmaps_set; 144extern int fixmaps_set;
11 145
@@ -69,4 +203,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
69 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 203 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
70 return __virt_to_fix(vaddr); 204 return __virt_to_fix(vaddr);
71} 205}
206#endif /* !__ASSEMBLY__ */
72#endif /* _ASM_X86_FIXMAP_H */ 207#endif /* _ASM_X86_FIXMAP_H */
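With fixmap_32.h and fixmap_64.h folded into one header, the calling pattern is unchanged: bind a physical page to a compile-time slot, then read back its constant virtual address. A minimal sketch under that assumption (phys is hypothetical):

	#include <asm/fixmap.h>

	static void __iomem * __init example_map_slot(unsigned long phys)
	{
		set_fixmap_nocache(FIX_APIC_BASE, phys);
		return (void __iomem *)fix_to_virt(FIX_APIC_BASE);
	}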
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
deleted file mode 100644
index c7115c1d7217..000000000000
--- a/arch/x86/include/asm/fixmap_32.h
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */
12
13#ifndef _ASM_X86_FIXMAP_32_H
14#define _ASM_X86_FIXMAP_32_H
15
16
17/* used by vmalloc.c, vsyscall.lds.S.
18 *
19 * Leave one empty page between vmalloc'ed areas and
20 * the start of the fixmap.
21 */
22extern unsigned long __FIXADDR_TOP;
23#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
24#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
25
26#ifndef __ASSEMBLY__
27#include <linux/kernel.h>
28#include <asm/acpi.h>
29#include <asm/apicdef.h>
30#include <asm/page.h>
31#include <linux/threads.h>
32#include <asm/kmap_types.h>
33
34/*
35 * Here we define all the compile-time 'special' virtual
36 * addresses. The point is to have a constant address at
37 * compile time, but to set the physical address only
38 * in the boot process. We allocate these special addresses
39 * from the end of virtual memory (0xfffff000) backwards.
40 * Also this lets us do fail-safe vmalloc(), we
41 * can guarantee that these special addresses and
42 * vmalloc()-ed addresses never overlap.
43 *
44 * These 'compile-time allocated' memory buffers are
45 * fixed-size 4k pages (or larger if used with an increment
46 * higher than 1). Use set_fixmap(idx,phys) to associate
47 * physical memory with fixmap indices.
48 *
49 * TLB entries of such buffers will not be flushed across
50 * task switches.
51 */
52enum fixed_addresses {
53 FIX_HOLE,
54 FIX_VDSO,
55 FIX_DBGP_BASE,
56 FIX_EARLYCON_MEM_BASE,
57#ifdef CONFIG_X86_LOCAL_APIC
58 FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
59#endif
60#ifdef CONFIG_X86_IO_APIC
61 FIX_IO_APIC_BASE_0,
62 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
63#endif
64#ifdef CONFIG_X86_VISWS_APIC
65 FIX_CO_CPU, /* Cobalt timer */
66 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
67 FIX_LI_PCIA, /* Lithium PCI Bridge A */
68 FIX_LI_PCIB, /* Lithium PCI Bridge B */
69#endif
70#ifdef CONFIG_X86_F00F_BUG
71 FIX_F00F_IDT, /* Virtual mapping for IDT */
72#endif
73#ifdef CONFIG_X86_CYCLONE_TIMER
74 FIX_CYCLONE_TIMER, /*cyclone timer register*/
75#endif
76 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
77 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
78#ifdef CONFIG_PCI_MMCONFIG
79 FIX_PCIE_MCFG,
80#endif
81#ifdef CONFIG_PARAVIRT
82 FIX_PARAVIRT_BOOTMAP,
83#endif
84 __end_of_permanent_fixed_addresses,
85 /*
86 * 256 temporary boot-time mappings, used by early_ioremap(),
87 * before ioremap() is functional.
88 *
89 * We round it up to the next 256 pages boundary so that we
90 * can have a single pgd entry and a single pte table:
91 */
92#define NR_FIX_BTMAPS 64
93#define FIX_BTMAPS_SLOTS 4
94 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
95 (__end_of_permanent_fixed_addresses & 255),
96 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
97 FIX_WP_TEST,
98#ifdef CONFIG_ACPI
99 FIX_ACPI_BEGIN,
100 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
101#endif
102#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
103 FIX_OHCI1394_BASE,
104#endif
105 __end_of_fixed_addresses
106};
107
108extern void reserve_top_address(unsigned long reserve);
109
110
111#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
112
113#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
114#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
115#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
116#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
117
118#endif /* !__ASSEMBLY__ */
119#endif /* _ASM_X86_FIXMAP_32_H */
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h
deleted file mode 100644
index 8be740977db8..000000000000
--- a/arch/x86/include/asm/fixmap_64.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 */
10
11#ifndef _ASM_X86_FIXMAP_64_H
12#define _ASM_X86_FIXMAP_64_H
13
14#include <linux/kernel.h>
15#include <asm/acpi.h>
16#include <asm/apicdef.h>
17#include <asm/page.h>
18#include <asm/vsyscall.h>
19
20/*
21 * Here we define all the compile-time 'special' virtual
22 * addresses. The point is to have a constant address at
23 * compile time, but to set the physical address only
24 * in the boot process.
25 *
26 * These 'compile-time allocated' memory buffers are
27 * fixed-size 4k pages (or larger if used with an increment
28 * higher than 1). Use set_fixmap(idx,phys) to associate
29 * physical memory with fixmap indices.
30 *
31 * TLB entries of such buffers will not be flushed across
32 * task switches.
33 */
34
35enum fixed_addresses {
36 VSYSCALL_LAST_PAGE,
37 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
38 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
39 VSYSCALL_HPET,
40 FIX_DBGP_BASE,
41 FIX_EARLYCON_MEM_BASE,
42 FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
43 FIX_IO_APIC_BASE_0,
44 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
45#ifdef CONFIG_PARAVIRT
46 FIX_PARAVIRT_BOOTMAP,
47#endif
48 __end_of_permanent_fixed_addresses,
49#ifdef CONFIG_ACPI
50 FIX_ACPI_BEGIN,
51 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
52#endif
53#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
54 FIX_OHCI1394_BASE,
55#endif
56 /*
57 * 256 temporary boot-time mappings, used by early_ioremap(),
58 * before ioremap() is functional.
59 *
60 * We round it up to the next 256 pages boundary so that we
61 * can have a single pgd entry and a single pte table:
62 */
63#define NR_FIX_BTMAPS 64
64#define FIX_BTMAPS_SLOTS 4
65 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
66 (__end_of_permanent_fixed_addresses & 255),
67 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
68 __end_of_fixed_addresses
69};
70
71#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
72#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
73#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
74
75/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
76#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
77#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
78
79#endif /* _ASM_X86_FIXMAP_64_H */
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b55b4a7fbefd..db24c2278be0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
55#endif /* __ASSEMBLY__ */ 55#endif /* __ASSEMBLY__ */
56#endif /* CONFIG_FUNCTION_TRACER */ 56#endif /* CONFIG_FUNCTION_TRACER */
57 57
58#ifdef CONFIG_FUNCTION_GRAPH_TRACER
59
60#ifndef __ASSEMBLY__
61
62/*
63 * Stack of return addresses for functions
64 * of a thread.
65 * Used in struct thread_info
66 */
67struct ftrace_ret_stack {
68 unsigned long ret;
69 unsigned long func;
70 unsigned long long calltime;
71};
72
73/*
74 * Primary handler of a function return.
75 * It relies on ftrace_return_to_handler.
76 * Defined in entry_32/64.S
77 */
78extern void return_to_handler(void);
79
80#endif /* __ASSEMBLY__ */
81#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
82
83#endif /* _ASM_X86_FTRACE_H */ 58#endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h
index d48bee663a6f..4b8b98fa7f25 100644
--- a/arch/x86/include/asm/genapic.h
+++ b/arch/x86/include/asm/genapic.h
@@ -1,5 +1 @@
1#ifdef CONFIG_X86_32 #include <asm/apic.h>
2# include "genapic_32.h"
3#else
4# include "genapic_64.h"
5#endif
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
deleted file mode 100644
index 2c05b737ee22..000000000000
--- a/arch/x86/include/asm/genapic_32.h
+++ /dev/null
@@ -1,148 +0,0 @@
1#ifndef _ASM_X86_GENAPIC_32_H
2#define _ASM_X86_GENAPIC_32_H
3
4#include <asm/mpspec.h>
5#include <asm/atomic.h>
6
7/*
8 * Generic APIC driver interface.
9 *
10 * A straightforward mapping of the APIC-related parts of the
11 * x86 subarchitecture interface to a dynamic object.
12 *
13 * This is used by the "generic" x86 subarchitecture.
14 *
15 * Copyright 2003 Andi Kleen, SuSE Labs.
16 */
17
18struct mpc_bus;
19struct mpc_table;
20struct mpc_cpu;
21
22struct genapic {
23 char *name;
24 int (*probe)(void);
25
26 int (*apic_id_registered)(void);
27 const struct cpumask *(*target_cpus)(void);
28 int int_delivery_mode;
29 int int_dest_mode;
30 int ESR_DISABLE;
31 int apic_destination_logical;
32 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
33 unsigned long (*check_apicid_present)(int apicid);
34 int no_balance_irq;
35 int no_ioapic_check;
36 void (*init_apic_ldr)(void);
37 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
38
39 void (*setup_apic_routing)(void);
40 int (*multi_timer_check)(int apic, int irq);
41 int (*apicid_to_node)(int logical_apicid);
42 int (*cpu_to_logical_apicid)(int cpu);
43 int (*cpu_present_to_apicid)(int mps_cpu);
44 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
45 void (*setup_portio_remap)(void);
46 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
47 void (*enable_apic_mode)(void);
48 u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
49
50 /* mpparse */
	51 /* When one of the next two hooks returns 1, the genapic
	52 is switched to this one. Essentially they are additional probe
53 functions. */
54 int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
55 char *productid);
56 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
57
58 unsigned (*get_apic_id)(unsigned long x);
59 unsigned long apic_id_mask;
60 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
61 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
62 const struct cpumask *andmask);
63 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
64
65#ifdef CONFIG_SMP
66 /* ipi */
67 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
68 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
69 int vector);
70 void (*send_IPI_allbutself)(int vector);
71 void (*send_IPI_all)(int vector);
72#endif
73 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
74 int trampoline_phys_low;
75 int trampoline_phys_high;
76 void (*wait_for_init_deassert)(atomic_t *deassert);
77 void (*smp_callin_clear_local_apic)(void);
78 void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
79 void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
80 void (*inquire_remote_apic)(int apicid);
81};
82
83#define APICFUNC(x) .x = x,
84
85/* More functions could probably be marked IPIFUNC and save some space
86 in UP GENERICARCH kernels, but I don't have the nerve right now
87 to untangle this mess. -AK */
88#ifdef CONFIG_SMP
89#define IPIFUNC(x) APICFUNC(x)
90#else
91#define IPIFUNC(x)
92#endif
93
94#define APIC_INIT(aname, aprobe) \
95{ \
96 .name = aname, \
97 .probe = aprobe, \
98 .int_delivery_mode = INT_DELIVERY_MODE, \
99 .int_dest_mode = INT_DEST_MODE, \
100 .no_balance_irq = NO_BALANCE_IRQ, \
101 .ESR_DISABLE = esr_disable, \
102 .apic_destination_logical = APIC_DEST_LOGICAL, \
103 APICFUNC(apic_id_registered) \
104 APICFUNC(target_cpus) \
105 APICFUNC(check_apicid_used) \
106 APICFUNC(check_apicid_present) \
107 APICFUNC(init_apic_ldr) \
108 APICFUNC(ioapic_phys_id_map) \
109 APICFUNC(setup_apic_routing) \
110 APICFUNC(multi_timer_check) \
111 APICFUNC(apicid_to_node) \
112 APICFUNC(cpu_to_logical_apicid) \
113 APICFUNC(cpu_present_to_apicid) \
114 APICFUNC(apicid_to_cpu_present) \
115 APICFUNC(setup_portio_remap) \
116 APICFUNC(check_phys_apicid_present) \
117 APICFUNC(mps_oem_check) \
118 APICFUNC(get_apic_id) \
119 .apic_id_mask = APIC_ID_MASK, \
120 APICFUNC(cpu_mask_to_apicid) \
121 APICFUNC(cpu_mask_to_apicid_and) \
122 APICFUNC(vector_allocation_domain) \
123 APICFUNC(acpi_madt_oem_check) \
124 IPIFUNC(send_IPI_mask) \
125 IPIFUNC(send_IPI_allbutself) \
126 IPIFUNC(send_IPI_all) \
127 APICFUNC(enable_apic_mode) \
128 APICFUNC(phys_pkg_id) \
129 .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \
130 .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \
131 APICFUNC(wait_for_init_deassert) \
132 APICFUNC(smp_callin_clear_local_apic) \
133 APICFUNC(store_NMI_vector) \
134 APICFUNC(restore_NMI_vector) \
135 APICFUNC(inquire_remote_apic) \
136}
137
138extern struct genapic *genapic;
139extern void es7000_update_genapic_to_cluster(void);
140
141enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
142#define get_uv_system_type() UV_NONE
143#define is_uv_system() 0
144#define uv_wakeup_secondary(a, b) 1
145#define uv_system_init() do {} while (0)
146
147
148#endif /* _ASM_X86_GENAPIC_32_H */
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
deleted file mode 100644
index adf32fb56aa6..000000000000
--- a/arch/x86/include/asm/genapic_64.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#ifndef _ASM_X86_GENAPIC_64_H
2#define _ASM_X86_GENAPIC_64_H
3
4#include <linux/cpumask.h>
5
6/*
7 * Copyright 2004 James Cleverdon, IBM.
8 * Subject to the GNU Public License, v.2
9 *
10 * Generic APIC sub-arch data struct.
11 *
12 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
13 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
14 * James Cleverdon.
15 */
16
17struct genapic {
18 char *name;
19 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
20 u32 int_delivery_mode;
21 u32 int_dest_mode;
22 int (*apic_id_registered)(void);
23 const struct cpumask *(*target_cpus)(void);
24 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
25 void (*init_apic_ldr)(void);
26 /* ipi */
27 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
28 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
29 int vector);
30 void (*send_IPI_allbutself)(int vector);
31 void (*send_IPI_all)(int vector);
32 void (*send_IPI_self)(int vector);
33 /* */
34 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
35 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
36 const struct cpumask *andmask);
37 unsigned int (*phys_pkg_id)(int index_msb);
38 unsigned int (*get_apic_id)(unsigned long x);
39 unsigned long (*set_apic_id)(unsigned int id);
40 unsigned long apic_id_mask;
41 /* wakeup_secondary_cpu */
42 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
43};
44
45extern struct genapic *genapic;
46
47extern struct genapic apic_flat;
48extern struct genapic apic_physflat;
49extern struct genapic apic_x2apic_cluster;
50extern struct genapic apic_x2apic_phys;
51extern int acpi_madt_oem_check(char *, char *);
52
53extern void apic_send_IPI_self(int vector);
54enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
55extern enum uv_system_type get_uv_system_type(void);
56extern int is_uv_system(void);
57
58extern struct genapic apic_x2apic_uv_x;
59DECLARE_PER_CPU(int, x2apic_extra_bits);
60extern void uv_cpu_init(void);
61extern void uv_system_init(void);
62extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
63
64extern void setup_apic_routing(void);
65
66#endif /* _ASM_X86_GENAPIC_64_H */
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 000787df66e6..039db6aa8e02 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -1,11 +1,53 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_HARDIRQ_H
2# include "hardirq_32.h" 2#define _ASM_X86_HARDIRQ_H
3#else 3
4# include "hardirq_64.h" 4#include <linux/threads.h>
5#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned int __nmi_count; /* arch dependent */
10 unsigned int irq0_irqs;
11#ifdef CONFIG_X86_LOCAL_APIC
12 unsigned int apic_timer_irqs; /* arch dependent */
13 unsigned int irq_spurious_count;
14#endif
15 unsigned int generic_irqs; /* arch dependent */
16#ifdef CONFIG_SMP
17 unsigned int irq_resched_count;
18 unsigned int irq_call_count;
19 unsigned int irq_tlb_count;
20#endif
21#ifdef CONFIG_X86_MCE
22 unsigned int irq_thermal_count;
23# ifdef CONFIG_X86_64
24 unsigned int irq_threshold_count;
25# endif
5#endif 26#endif
27} ____cacheline_aligned irq_cpustat_t;
28
29DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
30
31/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
32#define MAX_HARDIRQS_PER_CPU NR_VECTORS
33
34#define __ARCH_IRQ_STAT
35
36#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
37
38#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
39
40#define __ARCH_SET_SOFTIRQ_PENDING
41
42#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
43#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
44
45extern void ack_bad_irq(unsigned int irq);
6 46
7extern u64 arch_irq_stat_cpu(unsigned int cpu); 47extern u64 arch_irq_stat_cpu(unsigned int cpu);
8#define arch_irq_stat_cpu arch_irq_stat_cpu 48#define arch_irq_stat_cpu arch_irq_stat_cpu
9 49
10extern u64 arch_irq_stat(void); 50extern u64 arch_irq_stat(void);
11#define arch_irq_stat arch_irq_stat 51#define arch_irq_stat arch_irq_stat
52
53#endif /* _ASM_X86_HARDIRQ_H */
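With irq_cpustat_t now one per-cpu structure for both bitnesses, the accessors above compile to single %fs/%gs-relative instructions via the percpu_* family. A small usage sketch (function name invented):

	#include <linux/kernel.h>

	static void example_account_timer_irq(void)
	{
		inc_irq_stat(irq0_irqs);	/* percpu_add(irq_stat.irq0_irqs, 1) */

		if (local_softirq_pending())	/* percpu_read(...) */
			pr_debug("softirqs pending\n");
	}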
diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h
deleted file mode 100644
index cf7954d1405f..000000000000
--- a/arch/x86/include/asm/hardirq_32.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _ASM_X86_HARDIRQ_32_H
2#define _ASM_X86_HARDIRQ_32_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned long idle_timestamp;
10 unsigned int __nmi_count; /* arch dependent */
11 unsigned int apic_timer_irqs; /* arch dependent */
12 unsigned int irq0_irqs;
13 unsigned int irq_resched_count;
14 unsigned int irq_call_count;
15 unsigned int irq_tlb_count;
16 unsigned int irq_thermal_count;
17 unsigned int irq_spurious_count;
18} ____cacheline_aligned irq_cpustat_t;
19
20DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
21
22#define __ARCH_IRQ_STAT
23#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
24
25#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++)
26
27void ack_bad_irq(unsigned int irq);
28#include <linux/irq_cpustat.h>
29
30#endif /* _ASM_X86_HARDIRQ_32_H */
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
deleted file mode 100644
index b5a6b5d56704..000000000000
--- a/arch/x86/include/asm/hardirq_64.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _ASM_X86_HARDIRQ_64_H
2#define _ASM_X86_HARDIRQ_64_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6#include <asm/pda.h>
7#include <asm/apic.h>
8
9/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
10#define MAX_HARDIRQS_PER_CPU NR_VECTORS
11
12#define __ARCH_IRQ_STAT 1
13
14#define inc_irq_stat(member) add_pda(member, 1)
15
16#define local_softirq_pending() read_pda(__softirq_pending)
17
18#define __ARCH_SET_SOFTIRQ_PENDING 1
19
20#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
21#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
22
23extern void ack_bad_irq(unsigned int irq);
24
25#endif /* _ASM_X86_HARDIRQ_64_H */
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index bf9276bea660..014c2b85ae45 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -63,6 +63,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
63void *kmap_atomic(struct page *page, enum km_type type); 63void *kmap_atomic(struct page *page, enum km_type type);
64void kunmap_atomic(void *kvaddr, enum km_type type); 64void kunmap_atomic(void *kvaddr, enum km_type type);
65void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); 65void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
66void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
66struct page *kmap_atomic_to_page(void *ptr); 67struct page *kmap_atomic_to_page(void *ptr);
67 68
68#ifndef CONFIG_PARAVIRT 69#ifndef CONFIG_PARAVIRT
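The one-line highmem.h addition exports kmap_atomic_prot_pfn(), which maps a raw pfn with an explicit protection and no struct page behind it. A hedged sketch (the caller picks the kmap slot; PAGE_KERNEL_NOCACHE is one plausible prot):

	static void example_poke_pfn(unsigned long pfn, enum km_type type)
	{
		u32 *va = kmap_atomic_prot_pfn(pfn, type, PAGE_KERNEL_NOCACHE);

		*va = 0;			/* hypothetical access */
		kunmap_atomic(va, type);
	}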
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 8de644b6b959..b762ea49bd70 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -25,10 +25,9 @@
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/sections.h> 26#include <asm/sections.h>
27 27
28#define platform_legacy_irq(irq) ((irq) < 16)
29
30/* Interrupt handlers registered during init_IRQ */ 28/* Interrupt handlers registered during init_IRQ */
31extern void apic_timer_interrupt(void); 29extern void apic_timer_interrupt(void);
30extern void generic_interrupt(void);
32extern void error_interrupt(void); 31extern void error_interrupt(void);
33extern void spurious_interrupt(void); 32extern void spurious_interrupt(void);
34extern void thermal_interrupt(void); 33extern void thermal_interrupt(void);
@@ -58,7 +57,7 @@ extern void make_8259A_irq(unsigned int irq);
58extern void init_8259A(int aeoi); 57extern void init_8259A(int aeoi);
59 58
60/* IOAPIC */ 59/* IOAPIC */
61#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) 60#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
62extern unsigned long io_apic_irqs; 61extern unsigned long io_apic_irqs;
63 62
64extern void init_VISWS_APIC_irqs(void); 63extern void init_VISWS_APIC_irqs(void);
@@ -67,15 +66,7 @@ extern void disable_IO_APIC(void);
67extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); 66extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
68extern void setup_ioapic_dest(void); 67extern void setup_ioapic_dest(void);
69 68
70#ifdef CONFIG_X86_64
71extern void enable_IO_APIC(void); 69extern void enable_IO_APIC(void);
72#endif
73
74/* IPI functions */
75#ifdef CONFIG_X86_32
76extern void send_IPI_self(int vector);
77#endif
78extern void send_IPI(int dest, int vector);
79 70
80/* Statistics */ 71/* Statistics */
81extern atomic_t irq_err_count; 72extern atomic_t irq_err_count;
@@ -84,21 +75,11 @@ extern atomic_t irq_mis_count;
84/* EISA */ 75/* EISA */
85extern void eisa_set_level_irq(unsigned int irq); 76extern void eisa_set_level_irq(unsigned int irq);
86 77
87/* Voyager functions */
88extern asmlinkage void vic_cpi_interrupt(void);
89extern asmlinkage void vic_sys_interrupt(void);
90extern asmlinkage void vic_cmn_interrupt(void);
91extern asmlinkage void qic_timer_interrupt(void);
92extern asmlinkage void qic_invalidate_interrupt(void);
93extern asmlinkage void qic_reschedule_interrupt(void);
94extern asmlinkage void qic_enable_irq_interrupt(void);
95extern asmlinkage void qic_call_function_interrupt(void);
96
97/* SMP */ 78/* SMP */
98extern void smp_apic_timer_interrupt(struct pt_regs *); 79extern void smp_apic_timer_interrupt(struct pt_regs *);
99extern void smp_spurious_interrupt(struct pt_regs *); 80extern void smp_spurious_interrupt(struct pt_regs *);
100extern void smp_error_interrupt(struct pt_regs *); 81extern void smp_error_interrupt(struct pt_regs *);
101#ifdef CONFIG_X86_SMP 82#ifdef CONFIG_SMP
102extern void smp_reschedule_interrupt(struct pt_regs *); 83extern void smp_reschedule_interrupt(struct pt_regs *);
103extern void smp_call_function_interrupt(struct pt_regs *); 84extern void smp_call_function_interrupt(struct pt_regs *);
104extern void smp_call_function_single_interrupt(struct pt_regs *); 85extern void smp_call_function_single_interrupt(struct pt_regs *);
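
The IO_APIC_IRQ() change above replaces the literal 16 with NR_IRQS_LEGACY without altering the test. A standalone sketch of the predicate, with a made-up io_apic_irqs bitmap:

#include <stdio.h>

#define NR_IRQS_LEGACY	16
#define IO_APIC_IRQ(x)	(((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs))

/* Sample value only: legacy IRQs routed through the IO-APIC,
 * minus the 8259 cascade on IRQ 2.
 */
static unsigned long io_apic_irqs = 0x0ffb;

int main(void)
{
	printf("IRQ 2  -> %d\n", IO_APIC_IRQ(2));	/* 0: cascade stays on the 8259 */
	printf("IRQ 5  -> %d\n", IO_APIC_IRQ(5));	/* 1: bit set in the bitmap */
	printf("IRQ 23 -> %d\n", IO_APIC_IRQ(23));	/* 1: above the legacy range */
	return 0;
}
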
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091eeb1f..1a99e6c092af 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -60,4 +60,8 @@ extern struct irq_chip i8259A_chip;
60extern void mask_8259A(void); 60extern void mask_8259A(void);
61extern void unmask_8259A(void); 61extern void unmask_8259A(void);
62 62
63#ifdef CONFIG_X86_32
64extern void init_ISA_irqs(void);
65#endif
66
63#endif /* _ASM_X86_I8259_H */ 67#endif /* _ASM_X86_I8259_H */
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 50ca486fd88c..1f7e62517284 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -129,13 +129,6 @@ typedef struct compat_siginfo {
129 } _sifields; 129 } _sifields;
130} compat_siginfo_t; 130} compat_siginfo_t;
131 131
132struct ustat32 {
133 __u32 f_tfree;
134 compat_ino_t f_tinode;
135 char f_fname[6];
136 char f_fpack[6];
137};
138
139#define IA32_STACK_TOP IA32_PAGE_OFFSET 132#define IA32_STACK_TOP IA32_PAGE_OFFSET
140 133
141#ifdef __KERNEL__ 134#ifdef __KERNEL__
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
new file mode 100644
index 000000000000..36fb1a6a5109
--- /dev/null
+++ b/arch/x86/include/asm/init.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_X86_INIT_32_H
2#define _ASM_X86_INIT_32_H
3
4#ifdef CONFIG_X86_32
5extern void __init early_ioremap_page_table_range_init(void);
6#endif
7
8extern unsigned long __init
9kernel_physical_mapping_init(unsigned long start,
10 unsigned long end,
11 unsigned long page_size_mask);
12
13
14extern unsigned long __initdata e820_table_start;
15extern unsigned long __meminitdata e820_table_end;
16extern unsigned long __meminitdata e820_table_top;
17
18#endif /* _ASM_X86_INIT_32_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1dbbdf4be9b4..e5383e3d2f8c 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -5,6 +5,7 @@
5 5
6#include <linux/compiler.h> 6#include <linux/compiler.h>
7#include <asm-generic/int-ll64.h> 7#include <asm-generic/int-ll64.h>
8#include <asm/page.h>
8 9
9#define build_mmio_read(name, size, type, reg, barrier) \ 10#define build_mmio_read(name, size, type, reg, barrier) \
10static inline type name(const volatile void __iomem *addr) \ 11static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,98 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
80#define readq readq 81#define readq readq
81#define writeq writeq 82#define writeq writeq
82 83
84/**
85 * virt_to_phys - map virtual addresses to physical
86 * @address: address to remap
87 *
88 * The returned physical address is the physical (CPU) mapping for
89 * the memory address given. It is only valid to use this function on
90 * addresses directly mapped or allocated via kmalloc.
91 *
92 * This function does not give bus mappings for DMA transfers. In
93 * almost all conceivable cases a device driver should not be using
94 * this function
95 */
96
97static inline phys_addr_t virt_to_phys(volatile void *address)
98{
99 return __pa(address);
100}
101
102/**
103 * phys_to_virt - map physical address to virtual
104 * @address: address to remap
105 *
106 * The returned virtual address is a current CPU mapping for
107 * the memory address given. It is only valid to use this function on
108 * addresses that have a kernel mapping
109 *
110 * This function does not handle bus mappings for DMA transfers. In
111 * almost all conceivable cases a device driver should not be using
112 * this function
113 */
114
115static inline void *phys_to_virt(phys_addr_t address)
116{
117 return __va(address);
118}
119
120/*
121 * Change "struct page" to physical address.
122 */
123#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
124
125/*
126 * ISA I/O bus memory addresses are 1:1 with the physical address.
127 * However, we truncate the address to unsigned int to avoid undesirable
 128 * promotions in legacy drivers.
129 */
130static inline unsigned int isa_virt_to_bus(volatile void *address)
131{
132 return (unsigned int)virt_to_phys(address);
133}
134#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
135#define isa_bus_to_virt phys_to_virt
136
137/*
138 * However PCI ones are not necessarily 1:1 and therefore these interfaces
139 * are forbidden in portable PCI drivers.
140 *
141 * Allow them on x86 for legacy drivers, though.
142 */
143#define virt_to_bus virt_to_phys
144#define bus_to_virt phys_to_virt
145
146/**
147 * ioremap - map bus memory into CPU space
148 * @offset: bus address of the memory
149 * @size: size of the resource to map
150 *
151 * ioremap performs a platform specific sequence of operations to
152 * make bus memory CPU accessible via the readb/readw/readl/writeb/
153 * writew/writel functions and the other mmio helpers. The returned
154 * address is not guaranteed to be usable directly as a virtual
155 * address.
156 *
157 * If the area you are trying to map is a PCI BAR you should have a
158 * look at pci_iomap().
159 */
160extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
161extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
162extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
163 unsigned long prot_val);
164
165/*
166 * The default ioremap() behavior is non-cached:
167 */
168static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
169{
170 return ioremap_nocache(offset, size);
171}
172
173extern void iounmap(volatile void __iomem *addr);
174
175
83#ifdef CONFIG_X86_32 176#ifdef CONFIG_X86_32
84# include "io_32.h" 177# include "io_32.h"
85#else 178#else
@@ -91,7 +184,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
91 184
92extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, 185extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
93 unsigned long prot_val); 186 unsigned long prot_val);
94extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); 187extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
95 188
96/* 189/*
97 * early_ioremap() and early_iounmap() are for temporary early boot-time 190 * early_ioremap() and early_iounmap() are for temporary early boot-time
@@ -103,7 +196,7 @@ extern void early_ioremap_reset(void);
103extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); 196extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
104extern void __iomem *early_memremap(unsigned long offset, unsigned long size); 197extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
105extern void early_iounmap(void __iomem *addr, unsigned long size); 198extern void early_iounmap(void __iomem *addr, unsigned long size);
106extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
107 199
200#define IO_SPACE_LIMIT 0xffff
108 201
109#endif /* _ASM_X86_IO_H */ 202#endif /* _ASM_X86_IO_H */
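
The helpers consolidated into io.h above pair naturally: virt_to_phys()/phys_to_virt() for directly mapped memory, ioremap() (non-cached by default) plus the mmio accessors for device memory. A sketch under stated assumptions (kernel context; the 0xfed00000 address and sizes are made-up placeholders):

#include <linux/slab.h>
#include <linux/io.h>

static void io_helpers_demo(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);
	void __iomem *regs;

	if (buf) {
		phys_addr_t pa = virt_to_phys(buf);	/* CPU physical, not a bus address */
		WARN_ON(phys_to_virt(pa) != buf);	/* round-trip through the direct map */
		kfree(buf);
	}

	regs = ioremap(0xfed00000, 0x1000);		/* uncached by default */
	if (regs) {
		u32 v = readl(regs + 0x10);		/* always use the mmio accessors */
		writel(v, regs + 0x10);
		iounmap(regs);
	}
}
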
diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
index d8e242e1b396..a299900f5920 100644
--- a/arch/x86/include/asm/io_32.h
+++ b/arch/x86/include/asm/io_32.h
@@ -37,8 +37,6 @@
37 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> 37 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
38 */ 38 */
39 39
40#define IO_SPACE_LIMIT 0xffff
41
42#define XQUAD_PORTIO_BASE 0xfe400000 40#define XQUAD_PORTIO_BASE 0xfe400000
43#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ 41#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
44 42
@@ -53,92 +51,6 @@
53 */ 51 */
54#define xlate_dev_kmem_ptr(p) p 52#define xlate_dev_kmem_ptr(p) p
55 53
56/**
57 * virt_to_phys - map virtual addresses to physical
58 * @address: address to remap
59 *
60 * The returned physical address is the physical (CPU) mapping for
61 * the memory address given. It is only valid to use this function on
62 * addresses directly mapped or allocated via kmalloc.
63 *
64 * This function does not give bus mappings for DMA transfers. In
65 * almost all conceivable cases a device driver should not be using
66 * this function
67 */
68
69static inline unsigned long virt_to_phys(volatile void *address)
70{
71 return __pa(address);
72}
73
74/**
75 * phys_to_virt - map physical address to virtual
76 * @address: address to remap
77 *
78 * The returned virtual address is a current CPU mapping for
79 * the memory address given. It is only valid to use this function on
80 * addresses that have a kernel mapping
81 *
82 * This function does not handle bus mappings for DMA transfers. In
83 * almost all conceivable cases a device driver should not be using
84 * this function
85 */
86
87static inline void *phys_to_virt(unsigned long address)
88{
89 return __va(address);
90}
91
92/*
93 * Change "struct page" to physical address.
94 */
95#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
96
97/**
98 * ioremap - map bus memory into CPU space
99 * @offset: bus address of the memory
100 * @size: size of the resource to map
101 *
102 * ioremap performs a platform specific sequence of operations to
103 * make bus memory CPU accessible via the readb/readw/readl/writeb/
104 * writew/writel functions and the other mmio helpers. The returned
105 * address is not guaranteed to be usable directly as a virtual
106 * address.
107 *
108 * If the area you are trying to map is a PCI BAR you should have a
109 * look at pci_iomap().
110 */
111extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
112extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
113extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
114 unsigned long prot_val);
115
116/*
117 * The default ioremap() behavior is non-cached:
118 */
119static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
120{
121 return ioremap_nocache(offset, size);
122}
123
124extern void iounmap(volatile void __iomem *addr);
125
126/*
127 * ISA I/O bus memory addresses are 1:1 with the physical address.
128 */
129#define isa_virt_to_bus virt_to_phys
130#define isa_page_to_bus page_to_phys
131#define isa_bus_to_virt phys_to_virt
132
133/*
134 * However PCI ones are not necessarily 1:1 and therefore these interfaces
135 * are forbidden in portable PCI drivers.
136 *
137 * Allow them on x86 for legacy drivers, though.
138 */
139#define virt_to_bus virt_to_phys
140#define bus_to_virt phys_to_virt
141
142static inline void 54static inline void
143memset_io(volatile void __iomem *addr, unsigned char val, int count) 55memset_io(volatile void __iomem *addr, unsigned char val, int count)
144{ 56{
diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
index 563c16270ba6..244067893af4 100644
--- a/arch/x86/include/asm/io_64.h
+++ b/arch/x86/include/asm/io_64.h
@@ -136,73 +136,12 @@ __OUTS(b)
136__OUTS(w) 136__OUTS(w)
137__OUTS(l) 137__OUTS(l)
138 138
139#define IO_SPACE_LIMIT 0xffff
140
141#if defined(__KERNEL__) && defined(__x86_64__) 139#if defined(__KERNEL__) && defined(__x86_64__)
142 140
143#include <linux/vmalloc.h> 141#include <linux/vmalloc.h>
144 142
145#ifndef __i386__
146/*
147 * Change virtual addresses to physical addresses and vv.
148 * These are pretty trivial
149 */
150static inline unsigned long virt_to_phys(volatile void *address)
151{
152 return __pa(address);
153}
154
155static inline void *phys_to_virt(unsigned long address)
156{
157 return __va(address);
158}
159#endif
160
161/*
162 * Change "struct page" to physical address.
163 */
164#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
165
166#include <asm-generic/iomap.h> 143#include <asm-generic/iomap.h>
167 144
168/*
169 * This one maps high address device memory and turns off caching for that area.
 170 * It's useful if some control registers are in such an area and write combining
171 * or read caching is not desirable:
172 */
173extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
174extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
175extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
176 unsigned long prot_val);
177
178/*
179 * The default ioremap() behavior is non-cached:
180 */
181static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
182{
183 return ioremap_nocache(offset, size);
184}
185
186extern void iounmap(volatile void __iomem *addr);
187
188extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
189
190/*
191 * ISA I/O bus memory addresses are 1:1 with the physical address.
192 */
193#define isa_virt_to_bus virt_to_phys
194#define isa_page_to_bus page_to_phys
195#define isa_bus_to_virt phys_to_virt
196
197/*
198 * However PCI ones are not necessarily 1:1 and therefore these interfaces
199 * are forbidden in portable PCI drivers.
200 *
201 * Allow them on x86 for legacy drivers, though.
202 */
203#define virt_to_bus virt_to_phys
204#define bus_to_virt phys_to_virt
205
206void __memcpy_fromio(void *, unsigned long, unsigned); 145void __memcpy_fromio(void *, unsigned long, unsigned);
207void __memcpy_toio(unsigned long, const void *, unsigned); 146void __memcpy_toio(unsigned long, const void *, unsigned);
208 147
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7a1f44ac1f17..373cc2bbcad2 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
114extern int nr_ioapics; 114extern int nr_ioapics;
115extern int nr_ioapic_registers[MAX_IO_APICS]; 115extern int nr_ioapic_registers[MAX_IO_APICS];
116 116
117/*
118 * MP-BIOS irq configuration table structures:
119 */
120
121#define MP_MAX_IOAPIC_PIN 127 117#define MP_MAX_IOAPIC_PIN 127
122 118
123struct mp_config_ioapic {
124 unsigned long mp_apicaddr;
125 unsigned int mp_apicid;
126 unsigned char mp_type;
127 unsigned char mp_apicver;
128 unsigned char mp_flags;
129};
130
131struct mp_config_intsrc {
132 unsigned int mp_dstapic;
133 unsigned char mp_type;
134 unsigned char mp_irqtype;
135 unsigned short mp_irqflag;
136 unsigned char mp_srcbus;
137 unsigned char mp_srcbusirq;
138 unsigned char mp_dstirq;
139};
140
141/* I/O APIC entries */ 119/* I/O APIC entries */
142extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; 120extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
143 121
144/* # of MP IRQ source entries */ 122/* # of MP IRQ source entries */
145extern int mp_irq_entries; 123extern int mp_irq_entries;
146 124
147/* MP IRQ source entries */ 125/* MP IRQ source entries */
148extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 126extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
149 127
150/* non-0 if default (table-less) MP configuration */ 128/* non-0 if default (table-less) MP configuration */
151extern int mpc_default_type; 129extern int mpc_default_type;
@@ -165,15 +143,6 @@ extern int noioapicreroute;
165/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ 143/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
166extern int timer_through_8259; 144extern int timer_through_8259;
167 145
168static inline void disable_ioapic_setup(void)
169{
170#ifdef CONFIG_PCI
171 noioapicquirk = 1;
172 noioapicreroute = -1;
173#endif
174 skip_ioapic_setup = 1;
175}
176
177/* 146/*
178 * If we use the IO-APIC for IRQ routing, disable automatic 147 * If we use the IO-APIC for IRQ routing, disable automatic
179 * assignment of PCI IRQ's. 148 * assignment of PCI IRQ's.
@@ -193,13 +162,20 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
193extern void ioapic_init_mappings(void); 162extern void ioapic_init_mappings(void);
194 163
195#ifdef CONFIG_X86_64 164#ifdef CONFIG_X86_64
196extern int save_mask_IO_APIC_setup(void); 165extern int save_IO_APIC_setup(void);
166extern void mask_IO_APIC_setup(void);
197extern void restore_IO_APIC_setup(void); 167extern void restore_IO_APIC_setup(void);
198extern void reinit_intr_remapped_IO_APIC(int); 168extern void reinit_intr_remapped_IO_APIC(int);
199#endif 169#endif
200 170
201extern void probe_nr_irqs_gsi(void); 171extern void probe_nr_irqs_gsi(void);
202 172
173extern int setup_ioapic_entry(int apic, int irq,
174 struct IO_APIC_route_entry *entry,
175 unsigned int destination, int trigger,
176 int polarity, int vector, int pin);
177extern void ioapic_write_entry(int apic, int pin,
178 struct IO_APIC_route_entry e);
203#else /* !CONFIG_X86_IO_APIC */ 179#else /* !CONFIG_X86_IO_APIC */
204#define io_apic_assign_pci_irqs 0 180#define io_apic_assign_pci_irqs 0
205static const int timer_through_8259 = 0; 181static const int timer_through_8259 = 0;
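
The new setup_ioapic_entry()/ioapic_write_entry() prototypes above expose a two-step pin-programming sequence: fill a route entry, then write it. A hedged sketch of a caller (all numeric values are illustrative; real callers derive them from the MP tables):

#include <linux/string.h>
#include <asm/io_apic.h>

/* Sketch (kernel context): program IO-APIC 0, pin 9; dest and
 * vector are placeholder parameters.
 */
static void program_pin_demo(unsigned int dest, int vector)
{
	struct IO_APIC_route_entry entry;

	memset(&entry, 0, sizeof(entry));
	if (setup_ioapic_entry(0, 9, &entry, dest,
			       1 /* level */, 1 /* active low */,
			       vector, 9) == 0)
		ioapic_write_entry(0, 9, entry);
}
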
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index a6ee9e6f530f..af326a2975b5 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -3,7 +3,7 @@
3 3
4extern void pci_iommu_shutdown(void); 4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void); 5extern void no_iommu_init(void);
6extern struct dma_mapping_ops nommu_dma_ops; 6extern struct dma_map_ops nommu_dma_ops;
7extern int force_iommu, no_iommu; 7extern int force_iommu, no_iommu;
8extern int iommu_detected; 8extern int iommu_detected;
9 9
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index c745a306f7d3..0b7228268a63 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_IPI_H 1#ifndef _ASM_X86_IPI_H
2#define _ASM_X86_IPI_H 2#define _ASM_X86_IPI_H
3 3
4#ifdef CONFIG_X86_LOCAL_APIC
5
4/* 6/*
5 * Copyright 2004 James Cleverdon, IBM. 7 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2 8 * Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
55 cpu_relax(); 57 cpu_relax();
56} 58}
57 59
58static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, 60static inline void
59 unsigned int dest) 61__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
60{ 62{
61 /* 63 /*
62 * Subtle. In the case of the 'never do double writes' workaround 64 * Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
87 * This is used to send an IPI with no shorthand notation (the destination is 89 * This is used to send an IPI with no shorthand notation (the destination is
88 * specified in bits 56 to 63 of the ICR). 90 * specified in bits 56 to 63 of the ICR).
89 */ 91 */
90static inline void __send_IPI_dest_field(unsigned int mask, int vector, 92static inline void
91 unsigned int dest) 93 __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
92{ 94{
93 unsigned long cfg; 95 unsigned long cfg;
94 96
@@ -117,41 +119,44 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
117 native_apic_mem_write(APIC_ICR, cfg); 119 native_apic_mem_write(APIC_ICR, cfg);
118} 120}
119 121
120static inline void send_IPI_mask_sequence(const struct cpumask *mask, 122extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
121 int vector) 123 int vector);
122{ 124extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
123 unsigned long flags; 125 int vector);
124 unsigned long query_cpu; 126extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
127 int vector);
128extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
129 int vector);
125 130
126 /* 131/* Avoid include hell */
127 * Hack. The clustered APIC addressing mode doesn't allow us to send 132#define NMI_VECTOR 0x02
128 * to an arbitrary mask, so I do a unicast to each CPU instead. 133
129 * - mbligh 134extern int no_broadcast;
130 */ 135
131 local_irq_save(flags); 136static inline void __default_local_send_IPI_allbutself(int vector)
132 for_each_cpu(query_cpu, mask) { 137{
133 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), 138 if (no_broadcast || vector == NMI_VECTOR)
134 vector, APIC_DEST_PHYSICAL); 139 apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
135 } 140 else
136 local_irq_restore(flags); 141 __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
137} 142}
138 143
139static inline void send_IPI_mask_allbutself(const struct cpumask *mask, 144static inline void __default_local_send_IPI_all(int vector)
140 int vector)
141{ 145{
142 unsigned long flags; 146 if (no_broadcast || vector == NMI_VECTOR)
143 unsigned int query_cpu; 147 apic->send_IPI_mask(cpu_online_mask, vector);
144 unsigned int this_cpu = smp_processor_id(); 148 else
145 149 __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
146 /* See Hack comment above */
147
148 local_irq_save(flags);
149 for_each_cpu(query_cpu, mask)
150 if (query_cpu != this_cpu)
151 __send_IPI_dest_field(
152 per_cpu(x86_cpu_to_apicid, query_cpu),
153 vector, APIC_DEST_PHYSICAL);
154 local_irq_restore(flags);
155} 150}
156 151
152#ifdef CONFIG_X86_32
153extern void default_send_IPI_mask_logical(const struct cpumask *mask,
154 int vector);
155extern void default_send_IPI_allbutself(int vector);
156extern void default_send_IPI_all(int vector);
157extern void default_send_IPI_self(int vector);
158#endif
159
160#endif
161
157#endif /* _ASM_X86_IPI_H */ 162#endif /* _ASM_X86_IPI_H */
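
The default_send_IPI_*() helpers declared above are the building blocks a 32-bit flat-mode driver plugs into its struct apic. A rough sketch of the wiring (only the IPI callbacks are shown; the field subset is assumed, not a complete initializer):

static struct apic apic_default_sketch = {
	.send_IPI_mask			= default_send_IPI_mask_logical,
	.send_IPI_allbutself		= default_send_IPI_allbutself,
	.send_IPI_all			= default_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,
	/* ... remaining callbacks elided ... */
};
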
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 592688ed04d3..f38481bcd455 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,9 +36,12 @@ static inline int irq_canonicalize(int irq)
36extern void fixup_irqs(void); 36extern void fixup_irqs(void);
37#endif 37#endif
38 38
39extern unsigned int do_IRQ(struct pt_regs *regs); 39extern void (*generic_interrupt_extension)(void);
40extern void init_IRQ(void); 40extern void init_IRQ(void);
41extern void native_init_IRQ(void); 41extern void native_init_IRQ(void);
42extern bool handle_irq(unsigned irq, struct pt_regs *regs);
43
44extern unsigned int do_IRQ(struct pt_regs *regs);
42 45
43/* Interrupt vector management */ 46/* Interrupt vector management */
44extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 47extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 89c898ab298b..77843225b7ea 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -1,5 +1,31 @@
1#ifdef CONFIG_X86_32 1/*
2# include "irq_regs_32.h" 2 * Per-cpu current frame pointer - the location of the last exception frame on
3#else 3 * the stack, stored in the per-cpu area.
4# include "irq_regs_64.h" 4 *
5#endif 5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef _ASM_X86_IRQ_REGS_H
8#define _ASM_X86_IRQ_REGS_H
9
10#include <asm/percpu.h>
11
12#define ARCH_HAS_OWN_IRQ_REGS
13
14DECLARE_PER_CPU(struct pt_regs *, irq_regs);
15
16static inline struct pt_regs *get_irq_regs(void)
17{
18 return percpu_read(irq_regs);
19}
20
21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
22{
23 struct pt_regs *old_regs;
24
25 old_regs = get_irq_regs();
26 percpu_write(irq_regs, new_regs);
27
28 return old_regs;
29}
30
 31#endif /* _ASM_X86_IRQ_REGS_H */
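
The per-CPU irq_regs pointer above follows a save/point/restore discipline around every interrupt. A sketch of the canonical pattern from an interrupt entry path (this mirrors what the x86 do_IRQ path does; the function name is illustrative):

static unsigned int handle_one_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* ... anything called from here may use get_irq_regs()
	 * to inspect the interrupted context ... */

	set_irq_regs(old_regs);	/* restore for nested-interrupt safety */
	return 1;
}
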
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h
deleted file mode 100644
index 86afd7473457..000000000000
--- a/arch/x86/include/asm/irq_regs_32.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Per-cpu current frame pointer - the location of the last exception frame on
3 * the stack, stored in the per-cpu area.
4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef _ASM_X86_IRQ_REGS_32_H
8#define _ASM_X86_IRQ_REGS_32_H
9
10#include <asm/percpu.h>
11
12#define ARCH_HAS_OWN_IRQ_REGS
13
14DECLARE_PER_CPU(struct pt_regs *, irq_regs);
15
16static inline struct pt_regs *get_irq_regs(void)
17{
18 return x86_read_percpu(irq_regs);
19}
20
21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
22{
23 struct pt_regs *old_regs;
24
25 old_regs = get_irq_regs();
26 x86_write_percpu(irq_regs, new_regs);
27
28 return old_regs;
29}
30
31#endif /* _ASM_X86_IRQ_REGS_32_H */
diff --git a/arch/x86/include/asm/irq_regs_64.h b/arch/x86/include/asm/irq_regs_64.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/x86/include/asm/irq_regs_64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 20e1fd588dbf..0396760fccb8 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -1,8 +1,6 @@
1#ifndef _ASM_X86_IRQ_REMAPPING_H 1#ifndef _ASM_X86_IRQ_REMAPPING_H
2#define _ASM_X86_IRQ_REMAPPING_H 2#define _ASM_X86_IRQ_REMAPPING_H
3 3
4extern int x2apic;
5
6#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8) 4#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)
7 5
8#endif /* _ASM_X86_IRQ_REMAPPING_H */ 6#endif /* _ASM_X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index f7ff65032b9d..3cbd79bbb47c 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -1,47 +1,69 @@
1#ifndef _ASM_X86_IRQ_VECTORS_H 1#ifndef _ASM_X86_IRQ_VECTORS_H
2#define _ASM_X86_IRQ_VECTORS_H 2#define _ASM_X86_IRQ_VECTORS_H
3 3
4#include <linux/threads.h> 4/*
5 * Linux IRQ vector layout.
6 *
7 * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
8 * be defined by Linux. They are used as a jump table by the CPU when a
9 * given vector is triggered - by a CPU-external, CPU-internal or
10 * software-triggered event.
11 *
 12 * Linux sets the kernel code addresses each entry jumps to early during
13 * bootup, and never changes them. This is the general layout of the
14 * IDT entries:
15 *
16 * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
17 * Vectors 32 ... 127 : device interrupts
18 * Vector 128 : legacy int80 syscall interface
19 * Vectors 129 ... 237 : device interrupts
20 * Vectors 238 ... 255 : special interrupts
21 *
22 * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
23 *
24 * This file enumerates the exact layout of them:
25 */
5 26
6#define NMI_VECTOR 0x02 27#define NMI_VECTOR 0x02
7 28
8/* 29/*
9 * IDT vectors usable for external interrupt sources start 30 * IDT vectors usable for external interrupt sources start
10 * at 0x20: 31 * at 0x20:
11 */ 32 */
12#define FIRST_EXTERNAL_VECTOR 0x20 33#define FIRST_EXTERNAL_VECTOR 0x20
13 34
14#ifdef CONFIG_X86_32 35#ifdef CONFIG_X86_32
15# define SYSCALL_VECTOR 0x80 36# define SYSCALL_VECTOR 0x80
16#else 37#else
17# define IA32_SYSCALL_VECTOR 0x80 38# define IA32_SYSCALL_VECTOR 0x80
18#endif 39#endif
19 40
20/* 41/*
21 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering 42 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
22 * cleanup after irq migration. 43 * cleanup after irq migration.
23 */ 44 */
24#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR 45#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
25 46
26/* 47/*
27 * Vectors 0x30-0x3f are used for ISA interrupts. 48 * Vectors 0x30-0x3f are used for ISA interrupts.
28 */ 49 */
29#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) 50#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
30#define IRQ1_VECTOR (IRQ0_VECTOR + 1) 51
31#define IRQ2_VECTOR (IRQ0_VECTOR + 2) 52#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
32#define IRQ3_VECTOR (IRQ0_VECTOR + 3) 53#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
33#define IRQ4_VECTOR (IRQ0_VECTOR + 4) 54#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
34#define IRQ5_VECTOR (IRQ0_VECTOR + 5) 55#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
35#define IRQ6_VECTOR (IRQ0_VECTOR + 6) 56#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
36#define IRQ7_VECTOR (IRQ0_VECTOR + 7) 57#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
37#define IRQ8_VECTOR (IRQ0_VECTOR + 8) 58#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
38#define IRQ9_VECTOR (IRQ0_VECTOR + 9) 59#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
39#define IRQ10_VECTOR (IRQ0_VECTOR + 10) 60#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
40#define IRQ11_VECTOR (IRQ0_VECTOR + 11) 61#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
41#define IRQ12_VECTOR (IRQ0_VECTOR + 12) 62#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
42#define IRQ13_VECTOR (IRQ0_VECTOR + 13) 63#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
43#define IRQ14_VECTOR (IRQ0_VECTOR + 14) 64#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
44#define IRQ15_VECTOR (IRQ0_VECTOR + 15) 65#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
66#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
45 67
46/* 68/*
47 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff 69 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -49,119 +71,103 @@
49 * some of the following vectors are 'rare', they are merged 71 * some of the following vectors are 'rare', they are merged
50 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. 72 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
51 * TLB, reschedule and local APIC vectors are performance-critical. 73 * TLB, reschedule and local APIC vectors are performance-critical.
52 *
53 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
54 */ 74 */
55#ifdef CONFIG_X86_32
56
57# define SPURIOUS_APIC_VECTOR 0xff
58# define ERROR_APIC_VECTOR 0xfe
59# define INVALIDATE_TLB_VECTOR 0xfd
60# define RESCHEDULE_VECTOR 0xfc
61# define CALL_FUNCTION_VECTOR 0xfb
62# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
63# define THERMAL_APIC_VECTOR 0xf0
64
65#else
66 75
67#define SPURIOUS_APIC_VECTOR 0xff 76#define SPURIOUS_APIC_VECTOR 0xff
77/*
78 * Sanity check
79 */
80#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
81# error SPURIOUS_APIC_VECTOR definition error
82#endif
83
68#define ERROR_APIC_VECTOR 0xfe 84#define ERROR_APIC_VECTOR 0xfe
69#define RESCHEDULE_VECTOR 0xfd 85#define RESCHEDULE_VECTOR 0xfd
70#define CALL_FUNCTION_VECTOR 0xfc 86#define CALL_FUNCTION_VECTOR 0xfc
71#define CALL_FUNCTION_SINGLE_VECTOR 0xfb 87#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
72#define THERMAL_APIC_VECTOR 0xfa 88#define THERMAL_APIC_VECTOR 0xfa
73#define THRESHOLD_APIC_VECTOR 0xf9
74#define UV_BAU_MESSAGE 0xf8
75#define INVALIDATE_TLB_VECTOR_END 0xf7
76#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
77
78#define NUM_INVALIDATE_TLB_VECTORS 8
79 89
90#ifdef CONFIG_X86_32
91/* 0xf8 - 0xf9 : free */
92#else
93# define THRESHOLD_APIC_VECTOR 0xf9
94# define UV_BAU_MESSAGE 0xf8
80#endif 95#endif
81 96
97/* f0-f7 used for spreading out TLB flushes: */
98#define INVALIDATE_TLB_VECTOR_END 0xf7
99#define INVALIDATE_TLB_VECTOR_START 0xf0
100#define NUM_INVALIDATE_TLB_VECTORS 8
101
82/* 102/*
83 * Local APIC timer IRQ vector is on a different priority level, 103 * Local APIC timer IRQ vector is on a different priority level,
84 * to work around the 'lost local interrupt if more than 2 IRQ 104 * to work around the 'lost local interrupt if more than 2 IRQ
85 * sources per level' errata. 105 * sources per level' errata.
86 */ 106 */
87#define LOCAL_TIMER_VECTOR 0xef 107#define LOCAL_TIMER_VECTOR 0xef
108
109/*
110 * Performance monitoring interrupt vector:
111 */
112#define LOCAL_PERF_VECTOR 0xee
113
114/*
115 * Generic system vector for platform specific use
116 */
117#define GENERIC_INTERRUPT_VECTOR 0xed
88 118
89/* 119/*
90 * First APIC vector available to drivers: (vectors 0x30-0xee) we 120 * First APIC vector available to drivers: (vectors 0x30-0xee) we
91 * start at 0x31(0x41) to spread out vectors evenly between priority 121 * start at 0x31(0x41) to spread out vectors evenly between priority
92 * levels. (0x80 is the syscall vector) 122 * levels. (0x80 is the syscall vector)
93 */ 123 */
94#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) 124#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
95 125
96#define NR_VECTORS 256 126#define NR_VECTORS 256
97 127
98#define FPU_IRQ 13 128#define FPU_IRQ 13
99 129
100#define FIRST_VM86_IRQ 3 130#define FIRST_VM86_IRQ 3
101#define LAST_VM86_IRQ 15 131#define LAST_VM86_IRQ 15
102#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
103 132
104#define NR_IRQS_LEGACY 16 133#ifndef __ASSEMBLY__
105 134static inline int invalid_vm86_irq(int irq)
106#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) 135{
107 136 return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
108#ifndef CONFIG_SPARSE_IRQ 137}
109# if NR_CPUS < MAX_IO_APICS
110# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
111# else
112# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
113# endif
114#else
115# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
116# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
117# else
118# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
119# endif
120#endif 138#endif
121 139
122#elif defined(CONFIG_X86_VOYAGER) 140/*
123 141 * Size the maximum number of interrupts.
124# define NR_IRQS 224 142 *
143 * If the irq_desc[] array has a sparse layout, we can size things
144 * generously - it scales up linearly with the maximum number of CPUs,
145 * and the maximum number of IO-APICs, whichever is higher.
146 *
147 * In other cases we size more conservatively, to not create too large
148 * static arrays.
149 */
125 150
126#else /* IO_APIC || VOYAGER */ 151#define NR_IRQS_LEGACY 16
127 152
128# define NR_IRQS 16 153#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
154#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
129 155
156#ifdef CONFIG_X86_IO_APIC
157# ifdef CONFIG_SPARSE_IRQ
158# define NR_IRQS \
159 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
160 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
161 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
162# else
163# if NR_CPUS < MAX_IO_APICS
164# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
165# else
166# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
167# endif
168# endif
169#else /* !CONFIG_X86_IO_APIC: */
170# define NR_IRQS NR_IRQS_LEGACY
130#endif 171#endif
131 172
132/* Voyager specific defines */
133/* These define the CPIs we use in linux */
134#define VIC_CPI_LEVEL0 0
135#define VIC_CPI_LEVEL1 1
136/* now the fake CPIs */
137#define VIC_TIMER_CPI 2
138#define VIC_INVALIDATE_CPI 3
139#define VIC_RESCHEDULE_CPI 4
140#define VIC_ENABLE_IRQ_CPI 5
141#define VIC_CALL_FUNCTION_CPI 6
142#define VIC_CALL_FUNCTION_SINGLE_CPI 7
143
144/* Now the QIC CPIs: Since we don't need the two initial levels,
145 * these are 2 less than the VIC CPIs */
146#define QIC_CPI_OFFSET 1
147#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
148#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
149#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
150#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
151#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
152#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
153
154#define VIC_START_FAKE_CPI VIC_TIMER_CPI
155#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
156
157/* this is the SYS_INT CPI. */
158#define VIC_SYS_INT 8
159#define VIC_CMN_INT 15
160
161/* This is the boot CPI for alternate processors. It gets overwritten
162 * by the above once the system has activated all available processors */
163#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
164#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
165
166
167#endif /* _ASM_X86_IRQ_VECTORS_H */ 173#endif /* _ASM_X86_IRQ_VECTORS_H */
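
The NR_IRQS sizing above is easiest to see with numbers plugged in. A standalone sketch for one assumed configuration (NR_CPUS=64 and MAX_IO_APICS=64 are made-up config values):

#include <stdio.h>

#define NR_VECTORS		256
#define NR_CPUS			64	/* assumed config value */
#define MAX_IO_APICS		64	/* assumed config value */
#define CPU_VECTOR_LIMIT	(8 * NR_CPUS)
#define IO_APIC_VECTOR_LIMIT	(32 * MAX_IO_APICS)

int main(void)
{
	int nr_irqs = (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT)
			? (NR_VECTORS + CPU_VECTOR_LIMIT)
			: (NR_VECTORS + IO_APIC_VECTOR_LIMIT);

	/* 512 vs 2048, so NR_IRQS = 256 + 2048 = 2304 */
	printf("sparse-irq NR_IRQS = %d\n", nr_irqs);
	return 0;
}
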
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index c61d8b2ab8b9..317ff1703d0b 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -10,27 +10,12 @@
10#else 10#else
11# define PA_CONTROL_PAGE 0 11# define PA_CONTROL_PAGE 0
12# define VA_CONTROL_PAGE 1 12# define VA_CONTROL_PAGE 1
13# define PA_PGD 2 13# define PA_TABLE_PAGE 2
14# define VA_PGD 3 14# define PA_SWAP_PAGE 3
15# define PA_PUD_0 4 15# define PAGES_NR 4
16# define VA_PUD_0 5
17# define PA_PMD_0 6
18# define VA_PMD_0 7
19# define PA_PTE_0 8
20# define VA_PTE_0 9
21# define PA_PUD_1 10
22# define VA_PUD_1 11
23# define PA_PMD_1 12
24# define VA_PMD_1 13
25# define PA_PTE_1 14
26# define VA_PTE_1 15
27# define PA_TABLE_PAGE 16
28# define PAGES_NR 17
29#endif 16#endif
30 17
31#ifdef CONFIG_X86_32
32# define KEXEC_CONTROL_CODE_MAX_SIZE 2048 18# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
33#endif
34 19
35#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
36 21
@@ -151,15 +136,16 @@ relocate_kernel(unsigned long indirection_page,
151 unsigned int has_pae, 136 unsigned int has_pae,
152 unsigned int preserve_context); 137 unsigned int preserve_context);
153#else 138#else
154NORET_TYPE void 139unsigned long
155relocate_kernel(unsigned long indirection_page, 140relocate_kernel(unsigned long indirection_page,
156 unsigned long page_list, 141 unsigned long page_list,
157 unsigned long start_address) ATTRIB_NORET; 142 unsigned long start_address,
143 unsigned int preserve_context);
158#endif 144#endif
159 145
160#ifdef CONFIG_X86_32
161#define ARCH_HAS_KIMAGE_ARCH 146#define ARCH_HAS_KIMAGE_ARCH
162 147
148#ifdef CONFIG_X86_32
163struct kimage_arch { 149struct kimage_arch {
164 pgd_t *pgd; 150 pgd_t *pgd;
165#ifdef CONFIG_X86_PAE 151#ifdef CONFIG_X86_PAE
@@ -169,6 +155,12 @@ struct kimage_arch {
169 pte_t *pte0; 155 pte_t *pte0;
170 pte_t *pte1; 156 pte_t *pte1;
171}; 157};
158#else
159struct kimage_arch {
160 pud_t *pud;
161 pmd_t *pmd;
162 pte_t *pte;
163};
172#endif 164#endif
173 165
174#endif /* __ASSEMBLY__ */ 166#endif /* __ASSEMBLY__ */
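
With the page-list constants trimmed above, machine_kexec() hands relocate_kernel() an array indexed by those constants, now with only four slots. A sketch under stated assumptions (kernel context; control_page, table_page and swap_page are placeholder variables):

static void fill_page_list(unsigned long *page_list, void *control_page,
			   void *table_page, void *swap_page)
{
	page_list[PA_CONTROL_PAGE] = __pa(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE]   = __pa(table_page);
	page_list[PA_SWAP_PAGE]    = __pa(swap_page);
}
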
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 886c9402ec45..dc3f6cf11704 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -15,6 +15,7 @@
15#define __KVM_HAVE_DEVICE_ASSIGNMENT 15#define __KVM_HAVE_DEVICE_ASSIGNMENT
16#define __KVM_HAVE_MSI 16#define __KVM_HAVE_MSI
17#define __KVM_HAVE_USER_NMI 17#define __KVM_HAVE_USER_NMI
18#define __KVM_HAVE_GUEST_DEBUG
18 19
19/* Architectural interrupt line count. */ 20/* Architectural interrupt line count. */
20#define KVM_NR_INTERRUPTS 256 21#define KVM_NR_INTERRUPTS 256
@@ -212,7 +213,30 @@ struct kvm_pit_channel_state {
212 __s64 count_load_time; 213 __s64 count_load_time;
213}; 214};
214 215
216struct kvm_debug_exit_arch {
217 __u32 exception;
218 __u32 pad;
219 __u64 pc;
220 __u64 dr6;
221 __u64 dr7;
222};
223
224#define KVM_GUESTDBG_USE_SW_BP 0x00010000
225#define KVM_GUESTDBG_USE_HW_BP 0x00020000
226#define KVM_GUESTDBG_INJECT_DB 0x00040000
227#define KVM_GUESTDBG_INJECT_BP 0x00080000
228
229/* for KVM_SET_GUEST_DEBUG */
230struct kvm_guest_debug_arch {
231 __u64 debugreg[8];
232};
233
215struct kvm_pit_state { 234struct kvm_pit_state {
216 struct kvm_pit_channel_state channels[3]; 235 struct kvm_pit_channel_state channels[3];
217}; 236};
237
238struct kvm_reinject_control {
239 __u8 pit_reinject;
240 __u8 reserved[31];
241};
218#endif /* _ASM_X86_KVM_H */ 242#endif /* _ASM_X86_KVM_H */
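
The kvm_guest_debug_arch struct and KVM_GUESTDBG_* flags added above are consumed through the KVM_SET_GUEST_DEBUG ioctl. A hedged userspace sketch of arming one hardware breakpoint (vcpu_fd and addr are placeholders; the DR7 value is the minimal local-enable bit for slot 0):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int set_hw_breakpoint(int vcpu_fd, unsigned long addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.debugreg[0] = addr;	/* DR0: linear address to trap */
	dbg.arch.debugreg[7] = 0x1;	/* DR7 L0: enable slot 0 */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
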
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 730843d1d2fb..f0faf58044ff 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -22,6 +22,7 @@
22#include <asm/pvclock-abi.h> 22#include <asm/pvclock-abi.h>
23#include <asm/desc.h> 23#include <asm/desc.h>
24#include <asm/mtrr.h> 24#include <asm/mtrr.h>
25#include <asm/msr-index.h>
25 26
26#define KVM_MAX_VCPUS 16 27#define KVM_MAX_VCPUS 16
27#define KVM_MEMORY_SLOTS 32 28#define KVM_MEMORY_SLOTS 32
@@ -134,11 +135,18 @@ enum {
134 135
135#define KVM_NR_MEM_OBJS 40 136#define KVM_NR_MEM_OBJS 40
136 137
137struct kvm_guest_debug { 138#define KVM_NR_DB_REGS 4
138 int enabled; 139
139 unsigned long bp[4]; 140#define DR6_BD (1 << 13)
140 int singlestep; 141#define DR6_BS (1 << 14)
141}; 142#define DR6_FIXED_1 0xffff0ff0
143#define DR6_VOLATILE 0x0000e00f
144
145#define DR7_BP_EN_MASK 0x000000ff
146#define DR7_GE (1 << 9)
147#define DR7_GD (1 << 13)
148#define DR7_FIXED_1 0x00000400
149#define DR7_VOLATILE 0xffff23ff
142 150
143/* 151/*
144 * We don't want allocation failures within the mmu code, so we preallocate 152 * We don't want allocation failures within the mmu code, so we preallocate
@@ -162,7 +170,8 @@ struct kvm_pte_chain {
162 * bits 0:3 - total guest paging levels (2-4, or zero for real mode) 170 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
163 * bits 4:7 - page table level for this shadow (1-4) 171 * bits 4:7 - page table level for this shadow (1-4)
164 * bits 8:9 - page table quadrant for 2-level guests 172 * bits 8:9 - page table quadrant for 2-level guests
165 * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode) 173 * bit 16 - direct mapping of virtual to physical mapping at gfn
174 * used for real mode and two-dimensional paging
166 * bits 17:19 - common access permissions for all ptes in this shadow page 175 * bits 17:19 - common access permissions for all ptes in this shadow page
167 */ 176 */
168union kvm_mmu_page_role { 177union kvm_mmu_page_role {
@@ -172,9 +181,10 @@ union kvm_mmu_page_role {
172 unsigned level:4; 181 unsigned level:4;
173 unsigned quadrant:2; 182 unsigned quadrant:2;
174 unsigned pad_for_nice_hex_output:6; 183 unsigned pad_for_nice_hex_output:6;
175 unsigned metaphysical:1; 184 unsigned direct:1;
176 unsigned access:3; 185 unsigned access:3;
177 unsigned invalid:1; 186 unsigned invalid:1;
187 unsigned cr4_pge:1;
178 }; 188 };
179}; 189};
180 190
@@ -218,6 +228,18 @@ struct kvm_pv_mmu_op_buffer {
218 char buf[512] __aligned(sizeof(long)); 228 char buf[512] __aligned(sizeof(long));
219}; 229};
220 230
231struct kvm_pio_request {
232 unsigned long count;
233 int cur_count;
234 gva_t guest_gva;
235 int in;
236 int port;
237 int size;
238 int string;
239 int down;
240 int rep;
241};
242
221/* 243/*
222 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level 244 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
223 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu 245 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
@@ -236,6 +258,7 @@ struct kvm_mmu {
236 hpa_t root_hpa; 258 hpa_t root_hpa;
237 int root_level; 259 int root_level;
238 int shadow_root_level; 260 int shadow_root_level;
261 union kvm_mmu_page_role base_role;
239 262
240 u64 *pae_root; 263 u64 *pae_root;
241}; 264};
@@ -258,6 +281,7 @@ struct kvm_vcpu_arch {
258 unsigned long cr3; 281 unsigned long cr3;
259 unsigned long cr4; 282 unsigned long cr4;
260 unsigned long cr8; 283 unsigned long cr8;
284 u32 hflags;
261 u64 pdptrs[4]; /* pae */ 285 u64 pdptrs[4]; /* pae */
262 u64 shadow_efer; 286 u64 shadow_efer;
263 u64 apic_base; 287 u64 apic_base;
@@ -338,6 +362,15 @@ struct kvm_vcpu_arch {
338 362
339 struct mtrr_state_type mtrr_state; 363 struct mtrr_state_type mtrr_state;
340 u32 pat; 364 u32 pat;
365
366 int switch_db_regs;
367 unsigned long host_db[KVM_NR_DB_REGS];
368 unsigned long host_dr6;
369 unsigned long host_dr7;
370 unsigned long db[KVM_NR_DB_REGS];
371 unsigned long dr6;
372 unsigned long dr7;
373 unsigned long eff_db[KVM_NR_DB_REGS];
341}; 374};
342 375
343struct kvm_mem_alias { 376struct kvm_mem_alias {
@@ -378,6 +411,7 @@ struct kvm_arch{
378 411
379 unsigned long irq_sources_bitmap; 412 unsigned long irq_sources_bitmap;
380 unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; 413 unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
414 u64 vm_init_tsc;
381}; 415};
382 416
383struct kvm_vm_stat { 417struct kvm_vm_stat {
@@ -446,8 +480,7 @@ struct kvm_x86_ops {
446 void (*vcpu_put)(struct kvm_vcpu *vcpu); 480 void (*vcpu_put)(struct kvm_vcpu *vcpu);
447 481
448 int (*set_guest_debug)(struct kvm_vcpu *vcpu, 482 int (*set_guest_debug)(struct kvm_vcpu *vcpu,
449 struct kvm_debug_guest *dbg); 483 struct kvm_guest_debug *dbg);
450 void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
451 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); 484 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
452 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); 485 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
453 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); 486 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
@@ -583,16 +616,12 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
583void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, 616void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
584 u32 error_code); 617 u32 error_code);
585 618
586void kvm_pic_set_irq(void *opaque, int irq, int level); 619int kvm_pic_set_irq(void *opaque, int irq, int level);
587 620
588void kvm_inject_nmi(struct kvm_vcpu *vcpu); 621void kvm_inject_nmi(struct kvm_vcpu *vcpu);
589 622
590void fx_init(struct kvm_vcpu *vcpu); 623void fx_init(struct kvm_vcpu *vcpu);
591 624
592int emulator_read_std(unsigned long addr,
593 void *val,
594 unsigned int bytes,
595 struct kvm_vcpu *vcpu);
596int emulator_write_emulated(unsigned long addr, 625int emulator_write_emulated(unsigned long addr,
597 const void *val, 626 const void *val,
598 unsigned int bytes, 627 unsigned int bytes,
@@ -737,6 +766,10 @@ enum {
737 TASK_SWITCH_GATE = 3, 766 TASK_SWITCH_GATE = 3,
738}; 767};
739 768
769#define HF_GIF_MASK (1 << 0)
770#define HF_HIF_MASK (1 << 1)
771#define HF_VINTR_MASK (1 << 2)
772
740/* 773/*
741 * Hardware virtualization extension instructions may fault if a 774 * Hardware virtualization extension instructions may fault if a
742 * reboot turns off virtualization while processes are running. 775 * reboot turns off virtualization while processes are running.
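
The DR6/DR7 masks introduced earlier in this header split the debug registers into fixed, volatile and enable fields. A standalone sketch decoding a made-up DR7 value with those masks:

#include <stdio.h>

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)

int main(void)
{
	unsigned long dr7 = 0x240b;	/* made-up sample register value */

	printf("enable bits: 0x%02lx\n", dr7 & DR7_BP_EN_MASK);	/* 0x0b: L0, G0, L1 */
	printf("GE=%d GD=%d\n", !!(dr7 & DR7_GE), !!(dr7 & DR7_GD));	/* GE=0 GD=1 */
	return 0;
}
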
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index 43894428c3c2..0f4ee7148afe 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -26,36 +26,20 @@
26 26
27#ifndef __ASSEMBLY__ 27#ifndef __ASSEMBLY__
28#include <asm/hw_irq.h> 28#include <asm/hw_irq.h>
29#include <asm/kvm_para.h>
29 30
30/*G:031 But first, how does our Guest contact the Host to ask for privileged 31/*G:031 But first, how does our Guest contact the Host to ask for privileged
31 * operations? There are two ways: the direct way is to make a "hypercall", 32 * operations? There are two ways: the direct way is to make a "hypercall",
32 * to make requests of the Host Itself. 33 * to make requests of the Host Itself.
33 * 34 *
34 * Our hypercall mechanism uses the highest unused trap code (traps 32 and 35 * We use the KVM hypercall mechanism. Eighteen hypercalls are
35 * above are used by real hardware interrupts). Fifteen hypercalls are
36 * available: the hypercall number is put in the %eax register, and the 36 * available: the hypercall number is put in the %eax register, and the
37 * arguments (when required) are placed in %edx, %ebx and %ecx. If a return 37 * arguments (when required) are placed in %ebx, %ecx and %edx. If a return
38 * value makes sense, it's returned in %eax. 38 * value makes sense, it's returned in %eax.
39 * 39 *
40 * Grossly invalid calls result in Sudden Death at the hands of the vengeful 40 * Grossly invalid calls result in Sudden Death at the hands of the vengeful
41 * Host, rather than returning failure. This reflects Winston Churchill's 41 * Host, rather than returning failure. This reflects Winston Churchill's
42 * definition of a gentleman: "someone who is only rude intentionally". */ 42 * definition of a gentleman: "someone who is only rude intentionally". */
43static inline unsigned long
44hcall(unsigned long call,
45 unsigned long arg1, unsigned long arg2, unsigned long arg3)
46{
47 /* "int" is the Intel instruction to trigger a trap. */
48 asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
49 /* The call in %eax (aka "a") might be overwritten */
50 : "=a"(call)
51 /* The arguments are in %eax, %edx, %ebx & %ecx */
52 : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
53 /* "memory" means this might write somewhere in memory.
54 * This isn't true for all calls, but it's safe to tell
55 * gcc that it might happen so it doesn't get clever. */
56 : "memory");
57 return call;
58}
59/*:*/ 43/*:*/
60 44
61/* Can't use our min() macro here: needs to be a constant */ 45/* Can't use our min() macro here: needs to be a constant */
@@ -64,7 +48,7 @@ hcall(unsigned long call,
64#define LHCALL_RING_SIZE 64 48#define LHCALL_RING_SIZE 64
65struct hcall_args { 49struct hcall_args {
66 /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ 50 /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */
67 unsigned long arg0, arg2, arg3, arg1; 51 unsigned long arg0, arg1, arg2, arg3;
68}; 52};
69 53
70#endif /* !__ASSEMBLY__ */ 54#endif /* !__ASSEMBLY__ */
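
The rewritten comment above describes the new convention: call number in %eax, arguments in %ebx, %ecx, %edx, result back in %eax. A sketch of what the replacement hypercall wrapper looks like under that convention, assuming KVM_HYPERCALL from asm/kvm_para.h (included above) emits the hypercall instruction:

static inline unsigned long
hcall(unsigned long call, unsigned long arg1, unsigned long arg2,
      unsigned long arg3)
{
	/* "memory" is conservative: some calls write to memory, and it
	 * is safe to tell gcc that any of them might.
	 */
	asm volatile(KVM_HYPERCALL
		     : "=a"(call)
		     : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3)
		     : "memory");
	return call;
}
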
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 5d98d0b68ffc..12d55e773eb6 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -1,14 +1,11 @@
1#ifndef _ASM_X86_LINKAGE_H 1#ifndef _ASM_X86_LINKAGE_H
2#define _ASM_X86_LINKAGE_H 2#define _ASM_X86_LINKAGE_H
3 3
4#include <linux/stringify.h>
5
4#undef notrace 6#undef notrace
5#define notrace __attribute__((no_instrument_function)) 7#define notrace __attribute__((no_instrument_function))
6 8
7#ifdef CONFIG_X86_64
8#define __ALIGN .p2align 4,,15
9#define __ALIGN_STR ".p2align 4,,15"
10#endif
11
12#ifdef CONFIG_X86_32 9#ifdef CONFIG_X86_32
13#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) 10#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
14/* 11/*
@@ -50,72 +47,20 @@
50 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ 47 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
51 "g" (arg4), "g" (arg5), "g" (arg6)) 48 "g" (arg4), "g" (arg5), "g" (arg6))
52 49
53#endif 50#endif /* CONFIG_X86_32 */
54
55#ifdef CONFIG_X86_ALIGNMENT_16
56#define __ALIGN .align 16,0x90
57#define __ALIGN_STR ".align 16,0x90"
58#endif
59
60/*
61 * to check ENTRY_X86/END_X86 and
62 * KPROBE_ENTRY_X86/KPROBE_END_X86
63 * unbalanced-missed-mixed appearance
64 */
65#define __set_entry_x86 .set ENTRY_X86_IN, 0
66#define __unset_entry_x86 .set ENTRY_X86_IN, 1
67#define __set_kprobe_x86 .set KPROBE_X86_IN, 0
68#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1
69
70#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
71
72#define __check_entry_x86 \
73 .ifdef ENTRY_X86_IN; \
74 .ifeq ENTRY_X86_IN; \
75 __macro_err_x86; \
76 .abort; \
77 .endif; \
78 .endif
79
80#define __check_kprobe_x86 \
81 .ifdef KPROBE_X86_IN; \
82 .ifeq KPROBE_X86_IN; \
83 __macro_err_x86; \
84 .abort; \
85 .endif; \
86 .endif
87 51
88#define __check_entry_kprobe_x86 \ 52#ifdef __ASSEMBLY__
89 __check_entry_x86; \
90 __check_kprobe_x86
91 53
92#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86 54#define GLOBAL(name) \
93 55 .globl name; \
94#define ENTRY_X86(name) \
95 __check_entry_kprobe_x86; \
96 __set_entry_x86; \
97 .globl name; \
98 __ALIGN; \
99 name: 56 name:
100 57
101#define END_X86(name) \ 58#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
102 __unset_entry_x86; \ 59#define __ALIGN .p2align 4, 0x90
103 __check_entry_kprobe_x86; \ 60#define __ALIGN_STR __stringify(__ALIGN)
104 .size name, .-name 61#endif
105
106#define KPROBE_ENTRY_X86(name) \
107 __check_entry_kprobe_x86; \
108 __set_kprobe_x86; \
109 .pushsection .kprobes.text, "ax"; \
110 .globl name; \
111 __ALIGN; \
112 name:
113 62
114#define KPROBE_END_X86(name) \ 63#endif /* __ASSEMBLY__ */
115 __unset_kprobe_x86; \
116 __check_entry_kprobe_x86; \
117 .size name, .-name; \
118 .popsection
119 64
120#endif /* _ASM_X86_LINKAGE_H */ 65#endif /* _ASM_X86_LINKAGE_H */
121 66
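
GLOBAL() above replaces the ENTRY_X86/KPROBE_ENTRY_X86 bookkeeping with a plain label emitter for .S files. A minimal usage sketch (the label name is made up):

/* sample.S sketch: GLOBAL() expands to ".globl name; name:" */
#include <linux/linkage.h>

GLOBAL(sample_data)
	.long 0
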
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
deleted file mode 100644
index cc09cbbee27e..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ /dev/null
@@ -1,168 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
2#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
3
4#ifdef CONFIG_X86_LOCAL_APIC
5
6#include <mach_apicdef.h>
7#include <asm/smp.h>
8
9#define APIC_DFR_VALUE (APIC_DFR_FLAT)
10
11static inline const struct cpumask *target_cpus(void)
12{
13#ifdef CONFIG_SMP
14 return cpu_online_mask;
15#else
16 return cpumask_of(0);
17#endif
18}
19
20#define NO_BALANCE_IRQ (0)
21#define esr_disable (0)
22
23#ifdef CONFIG_X86_64
24#include <asm/genapic.h>
25#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
26#define INT_DEST_MODE (genapic->int_dest_mode)
27#define TARGET_CPUS (genapic->target_cpus())
28#define apic_id_registered (genapic->apic_id_registered)
29#define init_apic_ldr (genapic->init_apic_ldr)
30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
31#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
32#define phys_pkg_id (genapic->phys_pkg_id)
33#define vector_allocation_domain (genapic->vector_allocation_domain)
34#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
35#define send_IPI_self (genapic->send_IPI_self)
36#define wakeup_secondary_cpu (genapic->wakeup_cpu)
37extern void setup_apic_routing(void);
38#else
39#define INT_DELIVERY_MODE dest_LowestPrio
40#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
41#define TARGET_CPUS (target_cpus())
42#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
43/*
44 * Set up the logical destination ID.
45 *
 46 * Intel recommends setting DFR, LDR and TPR before enabling
47 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
48 * document number 292116). So here it goes...
49 */
50static inline void init_apic_ldr(void)
51{
52 unsigned long val;
53
54 apic_write(APIC_DFR, APIC_DFR_VALUE);
55 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
56 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
57 apic_write(APIC_LDR, val);
58}
59
60static inline int apic_id_registered(void)
61{
62 return physid_isset(read_apic_id(), phys_cpu_present_map);
63}
64
65static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
66{
67 return cpumask_bits(cpumask)[0];
68}
69
70static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
71 const struct cpumask *andmask)
72{
73 unsigned long mask1 = cpumask_bits(cpumask)[0];
74 unsigned long mask2 = cpumask_bits(andmask)[0];
75 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
76
77 return (unsigned int)(mask1 & mask2 & mask3);
78}
79
80static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
81{
82 return cpuid_apic >> index_msb;
83}
84
85static inline void setup_apic_routing(void)
86{
87#ifdef CONFIG_X86_IO_APIC
88 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
89 "Flat", nr_ioapics);
90#endif
91}
92
93static inline int apicid_to_node(int logical_apicid)
94{
95#ifdef CONFIG_SMP
96 return apicid_2_node[hard_smp_processor_id()];
97#else
98 return 0;
99#endif
100}
101
102static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
103{
104 /* Careful. Some cpus do not strictly honor the set of cpus
105 * specified in the interrupt destination when using lowest
106 * priority interrupt delivery mode.
107 *
108 * In particular there was a hyperthreading cpu observed to
109 * deliver interrupts to the wrong hyperthread when only one
110	 * hyperthread was specified in the interrupt destination.
111 */
112 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
113}
114#endif
115
116static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
117{
118 return physid_isset(apicid, bitmap);
119}
120
121static inline unsigned long check_apicid_present(int bit)
122{
123 return physid_isset(bit, phys_cpu_present_map);
124}
125
126static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
127{
128 return phys_map;
129}
130
131static inline int multi_timer_check(int apic, int irq)
132{
133 return 0;
134}
135
136/* Mapping from cpu number to logical apicid */
137static inline int cpu_to_logical_apicid(int cpu)
138{
139 return 1 << cpu;
140}
141
142static inline int cpu_present_to_apicid(int mps_cpu)
143{
144 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
145 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
146 else
147 return BAD_APICID;
148}
149
150static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
151{
152 return physid_mask_of_physid(phys_apicid);
153}
154
155static inline void setup_portio_remap(void)
156{
157}
158
159static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
160{
161 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
162}
163
164static inline void enable_apic_mode(void)
165{
166}
167#endif /* CONFIG_X86_LOCAL_APIC */
168#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
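
In flat APIC mode the helpers above give each CPU the logical ID 1 << cpu (see init_apic_ldr and cpu_to_logical_apicid) and build a multi-CPU destination by taking the low word of the cpumask (cpu_mask_to_apicid). A minimal userspace sketch of that encoding; the name flat_cpu_mask_to_apicid is illustrative, not a kernel symbol:

#include <stdio.h>

/* Flat mode: one bit per CPU, so a destination covering several CPUs
 * is simply the OR of their 1 << cpu logical IDs. */
static unsigned int flat_cpu_mask_to_apicid(const unsigned int *cpus, int n)
{
        unsigned int apicid = 0;
        int i;

        for (i = 0; i < n; i++)
                apicid |= 1u << cpus[i];        /* logical ID of cpus[i] */
        return apicid;
}

int main(void)
{
        unsigned int cpus[] = { 0, 1, 3 };

        /* CPUs 0, 1 and 3 -> 0b1011 == 0xb, the value an IPI would
         * carry in its logical destination field. */
        printf("apicid mask = 0x%x\n", flat_cpu_mask_to_apicid(cpus, 3));
        return 0;
}
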
diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h
deleted file mode 100644
index 53179936d6c6..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apicdef.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
2#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
3
4#include <asm/apic.h>
5
6#ifdef CONFIG_X86_64
7#define APIC_ID_MASK (genapic->apic_id_mask)
8#define GET_APIC_ID(x) (genapic->get_apic_id(x))
9#define SET_APIC_ID(x) (genapic->set_apic_id(x))
10#else
11#define APIC_ID_MASK (0xF<<24)
12static inline unsigned get_apic_id(unsigned long x)
13{
14 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
15 if (APIC_XAPIC(ver))
16 return (((x)>>24)&0xFF);
17 else
18 return (((x)>>24)&0xF);
19}
20
21#define GET_APIC_ID(x) get_apic_id(x)
22#endif
23
24#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
deleted file mode 100644
index 191312d155da..000000000000
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
2#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
3
4/* Avoid include hell */
5#define NMI_VECTOR 0x02
6
7void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
8void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
9void __send_IPI_shortcut(unsigned int shortcut, int vector);
10
11extern int no_broadcast;
12
13#ifdef CONFIG_X86_64
14#include <asm/genapic.h>
15#define send_IPI_mask (genapic->send_IPI_mask)
16#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
17#else
18static inline void send_IPI_mask(const struct cpumask *mask, int vector)
19{
20 send_IPI_mask_bitmask(mask, vector);
21}
22void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
23#endif
24
25static inline void __local_send_IPI_allbutself(int vector)
26{
27 if (no_broadcast || vector == NMI_VECTOR)
28 send_IPI_mask_allbutself(cpu_online_mask, vector);
29 else
30 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
31}
32
33static inline void __local_send_IPI_all(int vector)
34{
35 if (no_broadcast || vector == NMI_VECTOR)
36 send_IPI_mask(cpu_online_mask, vector);
37 else
38 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
39}
40
41#ifdef CONFIG_X86_64
42#define send_IPI_allbutself (genapic->send_IPI_allbutself)
43#define send_IPI_all (genapic->send_IPI_all)
44#else
45static inline void send_IPI_allbutself(int vector)
46{
47 /*
48 * if there are no other CPUs in the system then we get an APIC send
49 * error if we try to broadcast, thus avoid sending IPIs in this case.
50 */
51 if (!(num_online_cpus() > 1))
52 return;
53
54 __local_send_IPI_allbutself(vector);
55 return;
56}
57
58static inline void send_IPI_all(int vector)
59{
60 __local_send_IPI_all(vector);
61}
62#endif
63
64#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
deleted file mode 100644
index c70a263d68cd..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpparse.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
2#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
3
4static inline int
5mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
6{
7 return 0;
8}
9
10/* Hook from generic ACPI tables.c */
11static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
12{
13 return 0;
14}
15
16
17#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h
deleted file mode 100644
index e85ede686be8..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
2#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#if CONFIG_BASE_SMALL == 0
7#define MAX_MP_BUSSES 256
8#else
9#define MAX_MP_BUSSES 32
10#endif
11
12#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
deleted file mode 100644
index 89897a6a65b9..000000000000
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
2#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW (0x467)
5#define TRAMPOLINE_PHYS_HIGH (0x469)
6
7static inline void wait_for_init_deassert(atomic_t *deassert)
8{
9 while (!atomic_read(deassert))
10 cpu_relax();
11 return;
12}
13
14/* Nothing to do for most platforms, since cleared by the INIT cycle */
15static inline void smp_callin_clear_local_apic(void)
16{
17}
18
19static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
20{
21}
22
23static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
24{
25}
26
27#ifdef CONFIG_SMP
28extern void __inquire_remote_apic(int apicid);
29#else /* CONFIG_SMP */
30static inline void __inquire_remote_apic(int apicid)
31{
32}
33#endif /* CONFIG_SMP */
34
35static inline void inquire_remote_apic(int apicid)
36{
37 if (apic_verbosity >= APIC_DEBUG)
38 __inquire_remote_apic(apicid);
39}
40
41#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
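
wait_for_init_deassert() above is a plain spin loop with cpu_relax() as the pause hint. A rough userspace analogue, assuming an x86 toolchain where _mm_pause() stands in for cpu_relax():

#include <stdatomic.h>
#include <immintrin.h>          /* _mm_pause(), standing in for cpu_relax() */

/* Spin until another CPU de-asserts INIT by raising the flag; the
 * pause hint keeps a spinning hyperthread from starving its sibling. */
static void wait_for_flag(atomic_int *deassert)
{
        while (!atomic_load_explicit(deassert, memory_order_acquire))
                _mm_pause();
}

int main(void)
{
        atomic_int deassert = 1;        /* already de-asserted: returns at once */

        wait_for_flag(&deassert);
        return 0;
}
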
diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h
deleted file mode 100644
index 995c45efdb33..000000000000
--- a/arch/x86/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
2#define _ASM_X86_MACH_GENERIC_GPIO_H
3
4int gpio_request(unsigned gpio, const char *label);
5void gpio_free(unsigned gpio);
6int gpio_direction_input(unsigned gpio);
7int gpio_direction_output(unsigned gpio, int value);
8int gpio_get_value(unsigned gpio);
9void gpio_set_value(unsigned gpio, int value);
10int gpio_to_irq(unsigned gpio);
11int irq_to_gpio(unsigned irq);
12
13#include <asm-generic/gpio.h> /* cansleep wrappers */
14
15#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
deleted file mode 100644
index 48553e958ad5..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
2#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
3
4#include <asm/genapic.h>
5
6#define esr_disable (genapic->ESR_DISABLE)
7#define NO_BALANCE_IRQ (genapic->no_balance_irq)
8#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
9#define INT_DEST_MODE (genapic->int_dest_mode)
10#undef APIC_DEST_LOGICAL
11#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
12#define TARGET_CPUS (genapic->target_cpus())
13#define apic_id_registered (genapic->apic_id_registered)
14#define init_apic_ldr (genapic->init_apic_ldr)
15#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
16#define setup_apic_routing (genapic->setup_apic_routing)
17#define multi_timer_check (genapic->multi_timer_check)
18#define apicid_to_node (genapic->apicid_to_node)
19#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
20#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
21#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
22#define setup_portio_remap (genapic->setup_portio_remap)
23#define check_apicid_present (genapic->check_apicid_present)
24#define check_phys_apicid_present (genapic->check_phys_apicid_present)
25#define check_apicid_used (genapic->check_apicid_used)
26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
27#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
28#define vector_allocation_domain (genapic->vector_allocation_domain)
29#define enable_apic_mode (genapic->enable_apic_mode)
30#define phys_pkg_id (genapic->phys_pkg_id)
31#define wakeup_secondary_cpu (genapic->wakeup_cpu)
32
33extern void generic_bigsmp_probe(void);
34
35#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h
deleted file mode 100644
index 68041f3802f4..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apicdef.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
2#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
3
4#ifndef APIC_DEFINITION
5#include <asm/genapic.h>
6
7#define GET_APIC_ID (genapic->get_apic_id)
8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif
10
11#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h
deleted file mode 100644
index ffd637e3c3d9..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_ipi.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
2#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
3
4#include <asm/genapic.h>
5
6#define send_IPI_mask (genapic->send_IPI_mask)
7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all)
9
10#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
deleted file mode 100644
index 9444ab8dca94..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpparse.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
2#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
3
4
5extern int mps_oem_check(struct mpc_table *, char *, char *);
6
7extern int acpi_madt_oem_check(char *, char *);
8
9#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
deleted file mode 100644
index 3bc407226578..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
2#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
7/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
8#define MAX_MP_BUSSES 260
9
10extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
11
12#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
deleted file mode 100644
index 1ab16b168c8a..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
2#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
5#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
6#define wait_for_init_deassert (genapic->wait_for_init_deassert)
7#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
8#define store_NMI_vector (genapic->store_NMI_vector)
9#define restore_NMI_vector (genapic->restore_NMI_vector)
10#define inquire_remote_apic (genapic->inquire_remote_apic)
11
12#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h
deleted file mode 100644
index c210ab5788b0..000000000000
--- a/arch/x86/include/asm/mach-rdc321x/gpio.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
2#define _ASM_X86_MACH_RDC321X_GPIO_H
3
4#include <linux/kernel.h>
5
6extern int rdc_gpio_get_value(unsigned gpio);
7extern void rdc_gpio_set_value(unsigned gpio, int value);
8extern int rdc_gpio_direction_input(unsigned gpio);
9extern int rdc_gpio_direction_output(unsigned gpio, int value);
10extern int rdc_gpio_request(unsigned gpio, const char *label);
11extern void rdc_gpio_free(unsigned gpio);
12extern void __init rdc321x_gpio_setup(void);
13
14/* Wrappers for the arch-neutral GPIO API */
15
16static inline int gpio_request(unsigned gpio, const char *label)
17{
18 return rdc_gpio_request(gpio, label);
19}
20
21static inline void gpio_free(unsigned gpio)
22{
23 might_sleep();
24 rdc_gpio_free(gpio);
25}
26
27static inline int gpio_direction_input(unsigned gpio)
28{
29 return rdc_gpio_direction_input(gpio);
30}
31
32static inline int gpio_direction_output(unsigned gpio, int value)
33{
34 return rdc_gpio_direction_output(gpio, value);
35}
36
37static inline int gpio_get_value(unsigned gpio)
38{
39 return rdc_gpio_get_value(gpio);
40}
41
42static inline void gpio_set_value(unsigned gpio, int value)
43{
44 rdc_gpio_set_value(gpio, value);
45}
46
47static inline int gpio_to_irq(unsigned gpio)
48{
49 return gpio;
50}
51
52static inline int irq_to_gpio(unsigned irq)
53{
54 return irq;
55}
56
57/* For cansleep */
58#include <asm-generic/gpio.h>
59
60#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
diff --git a/arch/x86/include/asm/mach-voyager/do_timer.h b/arch/x86/include/asm/mach-voyager/do_timer.h
deleted file mode 100644
index 9e5a459fd15b..000000000000
--- a/arch/x86/include/asm/mach-voyager/do_timer.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* defines for inline arch setup functions */
2#include <linux/clockchips.h>
3
4#include <asm/voyager.h>
5#include <asm/i8253.h>
6
7/**
8 * do_timer_interrupt_hook - hook into timer tick
9 *
10 * Call the pit clock event handler. see asm/i8253.h
11 **/
12static inline void do_timer_interrupt_hook(void)
13{
14 global_clock_event->event_handler(global_clock_event);
15 voyager_timer_interrupt();
16}
17
diff --git a/arch/x86/include/asm/mach-voyager/entry_arch.h b/arch/x86/include/asm/mach-voyager/entry_arch.h
deleted file mode 100644
index ae52624b5937..000000000000
--- a/arch/x86/include/asm/mach-voyager/entry_arch.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 2002
4 *
5 * Author: James.Bottomley@HansenPartnership.com
6 *
7 * linux/arch/i386/voyager/entry_arch.h
8 *
9 * This file builds the VIC and QIC CPI gates
10 */
11
12/* initialise the voyager interrupt gates
13 *
14 * This uses the macros in irq.h to set up assembly jump gates. The
15 * calls are then redirected to the same routine with smp_ prefixed */
16BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT)
17BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT)
18BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0);
19
20/* do all the QIC interrupts */
21BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI);
22BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
23BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
24BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
25BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
26BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/arch/x86/include/asm/mach-voyager/setup_arch.h b/arch/x86/include/asm/mach-voyager/setup_arch.h
deleted file mode 100644
index 71729ca05cd7..000000000000
--- a/arch/x86/include/asm/mach-voyager/setup_arch.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#include <asm/voyager.h>
2#include <asm/setup.h>
3#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \
4 (&boot_params.apm_bios_info))
5
6/* Hook to call BIOS initialisation function */
7
8/* for voyager, pass the voyager BIOS/SUS info area to the detection
9 * routines */
10
11#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO);
12
diff --git a/arch/x86/include/asm/mach-default/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 853728519ae9..853728519ae9 100644
--- a/arch/x86/include/asm/mach-default/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
diff --git a/arch/x86/include/asm/mach-default/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e472..f7920601e472 100644
--- a/arch/x86/include/asm/mach-default/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 32c6e17b960b..563933e06a35 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -11,6 +11,8 @@
  */
 
 #define MCG_CTL_P	(1UL<<8)   /* MCG_CAP register available */
+#define MCG_EXT_P	(1ULL<<9)  /* Extended registers available */
+#define MCG_CMCI_P	(1ULL<<10) /* CMCI supported */
 
 #define MCG_STATUS_RIPV  (1UL<<0)   /* restart ip valid */
 #define MCG_STATUS_EIPV  (1UL<<1)   /* ip points to correct instruction */
@@ -90,14 +92,29 @@ extern int mce_disabled;
 
 #include <asm/atomic.h>
 
+void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, device_mce);
 extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
+/*
+ * To support more than 128 would need to escape the predefined
+ * Linux defined extended banks first.
+ */
+#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)
+
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
+void cmci_clear(void);
+void cmci_reenable(void);
+void cmci_rediscover(int dying);
+void cmci_recheck(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
+static inline void cmci_clear(void) {}
+static inline void cmci_reenable(void) {}
+static inline void cmci_rediscover(int dying) {}
+static inline void cmci_recheck(void) {}
 #endif
 
 #ifdef CONFIG_X86_MCE_AMD
@@ -106,11 +123,23 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c);
 static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
 #endif
 
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
+extern int mce_available(struct cpuinfo_x86 *c);
+
+void mce_log_therm_throt_event(__u64 status);
 
 extern atomic_t mce_entry;
 
 extern void do_machine_check(struct pt_regs *, long);
+
+typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
+DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
+
+enum mcp_flags {
+	MCP_TIMESTAMP = (1 << 0),	/* log time stamp */
+	MCP_UC = (1 << 1),		/* log uncorrected errors */
+};
+extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+
 extern int mce_notify_user(void);
 
 #endif /* !CONFIG_X86_32 */
@@ -120,8 +149,8 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
 #else
 #define mcheck_init(c) do { } while (0)
 #endif
-extern void stop_mce(void);
-extern void restart_mce(void);
+
+extern void (*mce_threshold_vector)(void);
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
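
The new machine_check_poll() interface takes an mcp_flags mask plus a per-bank bitmap (mce_banks_t). A small sketch of how such a flags-plus-bitmap API is driven; poll_banks() is an illustrative stand-in for the kernel function, with a plain 64-bit word in place of DECLARE_BITMAP:

#include <stdio.h>
#include <stdint.h>

enum mcp_flags {
        MCP_TIMESTAMP = (1 << 0),       /* log time stamp */
        MCP_UC        = (1 << 1),       /* log uncorrected errors */
};

/* Bit n set means bank n should be polled, mirroring what the per-CPU
 * mce_poll_banks bitmap expresses. */
static void poll_banks(uint64_t banks, enum mcp_flags flags)
{
        int bank;

        for (bank = 0; bank < 64; bank++) {
                if (!(banks & (1ull << bank)))
                        continue;
                printf("polling bank %d%s\n", bank,
                       (flags & MCP_TIMESTAMP) ? " (with timestamp)" : "");
        }
}

int main(void)
{
        poll_banks((1ull << 0) | (1ull << 5), MCP_TIMESTAMP);
        return 0;
}
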
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8aeeb3fd73db..f923203dc39a 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
-#ifdef CONFIG_X86_32
-# include "mmu_context_32.h"
-#else
-# include "mmu_context_64.h"
-#endif
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned cpu = smp_processor_id();
+
+	if (likely(prev != next)) {
+		/* stop flush ipis for the previous mm */
+		cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		percpu_write(cpu_tlbstate.active_mm, next);
+#endif
+		cpu_set(cpu, next->cpu_vm_mask);
+
+		/* Re-load page tables */
+		load_cr3(next->pgd);
+
+		/*
+		 * load the LDT, if the LDT is different:
+		 */
+		if (unlikely(prev->context.ldt != next->context.ldt))
+			load_LDT_nolock(&next->context);
+	}
+#ifdef CONFIG_SMP
+	else {
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+			/* We were in lazy tlb mode and leave_mm disabled
+			 * tlb flush IPI delivery. We must reload CR3
+			 * to make sure to use no freed page tables.
+			 */
+			load_cr3(next->pgd);
+			load_LDT_nolock(&next->context);
+		}
+	}
+#endif
+}
 
 #define activate_mm(prev, next)			\
 do {						\
@@ -33,5 +76,17 @@ do { \
 	switch_mm((prev), (next), NULL);	\
 } while (0);
 
+#ifdef CONFIG_X86_32
+#define deactivate_mm(tsk, mm)			\
+do {						\
+	lazy_load_gs(0);			\
+} while (0)
+#else
+#define deactivate_mm(tsk, mm)			\
+do {						\
+	load_gs_index(0);			\
+	loadsegment(fs, 0);			\
+} while (0)
+#endif
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
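
The consolidated switch_mm() above encodes the lazy-TLB contract: a CPU parked in a kernel thread keeps the old mm mapped, may be dropped from cpu_vm_mask by a remote leave_mm(), and must then reload CR3 when it returns to that mm. A toy single-CPU model of exactly that corner case; every name here is a stand-in for the kernel's per-cpu machinery, not the real API:

#include <stdbool.h>
#include <stdio.h>

enum tlb_state { TLBSTATE_OK, TLBSTATE_LAZY };

struct mm { int pgd; };

static enum tlb_state state = TLBSTATE_OK;
static bool in_vm_mask;                 /* this CPU's bit in mm->cpu_vm_mask */

static void load_cr3(struct mm *mm)
{
        printf("load_cr3(pgd=%d)\n", mm->pgd);
}

/* A kernel thread is scheduled: keep the old mm mapped, just stop
 * demanding TLB-flush IPIs for it. */
static void enter_lazy_tlb(void)
{
        state = TLBSTATE_LAZY;
}

/* The prev == next branch of switch_mm(): returning to the mm we never
 * unmapped.  If a remote leave_mm() cleared our cpu_vm_mask bit while
 * we were lazy, flush IPIs were skipped and stale translations may
 * remain, so CR3 must be reloaded. */
static void return_to_same_mm(struct mm *next)
{
        if (state == TLBSTATE_LAZY)
                state = TLBSTATE_OK;
        if (!in_vm_mask) {              /* cpu_test_and_set() found it clear */
                in_vm_mask = true;
                load_cr3(next);         /* drop any stale TLB entries */
        }
}

int main(void)
{
        struct mm m = { .pgd = 42 };

        in_vm_mask = true;
        enter_lazy_tlb();
        in_vm_mask = false;             /* remote leave_mm() dropped this CPU */
        return_to_same_mm(&m);          /* prints load_cr3(pgd=42) */
        return 0;
}
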
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
deleted file mode 100644
index 7e98ce1d2c0e..000000000000
--- a/arch/x86/include/asm/mmu_context_32.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_X86_MMU_CONTEXT_32_H
2#define _ASM_X86_MMU_CONTEXT_32_H
3
4static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
5{
6#ifdef CONFIG_SMP
7 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
8 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
9#endif
10}
11
12static inline void switch_mm(struct mm_struct *prev,
13 struct mm_struct *next,
14 struct task_struct *tsk)
15{
16 int cpu = smp_processor_id();
17
18 if (likely(prev != next)) {
19 /* stop flush ipis for the previous mm */
20 cpu_clear(cpu, prev->cpu_vm_mask);
21#ifdef CONFIG_SMP
22 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
23 x86_write_percpu(cpu_tlbstate.active_mm, next);
24#endif
25 cpu_set(cpu, next->cpu_vm_mask);
26
27 /* Re-load page tables */
28 load_cr3(next->pgd);
29
30 /*
31 * load the LDT, if the LDT is different:
32 */
33 if (unlikely(prev->context.ldt != next->context.ldt))
34 load_LDT_nolock(&next->context);
35 }
36#ifdef CONFIG_SMP
37 else {
38 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
39 BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
40
41 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
42 /* We were in lazy tlb mode and leave_mm disabled
43 * tlb flush IPI delivery. We must reload %cr3.
44 */
45 load_cr3(next->pgd);
46 load_LDT_nolock(&next->context);
47 }
48 }
49#endif
50}
51
52#define deactivate_mm(tsk, mm) \
53 asm("movl %0,%%gs": :"r" (0));
54
55#endif /* _ASM_X86_MMU_CONTEXT_32_H */
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
deleted file mode 100644
index 677d36e9540a..000000000000
--- a/arch/x86/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef _ASM_X86_MMU_CONTEXT_64_H
2#define _ASM_X86_MMU_CONTEXT_64_H
3
4#include <asm/pda.h>
5
6static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7{
8#ifdef CONFIG_SMP
9 if (read_pda(mmu_state) == TLBSTATE_OK)
10 write_pda(mmu_state, TLBSTATE_LAZY);
11#endif
12}
13
14static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15 struct task_struct *tsk)
16{
17 unsigned cpu = smp_processor_id();
18 if (likely(prev != next)) {
19 /* stop flush ipis for the previous mm */
20 cpu_clear(cpu, prev->cpu_vm_mask);
21#ifdef CONFIG_SMP
22 write_pda(mmu_state, TLBSTATE_OK);
23 write_pda(active_mm, next);
24#endif
25 cpu_set(cpu, next->cpu_vm_mask);
26 load_cr3(next->pgd);
27
28 if (unlikely(next->context.ldt != prev->context.ldt))
29 load_LDT_nolock(&next->context);
30 }
31#ifdef CONFIG_SMP
32 else {
33 write_pda(mmu_state, TLBSTATE_OK);
34 if (read_pda(active_mm) != next)
35 BUG();
36 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
37 /* We were in lazy tlb mode and leave_mm disabled
38 * tlb flush IPI delivery. We must reload CR3
39 * to make sure to use no freed page tables.
40 */
41 load_cr3(next->pgd);
42 load_LDT_nolock(&next->context);
43 }
44 }
45#endif
46}
47
48#define deactivate_mm(tsk, mm) \
49do { \
50 load_gs_index(0); \
51 asm volatile("movl %0,%%fs"::"r"(0)); \
52} while (0)
53
54#endif /* _ASM_X86_MMU_CONTEXT_64_H */
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 105fb90a0635..ede6998bd92c 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -91,46 +91,9 @@ static inline int pfn_valid(int pfn)
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-
-/*
- * Following are macros that are specific to this numa platform.
- */
-#define reserve_bootmem(addr, size, flags) \
-	reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
-#define alloc_bootmem(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_nopanic(x) \
-	__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
-				__pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_pages(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_nopanic(x) \
-	__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
-				__pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(pgdat, x)					\
-({									\
-	struct pglist_data  __maybe_unused				\
-			*__alloc_bootmem_node__pgdat = (pgdat);		\
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES,	\
-				__pa(MAX_DMA_ADDRESS));			\
-})
-#define alloc_bootmem_pages_node(pgdat, x)				\
-({									\
-	struct pglist_data  __maybe_unused				\
-			*__alloc_bootmem_node__pgdat = (pgdat);		\
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE,		\
-				__pa(MAX_DMA_ADDRESS));			\
-})
-#define alloc_bootmem_low_pages_node(pgdat, x)				\
-({									\
-	struct pglist_data  __maybe_unused				\
-			*__alloc_bootmem_node__pgdat = (pgdat);		\
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0);		\
-})
+/* always use node 0 for bootmem on this numa platform */
+#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit)	\
+	(NODE_DATA(0)->bdata)
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index bd22f2a3713f..642fc7fc8cdc 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
 extern int pic_mode;
 
 #ifdef CONFIG_X86_32
-#include <mach_mpspec.h>
+
+/*
+ * Summit or generic (i.e. installer) kernels need lots of bus entries.
+ * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
+ */
+#if CONFIG_BASE_SMALL == 0
+# define MAX_MP_BUSSES		260
+#else
+# define MAX_MP_BUSSES		32
+#endif
+
+#define MAX_IRQ_SOURCES		256
 
 extern unsigned int def_to_bigsmp;
 extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
 extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 #endif
 
 #define MAX_APICID		256
 
-#else
+#else /* CONFIG_X86_64: */
 
 #define MAX_MP_BUSSES		256
 /* Each PCI slot may be a combo card with its own bus.  4 IRQ pins per slot. */
 #define MAX_IRQ_SOURCES		(MAX_MP_BUSSES * 4)
 
-#endif
+#endif /* CONFIG_X86_64 */
 
 extern void early_find_smp_config(void);
 extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
 extern int mpc_default_type;
 extern unsigned long mp_lapic_addr;
 
-extern void find_smp_config(void);
 extern void get_smp_config(void);
+
 #ifdef CONFIG_X86_MPPARSE
+extern void find_smp_config(void);
 extern void early_reserve_e820_mpc_new(void);
 #else
+static inline void find_smp_config(void) { }
 static inline void early_reserve_e820_mpc_new(void) { }
 #endif
 
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
 #ifdef CONFIG_X86_IO_APIC
 extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 				u32 gsi, int triggering, int polarity);
+extern int mp_find_ioapic(int gsi);
+extern int mp_find_ioapic_pin(int ioapic, int gsi);
 #else
 static inline int
 mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,8 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
 
 extern physid_mask_t phys_cpu_present_map;
 
+extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
+
+extern int default_acpi_madt_oem_check(char *, char *);
+
 #endif /* _ASM_X86_MPSPEC_H */
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 59568bc4767f..4a7f96d7c188 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -24,17 +24,18 @@
 # endif
 #endif
 
-struct intel_mp_floating {
-	char mpf_signature[4];		/* "_MP_" */
-	unsigned int mpf_physptr;	/* Configuration table address */
-	unsigned char mpf_length;	/* Our length (paragraphs) */
-	unsigned char mpf_specification;/* Specification version */
-	unsigned char mpf_checksum;	/* Checksum (makes sum 0) */
-	unsigned char mpf_feature1;	/* Standard or configuration ? */
-	unsigned char mpf_feature2;	/* Bit7 set for IMCR|PIC */
-	unsigned char mpf_feature3;	/* Unused (0) */
-	unsigned char mpf_feature4;	/* Unused (0) */
-	unsigned char mpf_feature5;	/* Unused (0) */
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+	char signature[4];		/* "_MP_" */
+	unsigned int physptr;		/* Configuration table address */
+	unsigned char length;		/* Our length (paragraphs) */
+	unsigned char specification;	/* Specification version */
+	unsigned char checksum;		/* Checksum (makes sum 0) */
+	unsigned char feature1;		/* Standard or configuration ? */
+	unsigned char feature2;		/* Bit7 set for IMCR|PIC */
+	unsigned char feature3;		/* Unused (0) */
+	unsigned char feature4;		/* Unused (0) */
+	unsigned char feature5;		/* Unused (0) */
 };
 
 #define MPC_SIGNATURE "PCMP"
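
The checksum field of struct mpf_intel is documented as "makes sum 0": the byte sum of the whole (length * 16)-byte structure must be zero modulo 256. A sketch of the validation a scanner would perform, along the lines of the kernel's mpf_checksum() helper in mpparse.c:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Valid when all bytes of the structure sum to 0 mod 256. */
static int mpf_checksum_ok(const uint8_t *mpf, size_t len)
{
        uint8_t sum = 0;

        while (len--)
                sum += *mpf++;
        return sum == 0;
}

int main(void)
{
        uint8_t buf[16] = { '_', 'M', 'P', '_' };
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < sizeof(buf); i++)
                sum += buf[i];
        buf[10] = (uint8_t)-sum;        /* the checksum slot in the layout above */

        printf("valid: %d\n", mpf_checksum_ok(buf, sizeof(buf)));
        return 0;
}
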
diff --git a/arch/x86/include/asm/msidef.h b/arch/x86/include/asm/msidef.h
index 6706b3006f13..4cc48af23fef 100644
--- a/arch/x86/include/asm/msidef.h
+++ b/arch/x86/include/asm/msidef.h
@@ -47,6 +47,7 @@
 #define	 MSI_ADDR_DEST_ID_MASK		0x00ffff0
 #define  MSI_ADDR_DEST_ID(dest)	(((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
 					 MSI_ADDR_DEST_ID_MASK)
+#define MSI_ADDR_EXT_DEST_ID(dest)	((dest) & 0xffffff00)
 
 #define MSI_ADDR_IR_EXT_INT		(1 << 4)
 #define MSI_ADDR_IR_SHV			(1 << 3)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 358acc59ae04..ec41fc16c167 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -18,11 +18,15 @@
 #define _EFER_LME		8  /* Long mode enable */
 #define _EFER_LMA		10 /* Long mode active (read-only) */
 #define _EFER_NX		11 /* No execute enable */
+#define _EFER_SVME		12 /* Enable virtualization */
+#define _EFER_FFXSR		14 /* Enable Fast FXSAVE/FXRSTOR */
 
 #define EFER_SCE		(1<<_EFER_SCE)
 #define EFER_LME		(1<<_EFER_LME)
 #define EFER_LMA		(1<<_EFER_LMA)
 #define EFER_NX			(1<<_EFER_NX)
+#define EFER_SVME		(1<<_EFER_SVME)
+#define EFER_FFXSR		(1<<_EFER_FFXSR)
 
 /* Intel MSRs. Some also available on other CPUs */
 #define MSR_IA32_PERFCTR0		0x000000c1
@@ -77,6 +81,11 @@
 #define MSR_IA32_MC0_ADDR		0x00000402
 #define MSR_IA32_MC0_MISC		0x00000403
 
+/* These are consecutive and not in the normal 4er MCE bank block */
+#define MSR_IA32_MC0_CTL2		0x00000280
+#define CMCI_EN				(1ULL << 30)
+#define CMCI_THRESHOLD_MASK		0xffffULL
+
 #define MSR_P6_PERFCTR0			0x000000c1
 #define MSR_P6_PERFCTR1			0x000000c2
 #define MSR_P6_EVNTSEL0			0x00000186
@@ -360,4 +369,9 @@
 #define MSR_IA32_VMX_PROCBASED_CTLS2    0x0000048b
 #define MSR_IA32_VMX_EPT_VPID_CAP       0x0000048c
 
+/* AMD-V MSRs */
+
+#define MSR_VM_CR                       0xc0010114
+#define MSR_VM_HSAVE_PA                 0xc0010117
+
 #endif /* _ASM_X86_MSR_INDEX_H */
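
The new _EFER_SVME and _EFER_FFXSR bit numbers follow the same pattern as the existing EFER flags: a bit position plus a derived mask. A minimal check that the derived masks land on the documented bits; this is a userspace sketch, a real hypervisor would read-modify-write the MSR with rdmsr/wrmsr before executing VMRUN:

#include <stdint.h>
#include <assert.h>

#define _EFER_SVME      12
#define EFER_SVME       (1ULL << _EFER_SVME)

/* Enabling SVM is pure bit manipulation on the saved EFER value. */
static uint64_t efer_enable_svme(uint64_t efer)
{
        return efer | EFER_SVME;
}

int main(void)
{
        assert(efer_enable_svme(0) == 0x1000);  /* bit 12 */
        return 0;
}
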
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index e9f5db796244..a37229011b56 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -4,8 +4,12 @@
 extern int pxm_to_nid(int pxm);
 extern void numa_remove_cpu(int cpu);
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_HIGHMEM
 extern void set_highmem_pages_init(void);
+#else
+static inline void set_highmem_pages_init(void)
+{
+}
 #endif
 
 #endif /* _ASM_X86_NUMA_32_H */
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 1e8bd30b4c16..9f0a5f5d29ec 100644
--- a/arch/x86/include/asm/numaq.h
+++ b/arch/x86/include/asm/numaq.h
@@ -31,6 +31,8 @@
 extern int found_numaq;
 extern int get_memcfg_numaq(void);
 
+extern void *xquad_portio;
+
 /*
  * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
  */
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
deleted file mode 100644
index bf37bc49bd8e..000000000000
--- a/arch/x86/include/asm/numaq/apic.h
+++ /dev/null
@@ -1,142 +0,0 @@
1#ifndef __ASM_NUMAQ_APIC_H
2#define __ASM_NUMAQ_APIC_H
3
4#include <asm/io.h>
5#include <linux/mmzone.h>
6#include <linux/nodemask.h>
7
8#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
9
10static inline const cpumask_t *target_cpus(void)
11{
12 return &CPU_MASK_ALL;
13}
14
15#define NO_BALANCE_IRQ (1)
16#define esr_disable (1)
17
18#define INT_DELIVERY_MODE dest_LowestPrio
19#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
20
21static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
22{
23 return physid_isset(apicid, bitmap);
24}
25static inline unsigned long check_apicid_present(int bit)
26{
27 return physid_isset(bit, phys_cpu_present_map);
28}
29#define apicid_cluster(apicid) (apicid & 0xF0)
30
31static inline int apic_id_registered(void)
32{
33 return 1;
34}
35
36static inline void init_apic_ldr(void)
37{
38 /* Already done in NUMA-Q firmware */
39}
40
41static inline void setup_apic_routing(void)
42{
43 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
44 "NUMA-Q", nr_ioapics);
45}
46
47/*
48 * Skip adding the timer int on secondary nodes, which causes
49 * a small but painful rift in the time-space continuum.
50 */
51static inline int multi_timer_check(int apic, int irq)
52{
53 return apic != 0 && irq == 0;
54}
55
56static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
57{
58 /* We don't have a good way to do this yet - hack */
59 return physids_promote(0xFUL);
60}
61
62/* Mapping from cpu number to logical apicid */
63extern u8 cpu_2_logical_apicid[];
64static inline int cpu_to_logical_apicid(int cpu)
65{
66 if (cpu >= nr_cpu_ids)
67 return BAD_APICID;
68 return (int)cpu_2_logical_apicid[cpu];
69}
70
71/*
72 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
73 * cpu to APIC ID relation to properly interact with the intelligent
74 * mode of the cluster controller.
75 */
76static inline int cpu_present_to_apicid(int mps_cpu)
77{
78 if (mps_cpu < 60)
79 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
80 else
81 return BAD_APICID;
82}
83
84static inline int apicid_to_node(int logical_apicid)
85{
86 return logical_apicid >> 4;
87}
88
89static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
90{
91 int node = apicid_to_node(logical_apicid);
92 int cpu = __ffs(logical_apicid & 0xf);
93
94 return physid_mask_of_physid(cpu + 4*node);
95}
96
97extern void *xquad_portio;
98
99static inline void setup_portio_remap(void)
100{
101 int num_quads = num_online_nodes();
102
103 if (num_quads <= 1)
104 return;
105
106 printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
107 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
108 printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
109 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
110}
111
112static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
113{
114 return (1);
115}
116
117static inline void enable_apic_mode(void)
118{
119}
120
121/*
122 * We use physical apicids here, not logical, so just return the default
123 * physical broadcast to stop people from breaking us
124 */
125static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
126{
127 return (int) 0xF;
128}
129
130static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
131 const struct cpumask *andmask)
132{
133 return (int) 0xF;
134}
135
136/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
137static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
138{
139 return cpuid_apic >> index_msb;
140}
141
142#endif /* __ASM_NUMAQ_APIC_H */
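
The NUMA-Q cpu_present_to_apicid() above packs a CPU number into quad/slot form: the quad index lands in the high nibble and the CPU's position within its quad becomes a one-hot bit in the low nibble. A standalone illustration of the mapping (the wrapper name is hypothetical):

#include <stdio.h>

/* Four CPUs per quad: quad = cpu / 4 in the high nibble, slot bit
 * 1 << (cpu % 4) in the low nibble, as in cpu_present_to_apicid(). */
static int numaq_cpu_to_apicid(int mps_cpu)
{
        return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
}

int main(void)
{
        /* CPU 5 = quad 1, slot 1 -> 0x12; CPU 0 -> 0x1; CPU 7 -> 0x18 */
        printf("%#x %#x %#x\n", numaq_cpu_to_apicid(5),
               numaq_cpu_to_apicid(0), numaq_cpu_to_apicid(7));
        return 0;
}
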
diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h
deleted file mode 100644
index e012a46cc22a..000000000000
--- a/arch/x86/include/asm/numaq/apicdef.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_NUMAQ_APICDEF_H
2#define __ASM_NUMAQ_APICDEF_H
3
4
5#define APIC_ID_MASK (0xF<<24)
6
7static inline unsigned get_apic_id(unsigned long x)
8{
9 return (((x)>>24)&0x0F);
10}
11
12#define GET_APIC_ID(x) get_apic_id(x)
13
14#endif
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
deleted file mode 100644
index a8374c652778..000000000000
--- a/arch/x86/include/asm/numaq/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_NUMAQ_IPI_H
2#define __ASM_NUMAQ_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
deleted file mode 100644
index a2eeefcd1cc7..000000000000
--- a/arch/x86/include/asm/numaq/mpparse.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_NUMAQ_MPPARSE_H
2#define __ASM_NUMAQ_MPPARSE_H
3
4extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
5
6#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
deleted file mode 100644
index 6f499df8eddb..000000000000
--- a/arch/x86/include/asm/numaq/wakecpu.h
+++ /dev/null
@@ -1,45 +0,0 @@
1#ifndef __ASM_NUMAQ_WAKECPU_H
2#define __ASM_NUMAQ_WAKECPU_H
3
4/* This file copes with machines that wakeup secondary CPUs by NMIs */
5
6#define TRAMPOLINE_PHYS_LOW (0x8)
7#define TRAMPOLINE_PHYS_HIGH (0xa)
8
9/* We don't do anything here because we use NMI's to boot instead */
10static inline void wait_for_init_deassert(atomic_t *deassert)
11{
12}
13
14/*
15 * Because we use NMIs rather than the INIT-STARTUP sequence to
16 * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
17 */
18static inline void smp_callin_clear_local_apic(void)
19{
20 clear_local_APIC();
21}
22
23static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
24{
25 printk("Storing NMI vector\n");
26 *high =
27 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
28 *low =
29 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
30}
31
32static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
33{
34 printk("Restoring NMI vector\n");
35 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
36 *high;
37 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
38 *low;
39}
40
41static inline void inquire_remote_apic(int apicid)
42{
43}
44
45#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 776579119a00..89ed9d70b0aa 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -1,42 +1,11 @@
 #ifndef _ASM_X86_PAGE_H
 #define _ASM_X86_PAGE_H
 
-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT	12
-#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK	(~(PAGE_SIZE-1))
+#include <linux/types.h>
 
 #ifdef __KERNEL__
 
-#define __PHYSICAL_MASK		((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
-#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
-
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
-   virtual addresses are 32-bits but physical addresses are larger
-   (ie, 32-bit PAE). */
-#define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
-#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)
-
-/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
-#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
-
-#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
-
-#define HPAGE_SHIFT		PMD_SHIFT
-#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-
-#define HUGE_MAX_HSTATE 2
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
+#include <asm/page_types.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/page_64.h>
@@ -44,38 +13,18 @@
 #include <asm/page_32.h>
 #endif	/* CONFIG_X86_64 */
 
-#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
-
-#define VM_DATA_DEFAULT_FLAGS \
-	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-
 #ifndef __ASSEMBLY__
 
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pgprotval_t pgprot; } pgprot_t;
-
-extern int page_is_ram(unsigned long pagenr);
-extern int devmem_is_allowed(unsigned long pagenr);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-		       pgprot_t vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-			 pgprot_t vma_prot);
-
-extern unsigned long max_low_pfn_mapped;
-extern unsigned long max_pfn_mapped;
-
 struct page;
 
 static inline void clear_user_page(void *page, unsigned long vaddr,
 				struct page *pg)
 {
 	clear_page(page);
 }
 
 static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 				struct page *topage)
 {
 	copy_page(to, from);
 }
@@ -84,99 +33,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
-static inline pgd_t native_make_pgd(pgdval_t val)
-{
-	return (pgd_t) { val };
-}
-
-static inline pgdval_t native_pgd_val(pgd_t pgd)
-{
-	return pgd.pgd;
-}
-
-#if PAGETABLE_LEVELS >= 3
-#if PAGETABLE_LEVELS == 4
-typedef struct { pudval_t pud; } pud_t;
-
-static inline pud_t native_make_pud(pmdval_t val)
-{
-	return (pud_t) { val };
-}
-
-static inline pudval_t native_pud_val(pud_t pud)
-{
-	return pud.pud;
-}
-#else	/* PAGETABLE_LEVELS == 3 */
-#include <asm-generic/pgtable-nopud.h>
-
-static inline pudval_t native_pud_val(pud_t pud)
-{
-	return native_pgd_val(pud.pgd);
-}
-#endif	/* PAGETABLE_LEVELS == 4 */
-
-typedef struct { pmdval_t pmd; } pmd_t;
-
-static inline pmd_t native_make_pmd(pmdval_t val)
-{
-	return (pmd_t) { val };
-}
-
-static inline pmdval_t native_pmd_val(pmd_t pmd)
-{
-	return pmd.pmd;
-}
-#else	/* PAGETABLE_LEVELS == 2 */
-#include <asm-generic/pgtable-nopmd.h>
-
-static inline pmdval_t native_pmd_val(pmd_t pmd)
-{
-	return native_pgd_val(pmd.pud.pgd);
-}
-#endif	/* PAGETABLE_LEVELS >= 3 */
-
-static inline pte_t native_make_pte(pteval_t val)
-{
-	return (pte_t) { .pte = val };
-}
-
-static inline pteval_t native_pte_val(pte_t pte)
-{
-	return pte.pte;
-}
-
-static inline pteval_t native_pte_flags(pte_t pte)
-{
-	return native_pte_val(pte) & PTE_FLAGS_MASK;
-}
-
-#define pgprot_val(x)	((x).pgprot)
-#define __pgprot(x)	((pgprot_t) { (x) } )
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else	/* !CONFIG_PARAVIRT */
-
-#define pgd_val(x)	native_pgd_val(x)
-#define __pgd(x)	native_make_pgd(x)
-
-#ifndef __PAGETABLE_PUD_FOLDED
-#define pud_val(x)	native_pud_val(x)
-#define __pud(x)	native_make_pud(x)
-#endif
-
-#ifndef __PAGETABLE_PMD_FOLDED
-#define pmd_val(x)	native_pmd_val(x)
-#define __pmd(x)	native_make_pmd(x)
-#endif
-
-#define pte_val(x)	native_pte_val(x)
-#define pte_flags(x)	native_pte_flags(x)
-#define __pte(x)	native_make_pte(x)
-
-#endif	/* CONFIG_PARAVIRT */
-
 #define __pa(x)		__phys_addr((unsigned long)(x))
 #define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
 /* __pa_symbol should be used for C visible symbols.
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index bcde0d7b4325..da4e762406f7 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -1,82 +1,14 @@
 #ifndef _ASM_X86_PAGE_32_H
 #define _ASM_X86_PAGE_32_H
 
-/*
- * This handles the memory map.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB.
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-#define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
-
-#ifdef CONFIG_4KSTACKS
-#define THREAD_ORDER	0
-#else
-#define THREAD_ORDER	1
-#endif
-#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
-
-#define STACKFAULT_STACK 0
-#define DOUBLEFAULT_STACK 1
-#define NMI_STACK 0
-#define DEBUG_STACK 0
-#define MCE_STACK 0
-#define N_EXCEPTION_STACKS 1
-
-#ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT	44
-#define __VIRTUAL_MASK_SHIFT	32
-#define PAGETABLE_LEVELS	3
-
-#ifndef __ASSEMBLY__
-typedef u64	pteval_t;
-typedef u64	pmdval_t;
-typedef u64	pudval_t;
-typedef u64	pgdval_t;
-typedef u64	pgprotval_t;
-
-typedef union {
-	struct {
-		unsigned long pte_low, pte_high;
-	};
-	pteval_t pte;
-} pte_t;
-#endif	/* __ASSEMBLY__
- */
-#else	/* !CONFIG_X86_PAE */
-#define __PHYSICAL_MASK_SHIFT	32
-#define __VIRTUAL_MASK_SHIFT	32
-#define PAGETABLE_LEVELS	2
-
-#ifndef __ASSEMBLY__
-typedef unsigned long	pteval_t;
-typedef unsigned long	pmdval_t;
-typedef unsigned long	pudval_t;
-typedef unsigned long	pgdval_t;
-typedef unsigned long	pgprotval_t;
-
-typedef union {
-	pteval_t pte;
-	pteval_t pte_low;
-} pte_t;
-
-#endif	/* __ASSEMBLY__ */
-#endif	/* CONFIG_X86_PAE */
+#include <asm/page_32_types.h>
 
 #ifndef __ASSEMBLY__
-typedef struct page *pgtable_t;
-#endif
 
 #ifdef CONFIG_HUGETLB_PAGE
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif
 
-#ifndef __ASSEMBLY__
 #define __phys_addr_nodebug(x)	((x) - PAGE_OFFSET)
 #ifdef CONFIG_DEBUG_VIRTUAL
 extern unsigned long __phys_addr(unsigned long);
@@ -89,23 +21,6 @@ extern unsigned long __phys_addr(unsigned long);
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
 #endif /* CONFIG_FLATMEM */
 
-extern int nx_enabled;
-
-/*
- * This much address space is reserved for vmalloc() and iomap()
- * as well as fixmap mappings.
- */
-extern unsigned int __VMALLOC_RESERVE;
-extern int sysctl_legacy_va_layout;
-
-extern void find_low_pfn_range(void);
-extern unsigned long init_memory_mapping(unsigned long start,
-					 unsigned long end);
-extern void initmem_init(unsigned long, unsigned long);
-extern void free_initmem(void);
-extern void setup_bootmem_allocator(void);
-
-
 #ifdef CONFIG_X86_USE_3DNOW
 #include <asm/mmx.h>
 
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
new file mode 100644
index 000000000000..0f915ae649a7
--- /dev/null
+++ b/arch/x86/include/asm/page_32_types.h
@@ -0,0 +1,65 @@
1#ifndef _ASM_X86_PAGE_32_DEFS_H
2#define _ASM_X86_PAGE_32_DEFS_H
3
4#include <linux/const.h>
5
6/*
7 * This handles the memory map.
8 *
9 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
10 * a virtual address space of one gigabyte, which limits the
11 * amount of physical memory you can use to about 950MB.
12 *
13 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
14 * and CONFIG_HIGHMEM64G options in the kernel configuration.
15 */
16#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
17
18#ifdef CONFIG_4KSTACKS
19#define THREAD_ORDER 0
20#else
21#define THREAD_ORDER 1
22#endif
23#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
24
25#define STACKFAULT_STACK 0
26#define DOUBLEFAULT_STACK 1
27#define NMI_STACK 0
28#define DEBUG_STACK 0
29#define MCE_STACK 0
30#define N_EXCEPTION_STACKS 1
31
32#ifdef CONFIG_X86_PAE
33/* 44=32+12, the limit we can fit into an unsigned long pfn */
34#define __PHYSICAL_MASK_SHIFT 44
35#define __VIRTUAL_MASK_SHIFT 32
36
37#else /* !CONFIG_X86_PAE */
38#define __PHYSICAL_MASK_SHIFT 32
39#define __VIRTUAL_MASK_SHIFT 32
40#endif /* CONFIG_X86_PAE */
41
42/*
43 * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S)
44 */
45#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
46
47#ifndef __ASSEMBLY__
48
49/*
50 * This much address space is reserved for vmalloc() and iomap()
51 * as well as fixmap mappings.
52 */
53extern unsigned int __VMALLOC_RESERVE;
54extern int sysctl_legacy_va_layout;
55
56extern void find_low_pfn_range(void);
57extern unsigned long init_memory_mapping(unsigned long start,
58 unsigned long end);
59extern void initmem_init(unsigned long, unsigned long);
60extern void free_initmem(void);
61extern void setup_bootmem_allocator(void);
62
63#endif /* !__ASSEMBLY__ */
64
65#endif /* _ASM_X86_PAGE_32_DEFS_H */
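To make the __PAGE_OFFSET arithmetic above concrete, here is a minimal standalone sketch, not kernel code, of the virt-to-phys conversion that __phys_addr_nodebug() performs; the 0xC0000000 split and the demo addresses are illustrative:

#include <stdio.h>

#define DEMO_PAGE_OFFSET 0xC0000000UL   /* default 3G/1G split, as above */

/* same arithmetic as __phys_addr_nodebug(x): phys = virt - PAGE_OFFSET */
static unsigned long demo_virt_to_phys(unsigned long vaddr)
{
        return vaddr - DEMO_PAGE_OFFSET;
}

int main(void)
{
        /* kernel virtual 0xC0100000 corresponds to physical 0x00100000 */
        printf("%#lx\n", demo_virt_to_phys(0xC0100000UL));
        return 0;
}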
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 5ebca29f44f0..072694ed81a5 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -1,105 +1,6 @@
1#ifndef _ASM_X86_PAGE_64_H 1#ifndef _ASM_X86_PAGE_64_H
2#define _ASM_X86_PAGE_64_H 2#define _ASM_X86_PAGE_64_H
3 3
4#define PAGETABLE_LEVELS 4 4#include <asm/page_64_types.h>
5
6#define THREAD_ORDER 1
7#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
8#define CURRENT_MASK (~(THREAD_SIZE - 1))
9
10#define EXCEPTION_STACK_ORDER 0
11#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
12
13#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
14#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
15
16#define IRQSTACK_ORDER 2
17#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
18
19#define STACKFAULT_STACK 1
20#define DOUBLEFAULT_STACK 2
21#define NMI_STACK 3
22#define DEBUG_STACK 4
23#define MCE_STACK 5
24#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
25
26#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
27#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
28
29/*
30 * Set __PAGE_OFFSET to the most negative possible address +
31 * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
32 * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
33 * what Xen requires.
34 */
35#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
36
37#define __PHYSICAL_START CONFIG_PHYSICAL_START
38#define __KERNEL_ALIGN 0x200000
39
40/*
41 * Make sure kernel is aligned to 2MB address. Catching it at compile
42 * time is better. Change your config file and compile the kernel
43 * for a 2MB aligned address (CONFIG_PHYSICAL_START)
44 */
45#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
46#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
47#endif
48
49#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
50#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
51
52/* See Documentation/x86_64/mm.txt for a description of the memory map. */
53#define __PHYSICAL_MASK_SHIFT 46
54#define __VIRTUAL_MASK_SHIFT 48
55
56/*
57 * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
58 * arch/x86/kernel/head_64.S), and it is mapped here:
59 */
60#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
61#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
62
63#ifndef __ASSEMBLY__
64void clear_page(void *page);
65void copy_page(void *to, void *from);
66
67/* duplicated to the one in bootmem.h */
68extern unsigned long max_pfn;
69extern unsigned long phys_base;
70
71extern unsigned long __phys_addr(unsigned long);
72#define __phys_reloc_hide(x) (x)
73
74/*
75 * These are used to make use of C type-checking..
76 */
77typedef unsigned long pteval_t;
78typedef unsigned long pmdval_t;
79typedef unsigned long pudval_t;
80typedef unsigned long pgdval_t;
81typedef unsigned long pgprotval_t;
82
83typedef struct page *pgtable_t;
84
85typedef struct { pteval_t pte; } pte_t;
86
87#define vmemmap ((struct page *)VMEMMAP_START)
88
89extern unsigned long init_memory_mapping(unsigned long start,
90 unsigned long end);
91
92extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
93extern void free_initmem(void);
94
95extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
96extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
97
98#endif /* !__ASSEMBLY__ */
99
100#ifdef CONFIG_FLATMEM
101#define pfn_valid(pfn) ((pfn) < max_pfn)
102#endif
103
104 5
105#endif /* _ASM_X86_PAGE_64_H */ 6#endif /* _ASM_X86_PAGE_64_H */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
new file mode 100644
index 000000000000..d38c91b70248
--- /dev/null
+++ b/arch/x86/include/asm/page_64_types.h
@@ -0,0 +1,89 @@
1#ifndef _ASM_X86_PAGE_64_DEFS_H
2#define _ASM_X86_PAGE_64_DEFS_H
3
4#define THREAD_ORDER 1
5#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
6#define CURRENT_MASK (~(THREAD_SIZE - 1))
7
8#define EXCEPTION_STACK_ORDER 0
9#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
10
11#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
12#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
13
14#define IRQ_STACK_ORDER 2
15#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
16
17#define STACKFAULT_STACK 1
18#define DOUBLEFAULT_STACK 2
19#define NMI_STACK 3
20#define DEBUG_STACK 4
21#define MCE_STACK 5
22#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
23
24#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
25#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
26
27/*
28 * Set __PAGE_OFFSET to the most negative possible address +
29 * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
30 * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
31 * what Xen requires.
32 */
33#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
34
35#define __PHYSICAL_START CONFIG_PHYSICAL_START
36#define __KERNEL_ALIGN 0x200000
37
38/*
39 * Make sure kernel is aligned to 2MB address. Catching it at compile
40 * time is better. Change your config file and compile the kernel
41 * for a 2MB aligned address (CONFIG_PHYSICAL_START)
42 */
43#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
44#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
45#endif
46
47#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
48#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
49
50/* See Documentation/x86_64/mm.txt for a description of the memory map. */
51#define __PHYSICAL_MASK_SHIFT 46
52#define __VIRTUAL_MASK_SHIFT 48
53
54/*
55 * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
56 * arch/x86/kernel/head_64.S), and it is mapped here:
57 */
58#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
59#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
60
61#ifndef __ASSEMBLY__
62void clear_page(void *page);
63void copy_page(void *to, void *from);
64
65/* duplicated to the one in bootmem.h */
66extern unsigned long max_pfn;
67extern unsigned long phys_base;
68
69extern unsigned long __phys_addr(unsigned long);
70#define __phys_reloc_hide(x) (x)
71
72#define vmemmap ((struct page *)VMEMMAP_START)
73
74extern unsigned long init_memory_mapping(unsigned long start,
75 unsigned long end);
76
77extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
78extern void free_initmem(void);
79
80extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
81extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
82
83#endif /* !__ASSEMBLY__ */
84
85#ifdef CONFIG_FLATMEM
86#define pfn_valid(pfn) ((pfn) < max_pfn)
87#endif
88
89#endif /* _ASM_X86_PAGE_64_DEFS_H */
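The preprocessor check on CONFIG_PHYSICAL_START above is a generic compile-time guard; a self-contained sketch of the same pattern, with invented names standing in for the config symbols, looks like this:

/* Illustrative only: the names are made up, the technique matches the
 * CONFIG_PHYSICAL_START check above. */
#define DEMO_PHYS_START   0x1000000     /* 16 MB, a multiple of 2 MB */
#define DEMO_KERNEL_ALIGN 0x200000      /* 2 MB */

#if (DEMO_PHYS_START % DEMO_KERNEL_ALIGN) != 0
#error "DEMO_PHYS_START must be a multiple of 2MB"
#endif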
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
new file mode 100644
index 000000000000..826ad37006ab
--- /dev/null
+++ b/arch/x86/include/asm/page_types.h
@@ -0,0 +1,51 @@
1#ifndef _ASM_X86_PAGE_DEFS_H
2#define _ASM_X86_PAGE_DEFS_H
3
4#include <linux/const.h>
5
6/* PAGE_SHIFT determines the page size */
7#define PAGE_SHIFT 12
8#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
9#define PAGE_MASK (~(PAGE_SIZE-1))
10
11#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
12#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
13
14/* Cast PAGE_MASK to a signed type so that it is sign-extended if
15 virtual addresses are 32 bits but physical addresses are larger
16 (i.e., 32-bit PAE). */
17#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
18
19#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
20#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
21
22#define HPAGE_SHIFT PMD_SHIFT
23#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
24#define HPAGE_MASK (~(HPAGE_SIZE - 1))
25#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
26
27#define HUGE_MAX_HSTATE 2
28
29#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
30
31#define VM_DATA_DEFAULT_FLAGS \
32 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
33 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
34
35#ifdef CONFIG_X86_64
36#include <asm/page_64_types.h>
37#else
38#include <asm/page_32_types.h>
39#endif /* CONFIG_X86_64 */
40
41#ifndef __ASSEMBLY__
42
43extern int page_is_ram(unsigned long pagenr);
44extern int devmem_is_allowed(unsigned long pagenr);
45
46extern unsigned long max_low_pfn_mapped;
47extern unsigned long max_pfn_mapped;
48
49#endif /* !__ASSEMBLY__ */
50
51#endif /* _ASM_X86_PAGE_DEFS_H */
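The sign-extension trick behind PHYSICAL_PAGE_MASK is easy to get wrong, so here is a hedged standalone demo; uint32_t stands in for a 32-bit unsigned long under PAE, and the address value is invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t phys = 0x1234567abcULL;            /* PAE address above 4GB */
        uint32_t page_mask = ~(uint32_t)(4096 - 1); /* 0xfffff000, a 32-bit PAGE_MASK */

        /* zero-extended mask: bits 32 and up are wrongly cleared */
        printf("%#llx\n", (unsigned long long)(phys & page_mask));

        /* sign-extended mask, as PHYSICAL_PAGE_MASK does: high bits survive */
        printf("%#llx\n",
               (unsigned long long)(phys & (uint64_t)(int64_t)(int32_t)page_mask));
        return 0;
}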
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index e299287e8e33..7727aa8b7dda 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -4,7 +4,7 @@
4 * para-virtualization: those hooks are defined here. */ 4 * para-virtualization: those hooks are defined here. */
5 5
6#ifdef CONFIG_PARAVIRT 6#ifdef CONFIG_PARAVIRT
7#include <asm/page.h> 7#include <asm/pgtable_types.h>
8#include <asm/asm.h> 8#include <asm/asm.h>
9 9
10/* Bitmask of what can be clobbered: usually at least eax. */ 10/* Bitmask of what can be clobbered: usually at least eax. */
@@ -12,21 +12,38 @@
12#define CLBR_EAX (1 << 0) 12#define CLBR_EAX (1 << 0)
13#define CLBR_ECX (1 << 1) 13#define CLBR_ECX (1 << 1)
14#define CLBR_EDX (1 << 2) 14#define CLBR_EDX (1 << 2)
15#define CLBR_EDI (1 << 3)
15 16
16#ifdef CONFIG_X86_64 17#ifdef CONFIG_X86_32
17#define CLBR_RSI (1 << 3) 18/* CLBR_ANY should match all the registers the platform has. For i386, that's all of them */
18#define CLBR_RDI (1 << 4) 19#define CLBR_ANY ((1 << 4) - 1)
20
21#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
23#define CLBR_SCRATCH (0)
24#else
25#define CLBR_RAX CLBR_EAX
26#define CLBR_RCX CLBR_ECX
27#define CLBR_RDX CLBR_EDX
28#define CLBR_RDI CLBR_EDI
29#define CLBR_RSI (1 << 4)
19#define CLBR_R8 (1 << 5) 30#define CLBR_R8 (1 << 5)
20#define CLBR_R9 (1 << 6) 31#define CLBR_R9 (1 << 6)
21#define CLBR_R10 (1 << 7) 32#define CLBR_R10 (1 << 7)
22#define CLBR_R11 (1 << 8) 33#define CLBR_R11 (1 << 8)
34
23#define CLBR_ANY ((1 << 9) - 1) 35#define CLBR_ANY ((1 << 9) - 1)
36
37#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
38 CLBR_RCX | CLBR_R8 | CLBR_R9)
39#define CLBR_RET_REG (CLBR_RAX)
40#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
41
24#include <asm/desc_defs.h> 42#include <asm/desc_defs.h>
25#else
26/* CLBR_ANY should match all the registers the platform has. For i386, that's all of them */
27#define CLBR_ANY ((1 << 3) - 1)
28#endif /* X86_64 */ 43#endif /* X86_64 */
29 44
45#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
46
30#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
31#include <linux/types.h> 48#include <linux/types.h>
32#include <linux/cpumask.h> 49#include <linux/cpumask.h>
@@ -40,6 +57,14 @@ struct tss_struct;
40struct mm_struct; 57struct mm_struct;
41struct desc_struct; 58struct desc_struct;
42 59
60/*
61 * Wrapper type for pointers to code which uses the non-standard
62 * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
63 */
64struct paravirt_callee_save {
65 void *func;
66};
67
43/* general info */ 68/* general info */
44struct pv_info { 69struct pv_info {
45 unsigned int kernel_rpl; 70 unsigned int kernel_rpl;
@@ -189,11 +214,15 @@ struct pv_irq_ops {
189 * expected to use X86_EFLAGS_IF; all other bits 214 * expected to use X86_EFLAGS_IF; all other bits
190 * returned from save_fl are undefined, and may be ignored by 215 * returned from save_fl are undefined, and may be ignored by
191 * restore_fl. 216 * restore_fl.
217 *
218 * NOTE: Callers of these functions expect the callee to preserve
219 * more registers than the standard C calling convention requires.
192 */ 220 */
193 unsigned long (*save_fl)(void); 221 struct paravirt_callee_save save_fl;
194 void (*restore_fl)(unsigned long); 222 struct paravirt_callee_save restore_fl;
195 void (*irq_disable)(void); 223 struct paravirt_callee_save irq_disable;
196 void (*irq_enable)(void); 224 struct paravirt_callee_save irq_enable;
225
197 void (*safe_halt)(void); 226 void (*safe_halt)(void);
198 void (*halt)(void); 227 void (*halt)(void);
199 228
@@ -244,7 +273,8 @@ struct pv_mmu_ops {
244 void (*flush_tlb_user)(void); 273 void (*flush_tlb_user)(void);
245 void (*flush_tlb_kernel)(void); 274 void (*flush_tlb_kernel)(void);
246 void (*flush_tlb_single)(unsigned long addr); 275 void (*flush_tlb_single)(unsigned long addr);
247 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, 276 void (*flush_tlb_others)(const struct cpumask *cpus,
277 struct mm_struct *mm,
248 unsigned long va); 278 unsigned long va);
249 279
250 /* Hooks for allocating and freeing a pagetable top-level */ 280 /* Hooks for allocating and freeing a pagetable top-level */
@@ -278,18 +308,15 @@ struct pv_mmu_ops {
278 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, 308 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
279 pte_t *ptep, pte_t pte); 309 pte_t *ptep, pte_t pte);
280 310
281 pteval_t (*pte_val)(pte_t); 311 struct paravirt_callee_save pte_val;
282 pteval_t (*pte_flags)(pte_t); 312 struct paravirt_callee_save make_pte;
283 pte_t (*make_pte)(pteval_t pte);
284 313
285 pgdval_t (*pgd_val)(pgd_t); 314 struct paravirt_callee_save pgd_val;
286 pgd_t (*make_pgd)(pgdval_t pgd); 315 struct paravirt_callee_save make_pgd;
287 316
288#if PAGETABLE_LEVELS >= 3 317#if PAGETABLE_LEVELS >= 3
289#ifdef CONFIG_X86_PAE 318#ifdef CONFIG_X86_PAE
290 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); 319 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
291 void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
292 pte_t *ptep, pte_t pte);
293 void (*pte_clear)(struct mm_struct *mm, unsigned long addr, 320 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
294 pte_t *ptep); 321 pte_t *ptep);
295 void (*pmd_clear)(pmd_t *pmdp); 322 void (*pmd_clear)(pmd_t *pmdp);
@@ -298,12 +325,12 @@ struct pv_mmu_ops {
298 325
299 void (*set_pud)(pud_t *pudp, pud_t pudval); 326 void (*set_pud)(pud_t *pudp, pud_t pudval);
300 327
301 pmdval_t (*pmd_val)(pmd_t); 328 struct paravirt_callee_save pmd_val;
302 pmd_t (*make_pmd)(pmdval_t pmd); 329 struct paravirt_callee_save make_pmd;
303 330
304#if PAGETABLE_LEVELS == 4 331#if PAGETABLE_LEVELS == 4
305 pudval_t (*pud_val)(pud_t); 332 struct paravirt_callee_save pud_val;
306 pud_t (*make_pud)(pudval_t pud); 333 struct paravirt_callee_save make_pud;
307 334
308 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); 335 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
309#endif /* PAGETABLE_LEVELS == 4 */ 336#endif /* PAGETABLE_LEVELS == 4 */
@@ -360,7 +387,7 @@ extern struct pv_lock_ops pv_lock_ops;
360 387
361#define paravirt_type(op) \ 388#define paravirt_type(op) \
362 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ 389 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
363 [paravirt_opptr] "m" (op) 390 [paravirt_opptr] "i" (&(op))
364#define paravirt_clobber(clobber) \ 391#define paravirt_clobber(clobber) \
365 [paravirt_clobber] "i" (clobber) 392 [paravirt_clobber] "i" (clobber)
366 393
@@ -388,6 +415,8 @@ extern struct pv_lock_ops pv_lock_ops;
388 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") 415 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
389 416
390unsigned paravirt_patch_nop(void); 417unsigned paravirt_patch_nop(void);
418unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
419unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
391unsigned paravirt_patch_ignore(unsigned len); 420unsigned paravirt_patch_ignore(unsigned len);
392unsigned paravirt_patch_call(void *insnbuf, 421unsigned paravirt_patch_call(void *insnbuf,
393 const void *target, u16 tgt_clobbers, 422 const void *target, u16 tgt_clobbers,
@@ -412,7 +441,7 @@ int paravirt_disable_iospace(void);
412 * offset into the paravirt_patch_template structure, and can therefore be 441 * offset into the paravirt_patch_template structure, and can therefore be
413 * freely converted back into a structure offset. 442 * freely converted back into a structure offset.
414 */ 443 */
415#define PARAVIRT_CALL "call *%[paravirt_opptr];" 444#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
416 445
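The switch from an "m" to an "i" operand for paravirt_opptr, paired with the "%c" modifier in PARAVIRT_CALL above, relies on printing an immediate symbol without the '$' prefix so it can serve as a memory operand. A minimal userspace sketch of the same trick; build with gcc -fno-pie -no-pie, since the address must be a link-time constant, as it is in the kernel:

unsigned long target_var = 42;

int main(void)
{
        unsigned long v;

        /* "%c[ptr]" prints the symbol with no '$', so this assembles
         * to: mov target_var(%rip), <out-reg> */
        asm("mov %c[ptr](%%rip), %[out]"
            : [out] "=r" (v)
            : [ptr] "i" (&target_var));
        return (int)v;          /* exits with status 42 */
}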
417/* 446/*
418 * These macros are intended to wrap calls through one of the paravirt 447 * These macros are intended to wrap calls through one of the paravirt
@@ -479,25 +508,45 @@ int paravirt_disable_iospace(void);
479 * makes sure the incoming and outgoing types are always correct. 508 * makes sure the incoming and outgoing types are always correct.
480 */ 509 */
481#ifdef CONFIG_X86_32 510#ifdef CONFIG_X86_32
482#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx 511#define PVOP_VCALL_ARGS \
512 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
483#define PVOP_CALL_ARGS PVOP_VCALL_ARGS 513#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
514
515#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
516#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
517#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
518
484#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ 519#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
485 "=c" (__ecx) 520 "=c" (__ecx)
486#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS 521#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
522
523#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
524#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
525
487#define EXTRA_CLOBBERS 526#define EXTRA_CLOBBERS
488#define VEXTRA_CLOBBERS 527#define VEXTRA_CLOBBERS
489#else 528#else /* CONFIG_X86_64 */
490#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx 529#define PVOP_VCALL_ARGS \
530 unsigned long __edi = __edi, __esi = __esi, \
531 __edx = __edx, __ecx = __ecx
491#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax 532#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
533
534#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
535#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
536#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
537#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
538
492#define PVOP_VCALL_CLOBBERS "=D" (__edi), \ 539#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
493 "=S" (__esi), "=d" (__edx), \ 540 "=S" (__esi), "=d" (__edx), \
494 "=c" (__ecx) 541 "=c" (__ecx)
495
496#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) 542#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
497 543
544#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
545#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
546
498#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" 547#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
499#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" 548#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
500#endif 549#endif /* CONFIG_X86_32 */
501 550
502#ifdef CONFIG_PARAVIRT_DEBUG 551#ifdef CONFIG_PARAVIRT_DEBUG
503#define PVOP_TEST_NULL(op) BUG_ON(op == NULL) 552#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
@@ -505,10 +554,11 @@ int paravirt_disable_iospace(void);
505#define PVOP_TEST_NULL(op) ((void)op) 554#define PVOP_TEST_NULL(op) ((void)op)
506#endif 555#endif
507 556
508#define __PVOP_CALL(rettype, op, pre, post, ...) \ 557#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
558 pre, post, ...) \
509 ({ \ 559 ({ \
510 rettype __ret; \ 560 rettype __ret; \
511 PVOP_CALL_ARGS; \ 561 PVOP_CALL_ARGS; \
512 PVOP_TEST_NULL(op); \ 562 PVOP_TEST_NULL(op); \
513 /* This is 32-bit specific, but is okay in 64-bit */ \ 563 /* This is 32-bit specific, but is okay in 64-bit */ \
514 /* since this condition will never hold */ \ 564 /* since this condition will never hold */ \
@@ -516,70 +566,113 @@ int paravirt_disable_iospace(void);
516 asm volatile(pre \ 566 asm volatile(pre \
517 paravirt_alt(PARAVIRT_CALL) \ 567 paravirt_alt(PARAVIRT_CALL) \
518 post \ 568 post \
519 : PVOP_CALL_CLOBBERS \ 569 : call_clbr \
520 : paravirt_type(op), \ 570 : paravirt_type(op), \
521 paravirt_clobber(CLBR_ANY), \ 571 paravirt_clobber(clbr), \
522 ##__VA_ARGS__ \ 572 ##__VA_ARGS__ \
523 : "memory", "cc" EXTRA_CLOBBERS); \ 573 : "memory", "cc" extra_clbr); \
524 __ret = (rettype)((((u64)__edx) << 32) | __eax); \ 574 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
525 } else { \ 575 } else { \
526 asm volatile(pre \ 576 asm volatile(pre \
527 paravirt_alt(PARAVIRT_CALL) \ 577 paravirt_alt(PARAVIRT_CALL) \
528 post \ 578 post \
529 : PVOP_CALL_CLOBBERS \ 579 : call_clbr \
530 : paravirt_type(op), \ 580 : paravirt_type(op), \
531 paravirt_clobber(CLBR_ANY), \ 581 paravirt_clobber(clbr), \
532 ##__VA_ARGS__ \ 582 ##__VA_ARGS__ \
533 : "memory", "cc" EXTRA_CLOBBERS); \ 583 : "memory", "cc" extra_clbr); \
534 __ret = (rettype)__eax; \ 584 __ret = (rettype)__eax; \
535 } \ 585 } \
536 __ret; \ 586 __ret; \
537 }) 587 })
538#define __PVOP_VCALL(op, pre, post, ...) \ 588
589#define __PVOP_CALL(rettype, op, pre, post, ...) \
590 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
591 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
592
593#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
594 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
595 PVOP_CALLEE_CLOBBERS, , \
596 pre, post, ##__VA_ARGS__)
597
598
599#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
539 ({ \ 600 ({ \
540 PVOP_VCALL_ARGS; \ 601 PVOP_VCALL_ARGS; \
541 PVOP_TEST_NULL(op); \ 602 PVOP_TEST_NULL(op); \
542 asm volatile(pre \ 603 asm volatile(pre \
543 paravirt_alt(PARAVIRT_CALL) \ 604 paravirt_alt(PARAVIRT_CALL) \
544 post \ 605 post \
545 : PVOP_VCALL_CLOBBERS \ 606 : call_clbr \
546 : paravirt_type(op), \ 607 : paravirt_type(op), \
547 paravirt_clobber(CLBR_ANY), \ 608 paravirt_clobber(clbr), \
548 ##__VA_ARGS__ \ 609 ##__VA_ARGS__ \
549 : "memory", "cc" VEXTRA_CLOBBERS); \ 610 : "memory", "cc" extra_clbr); \
550 }) 611 })
551 612
613#define __PVOP_VCALL(op, pre, post, ...) \
614 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
615 VEXTRA_CLOBBERS, \
616 pre, post, ##__VA_ARGS__)
617
618#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
619 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
620 PVOP_VCALLEE_CLOBBERS, , \
621 pre, post, ##__VA_ARGS__)
622
623
624
552#define PVOP_CALL0(rettype, op) \ 625#define PVOP_CALL0(rettype, op) \
553 __PVOP_CALL(rettype, op, "", "") 626 __PVOP_CALL(rettype, op, "", "")
554#define PVOP_VCALL0(op) \ 627#define PVOP_VCALL0(op) \
555 __PVOP_VCALL(op, "", "") 628 __PVOP_VCALL(op, "", "")
556 629
630#define PVOP_CALLEE0(rettype, op) \
631 __PVOP_CALLEESAVE(rettype, op, "", "")
632#define PVOP_VCALLEE0(op) \
633 __PVOP_VCALLEESAVE(op, "", "")
634
635
557#define PVOP_CALL1(rettype, op, arg1) \ 636#define PVOP_CALL1(rettype, op, arg1) \
558 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1))) 637 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
559#define PVOP_VCALL1(op, arg1) \ 638#define PVOP_VCALL1(op, arg1) \
560 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1))) 639 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
640
641#define PVOP_CALLEE1(rettype, op, arg1) \
642 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
643#define PVOP_VCALLEE1(op, arg1) \
644 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
645
561 646
562#define PVOP_CALL2(rettype, op, arg1, arg2) \ 647#define PVOP_CALL2(rettype, op, arg1, arg2) \
563 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 648 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
564 "1" ((unsigned long)(arg2))) 649 PVOP_CALL_ARG2(arg2))
565#define PVOP_VCALL2(op, arg1, arg2) \ 650#define PVOP_VCALL2(op, arg1, arg2) \
566 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 651 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
567 "1" ((unsigned long)(arg2))) 652 PVOP_CALL_ARG2(arg2))
653
654#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
655 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
656 PVOP_CALL_ARG2(arg2))
657#define PVOP_VCALLEE2(op, arg1, arg2) \
658 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
659 PVOP_CALL_ARG2(arg2))
660
568 661
569#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ 662#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
570 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 663 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
571 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) 664 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
572#define PVOP_VCALL3(op, arg1, arg2, arg3) \ 665#define PVOP_VCALL3(op, arg1, arg2, arg3) \
573 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 666 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
574 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) 667 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
575 668
576/* This is the only place x86-64 differs; there we can make it much simpler */ 669/* This is the only place x86-64 differs; there we can make it much simpler */
577#ifdef CONFIG_X86_32 670#ifdef CONFIG_X86_32
578#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 671#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
579 __PVOP_CALL(rettype, op, \ 672 __PVOP_CALL(rettype, op, \
580 "push %[_arg4];", "lea 4(%%esp),%%esp;", \ 673 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
581 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ 674 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
582 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) 675 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
583#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 676#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
584 __PVOP_VCALL(op, \ 677 __PVOP_VCALL(op, \
585 "push %[_arg4];", "lea 4(%%esp),%%esp;", \ 678 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
@@ -587,13 +680,13 @@ int paravirt_disable_iospace(void);
587 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) 680 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
588#else 681#else
589#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 682#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
590 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 683 __PVOP_CALL(rettype, op, "", "", \
591 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ 684 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
592 "3"((unsigned long)(arg4))) 685 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
593#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 686#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
594 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 687 __PVOP_VCALL(op, "", "", \
595 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ 688 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
596 "3"((unsigned long)(arg4))) 689 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
597#endif 690#endif
598 691
599static inline int paravirt_enabled(void) 692static inline int paravirt_enabled(void)
@@ -984,10 +1077,11 @@ static inline void __flush_tlb_single(unsigned long addr)
984 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); 1077 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
985} 1078}
986 1079
987static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, 1080static inline void flush_tlb_others(const struct cpumask *cpumask,
1081 struct mm_struct *mm,
988 unsigned long va) 1082 unsigned long va)
989{ 1083{
990 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va); 1084 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
991} 1085}
992 1086
993static inline int paravirt_pgd_alloc(struct mm_struct *mm) 1087static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@ -1059,13 +1153,13 @@ static inline pte_t __pte(pteval_t val)
1059 pteval_t ret; 1153 pteval_t ret;
1060 1154
1061 if (sizeof(pteval_t) > sizeof(long)) 1155 if (sizeof(pteval_t) > sizeof(long))
1062 ret = PVOP_CALL2(pteval_t, 1156 ret = PVOP_CALLEE2(pteval_t,
1063 pv_mmu_ops.make_pte, 1157 pv_mmu_ops.make_pte,
1064 val, (u64)val >> 32); 1158 val, (u64)val >> 32);
1065 else 1159 else
1066 ret = PVOP_CALL1(pteval_t, 1160 ret = PVOP_CALLEE1(pteval_t,
1067 pv_mmu_ops.make_pte, 1161 pv_mmu_ops.make_pte,
1068 val); 1162 val);
1069 1163
1070 return (pte_t) { .pte = ret }; 1164 return (pte_t) { .pte = ret };
1071} 1165}
@@ -1075,29 +1169,12 @@ static inline pteval_t pte_val(pte_t pte)
1075 pteval_t ret; 1169 pteval_t ret;
1076 1170
1077 if (sizeof(pteval_t) > sizeof(long)) 1171 if (sizeof(pteval_t) > sizeof(long))
1078 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, 1172 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1079 pte.pte, (u64)pte.pte >> 32); 1173 pte.pte, (u64)pte.pte >> 32);
1080 else
1081 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
1082 pte.pte);
1083
1084 return ret;
1085}
1086
1087static inline pteval_t pte_flags(pte_t pte)
1088{
1089 pteval_t ret;
1090
1091 if (sizeof(pteval_t) > sizeof(long))
1092 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1093 pte.pte, (u64)pte.pte >> 32);
1094 else 1174 else
1095 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, 1175 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1096 pte.pte); 1176 pte.pte);
1097 1177
1098#ifdef CONFIG_PARAVIRT_DEBUG
1099 BUG_ON(ret & PTE_PFN_MASK);
1100#endif
1101 return ret; 1178 return ret;
1102} 1179}
1103 1180
@@ -1106,11 +1183,11 @@ static inline pgd_t __pgd(pgdval_t val)
1106 pgdval_t ret; 1183 pgdval_t ret;
1107 1184
1108 if (sizeof(pgdval_t) > sizeof(long)) 1185 if (sizeof(pgdval_t) > sizeof(long))
1109 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, 1186 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1110 val, (u64)val >> 32); 1187 val, (u64)val >> 32);
1111 else 1188 else
1112 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, 1189 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1113 val); 1190 val);
1114 1191
1115 return (pgd_t) { ret }; 1192 return (pgd_t) { ret };
1116} 1193}
@@ -1120,11 +1197,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
1120 pgdval_t ret; 1197 pgdval_t ret;
1121 1198
1122 if (sizeof(pgdval_t) > sizeof(long)) 1199 if (sizeof(pgdval_t) > sizeof(long))
1123 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, 1200 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1124 pgd.pgd, (u64)pgd.pgd >> 32); 1201 pgd.pgd, (u64)pgd.pgd >> 32);
1125 else 1202 else
1126 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, 1203 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1127 pgd.pgd); 1204 pgd.pgd);
1128 1205
1129 return ret; 1206 return ret;
1130} 1207}
@@ -1188,11 +1265,11 @@ static inline pmd_t __pmd(pmdval_t val)
1188 pmdval_t ret; 1265 pmdval_t ret;
1189 1266
1190 if (sizeof(pmdval_t) > sizeof(long)) 1267 if (sizeof(pmdval_t) > sizeof(long))
1191 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, 1268 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1192 val, (u64)val >> 32); 1269 val, (u64)val >> 32);
1193 else 1270 else
1194 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, 1271 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1195 val); 1272 val);
1196 1273
1197 return (pmd_t) { ret }; 1274 return (pmd_t) { ret };
1198} 1275}
@@ -1202,11 +1279,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
1202 pmdval_t ret; 1279 pmdval_t ret;
1203 1280
1204 if (sizeof(pmdval_t) > sizeof(long)) 1281 if (sizeof(pmdval_t) > sizeof(long))
1205 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, 1282 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1206 pmd.pmd, (u64)pmd.pmd >> 32); 1283 pmd.pmd, (u64)pmd.pmd >> 32);
1207 else 1284 else
1208 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, 1285 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1209 pmd.pmd); 1286 pmd.pmd);
1210 1287
1211 return ret; 1288 return ret;
1212} 1289}
@@ -1228,11 +1305,11 @@ static inline pud_t __pud(pudval_t val)
1228 pudval_t ret; 1305 pudval_t ret;
1229 1306
1230 if (sizeof(pudval_t) > sizeof(long)) 1307 if (sizeof(pudval_t) > sizeof(long))
1231 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, 1308 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1232 val, (u64)val >> 32); 1309 val, (u64)val >> 32);
1233 else 1310 else
1234 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, 1311 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1235 val); 1312 val);
1236 1313
1237 return (pud_t) { ret }; 1314 return (pud_t) { ret };
1238} 1315}
@@ -1242,11 +1319,11 @@ static inline pudval_t pud_val(pud_t pud)
1242 pudval_t ret; 1319 pudval_t ret;
1243 1320
1244 if (sizeof(pudval_t) > sizeof(long)) 1321 if (sizeof(pudval_t) > sizeof(long))
1245 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val, 1322 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
1246 pud.pud, (u64)pud.pud >> 32); 1323 pud.pud, (u64)pud.pud >> 32);
1247 else 1324 else
1248 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, 1325 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
1249 pud.pud); 1326 pud.pud);
1250 1327
1251 return ret; 1328 return ret;
1252} 1329}
@@ -1286,13 +1363,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1286 pte.pte, pte.pte >> 32); 1363 pte.pte, pte.pte >> 32);
1287} 1364}
1288 1365
1289static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1290 pte_t *ptep, pte_t pte)
1291{
1292 /* 5 arg words */
1293 pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
1294}
1295
1296static inline void pte_clear(struct mm_struct *mm, unsigned long addr, 1366static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1297 pte_t *ptep) 1367 pte_t *ptep)
1298{ 1368{
@@ -1309,12 +1379,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1309 set_pte(ptep, pte); 1379 set_pte(ptep, pte);
1310} 1380}
1311 1381
1312static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1313 pte_t *ptep, pte_t pte)
1314{
1315 set_pte(ptep, pte);
1316}
1317
1318static inline void pte_clear(struct mm_struct *mm, unsigned long addr, 1382static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1319 pte_t *ptep) 1383 pte_t *ptep)
1320{ 1384{
@@ -1374,9 +1438,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1374} 1438}
1375 1439
1376void _paravirt_nop(void); 1440void _paravirt_nop(void);
1377#define paravirt_nop ((void *)_paravirt_nop) 1441u32 _paravirt_ident_32(u32);
1442u64 _paravirt_ident_64(u64);
1378 1443
1379void paravirt_use_bytelocks(void); 1444#define paravirt_nop ((void *)_paravirt_nop)
1380 1445
1381#ifdef CONFIG_SMP 1446#ifdef CONFIG_SMP
1382 1447
@@ -1426,12 +1491,37 @@ extern struct paravirt_patch_site __parainstructions[],
1426 __parainstructions_end[]; 1491 __parainstructions_end[];
1427 1492
1428#ifdef CONFIG_X86_32 1493#ifdef CONFIG_X86_32
1429#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;" 1494#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1430#define PV_RESTORE_REGS "popl %%edx; popl %%ecx" 1495#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
1496
1497/* save and restore all caller-save registers, except return value */
1498#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
1499#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
1500
1431#define PV_FLAGS_ARG "0" 1501#define PV_FLAGS_ARG "0"
1432#define PV_EXTRA_CLOBBERS 1502#define PV_EXTRA_CLOBBERS
1433#define PV_VEXTRA_CLOBBERS 1503#define PV_VEXTRA_CLOBBERS
1434#else 1504#else
1505/* save and restore all caller-save registers, except return value */
1506#define PV_SAVE_ALL_CALLER_REGS \
1507 "push %rcx;" \
1508 "push %rdx;" \
1509 "push %rsi;" \
1510 "push %rdi;" \
1511 "push %r8;" \
1512 "push %r9;" \
1513 "push %r10;" \
1514 "push %r11;"
1515#define PV_RESTORE_ALL_CALLER_REGS \
1516 "pop %r11;" \
1517 "pop %r10;" \
1518 "pop %r9;" \
1519 "pop %r8;" \
1520 "pop %rdi;" \
1521 "pop %rsi;" \
1522 "pop %rdx;" \
1523 "pop %rcx;"
1524
1435/* We save some registers, but saving all of them would be too much. We 1525/* We save some registers, but saving all of them would be too much. We
1436 * clobber all caller-saved registers except the argument register */ 1526 * clobber all caller-saved registers except the argument register */
1437#define PV_SAVE_REGS "pushq %%rdi;" 1527#define PV_SAVE_REGS "pushq %%rdi;"
@@ -1441,52 +1531,76 @@ extern struct paravirt_patch_site __parainstructions[],
1441#define PV_FLAGS_ARG "D" 1531#define PV_FLAGS_ARG "D"
1442#endif 1532#endif
1443 1533
1534/*
1535 * Generate a thunk around a function which saves all caller-save
1536 * registers except for the return value. This allows C functions to
1537 * be called from assembler code where fewer than normal registers are
1538 * available. It may also help code generation around calls from C
1539 * code if the common case doesn't use many registers.
1540 *
1541 * When a callee is wrapped in a thunk, the caller can assume that all
1542 * arg regs and all scratch registers are preserved across the
1543 * call. The return value in rax/eax will not be saved, even for void
1544 * functions.
1545 */
1546#define PV_CALLEE_SAVE_REGS_THUNK(func) \
1547 extern typeof(func) __raw_callee_save_##func; \
1548 static void *__##func##__ __used = func; \
1549 \
1550 asm(".pushsection .text;" \
1551 "__raw_callee_save_" #func ": " \
1552 PV_SAVE_ALL_CALLER_REGS \
1553 "call " #func ";" \
1554 PV_RESTORE_ALL_CALLER_REGS \
1555 "ret;" \
1556 ".popsection")
1557
1558/* Get a reference to a callee-save function */
1559#define PV_CALLEE_SAVE(func) \
1560 ((struct paravirt_callee_save) { __raw_callee_save_##func })
1561
1562/* Promise that "func" already uses the right calling convention */
1563#define __PV_IS_CALLEE_SAVE(func) \
1564 ((struct paravirt_callee_save) { func })
1565
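A hedged usage sketch for the thunk machinery above; the backend function is invented for illustration, while PV_CALLEE_SAVE_REGS_THUNK and PV_CALLEE_SAVE are the real macros defined just before this point:

/* Hypothetical hypervisor backend, illustration only */
static unsigned long demo_save_fl(void)
{
        return 0;       /* pretend IRQs are always disabled */
}
PV_CALLEE_SAVE_REGS_THUNK(demo_save_fl);

static void demo_install(void)
{
        /* the thunk preserves all caller-save regs except the return
         * value, so it satisfies the save_fl calling convention */
        pv_irq_ops.save_fl = PV_CALLEE_SAVE(demo_save_fl);
}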
1444static inline unsigned long __raw_local_save_flags(void) 1566static inline unsigned long __raw_local_save_flags(void)
1445{ 1567{
1446 unsigned long f; 1568 unsigned long f;
1447 1569
1448 asm volatile(paravirt_alt(PV_SAVE_REGS 1570 asm volatile(paravirt_alt(PARAVIRT_CALL)
1449 PARAVIRT_CALL
1450 PV_RESTORE_REGS)
1451 : "=a"(f) 1571 : "=a"(f)
1452 : paravirt_type(pv_irq_ops.save_fl), 1572 : paravirt_type(pv_irq_ops.save_fl),
1453 paravirt_clobber(CLBR_EAX) 1573 paravirt_clobber(CLBR_EAX)
1454 : "memory", "cc" PV_VEXTRA_CLOBBERS); 1574 : "memory", "cc");
1455 return f; 1575 return f;
1456} 1576}
1457 1577
1458static inline void raw_local_irq_restore(unsigned long f) 1578static inline void raw_local_irq_restore(unsigned long f)
1459{ 1579{
1460 asm volatile(paravirt_alt(PV_SAVE_REGS 1580 asm volatile(paravirt_alt(PARAVIRT_CALL)
1461 PARAVIRT_CALL
1462 PV_RESTORE_REGS)
1463 : "=a"(f) 1581 : "=a"(f)
1464 : PV_FLAGS_ARG(f), 1582 : PV_FLAGS_ARG(f),
1465 paravirt_type(pv_irq_ops.restore_fl), 1583 paravirt_type(pv_irq_ops.restore_fl),
1466 paravirt_clobber(CLBR_EAX) 1584 paravirt_clobber(CLBR_EAX)
1467 : "memory", "cc" PV_EXTRA_CLOBBERS); 1585 : "memory", "cc");
1468} 1586}
1469 1587
1470static inline void raw_local_irq_disable(void) 1588static inline void raw_local_irq_disable(void)
1471{ 1589{
1472 asm volatile(paravirt_alt(PV_SAVE_REGS 1590 asm volatile(paravirt_alt(PARAVIRT_CALL)
1473 PARAVIRT_CALL
1474 PV_RESTORE_REGS)
1475 : 1591 :
1476 : paravirt_type(pv_irq_ops.irq_disable), 1592 : paravirt_type(pv_irq_ops.irq_disable),
1477 paravirt_clobber(CLBR_EAX) 1593 paravirt_clobber(CLBR_EAX)
1478 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); 1594 : "memory", "eax", "cc");
1479} 1595}
1480 1596
1481static inline void raw_local_irq_enable(void) 1597static inline void raw_local_irq_enable(void)
1482{ 1598{
1483 asm volatile(paravirt_alt(PV_SAVE_REGS 1599 asm volatile(paravirt_alt(PARAVIRT_CALL)
1484 PARAVIRT_CALL
1485 PV_RESTORE_REGS)
1486 : 1600 :
1487 : paravirt_type(pv_irq_ops.irq_enable), 1601 : paravirt_type(pv_irq_ops.irq_enable),
1488 paravirt_clobber(CLBR_EAX) 1602 paravirt_clobber(CLBR_EAX)
1489 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); 1603 : "memory", "eax", "cc");
1490} 1604}
1491 1605
1492static inline unsigned long __raw_local_irq_save(void) 1606static inline unsigned long __raw_local_irq_save(void)
@@ -1529,33 +1643,49 @@ static inline unsigned long __raw_local_irq_save(void)
1529 .popsection 1643 .popsection
1530 1644
1531 1645
1646#define COND_PUSH(set, mask, reg) \
1647 .if ((~(set)) & mask); push %reg; .endif
1648#define COND_POP(set, mask, reg) \
1649 .if ((~(set)) & mask); pop %reg; .endif
1650
1532#ifdef CONFIG_X86_64 1651#ifdef CONFIG_X86_64
1533#define PV_SAVE_REGS \ 1652
1534 push %rax; \ 1653#define PV_SAVE_REGS(set) \
1535 push %rcx; \ 1654 COND_PUSH(set, CLBR_RAX, rax); \
1536 push %rdx; \ 1655 COND_PUSH(set, CLBR_RCX, rcx); \
1537 push %rsi; \ 1656 COND_PUSH(set, CLBR_RDX, rdx); \
1538 push %rdi; \ 1657 COND_PUSH(set, CLBR_RSI, rsi); \
1539 push %r8; \ 1658 COND_PUSH(set, CLBR_RDI, rdi); \
1540 push %r9; \ 1659 COND_PUSH(set, CLBR_R8, r8); \
1541 push %r10; \ 1660 COND_PUSH(set, CLBR_R9, r9); \
1542 push %r11 1661 COND_PUSH(set, CLBR_R10, r10); \
1543#define PV_RESTORE_REGS \ 1662 COND_PUSH(set, CLBR_R11, r11)
1544 pop %r11; \ 1663#define PV_RESTORE_REGS(set) \
1545 pop %r10; \ 1664 COND_POP(set, CLBR_R11, r11); \
1546 pop %r9; \ 1665 COND_POP(set, CLBR_R10, r10); \
1547 pop %r8; \ 1666 COND_POP(set, CLBR_R9, r9); \
1548 pop %rdi; \ 1667 COND_POP(set, CLBR_R8, r8); \
1549 pop %rsi; \ 1668 COND_POP(set, CLBR_RDI, rdi); \
1550 pop %rdx; \ 1669 COND_POP(set, CLBR_RSI, rsi); \
1551 pop %rcx; \ 1670 COND_POP(set, CLBR_RDX, rdx); \
1552 pop %rax 1671 COND_POP(set, CLBR_RCX, rcx); \
1672 COND_POP(set, CLBR_RAX, rax)
1673
1553#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) 1674#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1554#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) 1675#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1555#define PARA_INDIRECT(addr) *addr(%rip) 1676#define PARA_INDIRECT(addr) *addr(%rip)
1556#else 1677#else
1557#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx 1678#define PV_SAVE_REGS(set) \
1558#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax 1679 COND_PUSH(set, CLBR_EAX, eax); \
1680 COND_PUSH(set, CLBR_EDI, edi); \
1681 COND_PUSH(set, CLBR_ECX, ecx); \
1682 COND_PUSH(set, CLBR_EDX, edx)
1683#define PV_RESTORE_REGS(set) \
1684 COND_POP(set, CLBR_EDX, edx); \
1685 COND_POP(set, CLBR_ECX, ecx); \
1686 COND_POP(set, CLBR_EDI, edi); \
1687 COND_POP(set, CLBR_EAX, eax)
1688
1559#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) 1689#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1560#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) 1690#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1561#define PARA_INDIRECT(addr) *%cs:addr 1691#define PARA_INDIRECT(addr) *%cs:addr
@@ -1567,15 +1697,15 @@ static inline unsigned long __raw_local_irq_save(void)
1567 1697
1568#define DISABLE_INTERRUPTS(clobbers) \ 1698#define DISABLE_INTERRUPTS(clobbers) \
1569 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ 1699 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1570 PV_SAVE_REGS; \ 1700 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1571 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ 1701 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1572 PV_RESTORE_REGS;) \ 1702 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1573 1703
1574#define ENABLE_INTERRUPTS(clobbers) \ 1704#define ENABLE_INTERRUPTS(clobbers) \
1575 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ 1705 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1576 PV_SAVE_REGS; \ 1706 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1577 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ 1707 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1578 PV_RESTORE_REGS;) 1708 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1579 1709
1580#define USERGS_SYSRET32 \ 1710#define USERGS_SYSRET32 \
1581 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ 1711 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
@@ -1605,11 +1735,15 @@ static inline unsigned long __raw_local_irq_save(void)
1605 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1735 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1606 swapgs) 1736 swapgs)
1607 1737
1738/*
1739 * Note: swapgs is very special; in practice it is either implemented
1740 * with a single "swapgs" instruction or by something equally
1741 * special. Either way, we don't need to save any registers
1742 * for it.
1743 */
1608#define SWAPGS \ 1744#define SWAPGS \
1609 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1745 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1610 PV_SAVE_REGS; \ 1746 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
1611 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
1612 PV_RESTORE_REGS \
1613 ) 1747 )
1614 1748
1615#define GET_CR2_INTO_RCX \ 1749#define GET_CR2_INTO_RCX \
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index b8493b3b9890..2cd07b9422f4 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -2,13 +2,12 @@
2#define _ASM_X86_PAT_H 2#define _ASM_X86_PAT_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/pgtable_types.h>
5 6
6#ifdef CONFIG_X86_PAT 7#ifdef CONFIG_X86_PAT
7extern int pat_enabled; 8extern int pat_enabled;
8extern void validate_pat_support(struct cpuinfo_x86 *c);
9#else 9#else
10static const int pat_enabled; 10static const int pat_enabled;
11static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
12#endif 11#endif
13 12
14extern void pat_init(void); 13extern void pat_init(void);
@@ -17,6 +16,11 @@ extern int reserve_memtype(u64 start, u64 end,
17 unsigned long req_type, unsigned long *ret_type); 16 unsigned long req_type, unsigned long *ret_type);
18extern int free_memtype(u64 start, u64 end); 17extern int free_memtype(u64 start, u64 end);
19 18
20extern void pat_disable(char *reason); 19extern int kernel_map_sync_memtype(u64 base, unsigned long size,
20 unsigned long flag);
21extern void map_devmem(unsigned long pfn, unsigned long size,
22 struct pgprot vma_prot);
23extern void unmap_devmem(unsigned long pfn, unsigned long size,
24 struct pgprot vma_prot);
21 25
22#endif /* _ASM_X86_PAT_H */ 26#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/mach-default/pci-functions.h b/arch/x86/include/asm/pci-functions.h
index ed0bab427354..ed0bab427354 100644
--- a/arch/x86/include/asm/mach-default/pci-functions.h
+++ b/arch/x86/include/asm/pci-functions.h
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index a977de23cb4d..e545ea01abcf 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -86,12 +86,43 @@ static inline void early_quirks(void) { }
86 86
87extern void pci_iommu_alloc(void); 87extern void pci_iommu_alloc(void);
88 88
89#endif /* __KERNEL__ */ 89/* MSI arch hook */
90#define arch_setup_msi_irqs arch_setup_msi_irqs
91
92#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
93
94#if defined(CONFIG_X86_64) || defined(CONFIG_DMA_API_DEBUG)
95
96#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
97 dma_addr_t ADDR_NAME;
98#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
99 __u32 LEN_NAME;
100#define pci_unmap_addr(PTR, ADDR_NAME) \
101 ((PTR)->ADDR_NAME)
102#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
103 (((PTR)->ADDR_NAME) = (VAL))
104#define pci_unmap_len(PTR, LEN_NAME) \
105 ((PTR)->LEN_NAME)
106#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
107 (((PTR)->LEN_NAME) = (VAL))
90 108
91#ifdef CONFIG_X86_32
92# include "pci_32.h"
93#else 109#else
94# include "pci_64.h" 110
111#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0];
112#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0];
113#define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME)
114#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
115 do { break; } while (pci_unmap_addr(PTR, ADDR_NAME))
116#define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME)
117#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
118 do { break; } while (pci_unmap_len(PTR, LEN_NAME))
119
120#endif
121
122#endif /* __KERNEL__ */
123
124#ifdef CONFIG_X86_64
125#include "pci_64.h"
95#endif 126#endif
96 127
97/* implement the pci_ DMA API in terms of the generic device dma_ one */ 128/* implement the pci_ DMA API in terms of the generic device dma_ one */
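The !X86_64 branch above keeps the long-standing i386 trick of compiling the unmap bookkeeping away; a standalone sketch (GNU C, demo names) of how the zero-length members and sizeof-based accessors vanish:

#include <stdio.h>

struct demo_buf {
        void *cpu_addr;
        unsigned long mapping[0];   /* zero-length: occupies no space */
};

int main(void)
{
        /* pci_unmap_addr(PTR, name) becomes sizeof((PTR)->name) == 0, and
         * the do { break; } while (...) setters generate no code at all */
        printf("%zu %zu\n", sizeof(struct demo_buf),
               sizeof(((struct demo_buf *)0)->mapping));
        return 0;
}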
diff --git a/arch/x86/include/asm/pci_32.h b/arch/x86/include/asm/pci_32.h
deleted file mode 100644
index 6f1213a6ef4f..000000000000
--- a/arch/x86/include/asm/pci_32.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef _ASM_X86_PCI_32_H
2#define _ASM_X86_PCI_32_H
3
4
5#ifdef __KERNEL__
6
7
8/* Dynamic DMA mapping stuff.
9 * i386 has everything mapped statically.
10 */
11
12struct pci_dev;
13
14/* The PCI address space equals the physical memory
15 * address space. The networking and block device layers use
16 * this boolean for bounce buffer decisions.
17 */
18#define PCI_DMA_BUS_IS_PHYS (1)
19
20/* pci_unmap_{page,single} is a nop so... */
21#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0];
22#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0];
23#define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME)
24#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
25 do { break; } while (pci_unmap_addr(PTR, ADDR_NAME))
26#define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME)
27#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
28 do { break; } while (pci_unmap_len(PTR, LEN_NAME))
29
30
31#endif /* __KERNEL__ */
32
33
34#endif /* _ASM_X86_PCI_32_H */
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
index 4da207982777..ae5e40f67daf 100644
--- a/arch/x86/include/asm/pci_64.h
+++ b/arch/x86/include/asm/pci_64.h
@@ -24,28 +24,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
24 24
25extern void dma32_reserve_bootmem(void); 25extern void dma32_reserve_bootmem(void);
26 26
27/* The PCI address space equals the physical memory
28 * address space. The networking and block device layers use
29 * this boolean for bounce buffer decisions.
30 *
31 * On AMD64 it mostly equals, but we set it to zero if a hardware
32 * IOMMU (gart) or software IOMMU (swiotlb) is available.
33 */
34#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
35
36#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
37 dma_addr_t ADDR_NAME;
38#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
39 __u32 LEN_NAME;
40#define pci_unmap_addr(PTR, ADDR_NAME) \
41 ((PTR)->ADDR_NAME)
42#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
43 (((PTR)->ADDR_NAME) = (VAL))
44#define pci_unmap_len(PTR, LEN_NAME) \
45 ((PTR)->LEN_NAME)
46#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
47 (((PTR)->LEN_NAME) = (VAL))
48
49#endif /* __KERNEL__ */ 27#endif /* __KERNEL__ */
50 28
51#endif /* _ASM_X86_PCI_64_H */ 29#endif /* _ASM_X86_PCI_64_H */
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
deleted file mode 100644
index 2fbfff88df37..000000000000
--- a/arch/x86/include/asm/pda.h
+++ /dev/null
@@ -1,137 +0,0 @@
1#ifndef _ASM_X86_PDA_H
2#define _ASM_X86_PDA_H
3
4#ifndef __ASSEMBLY__
5#include <linux/stddef.h>
6#include <linux/types.h>
7#include <linux/cache.h>
8#include <asm/page.h>
9
10/* Per-processor data structure. %gs points to it while the kernel runs */
11struct x8664_pda {
12 struct task_struct *pcurrent; /* 0 Current process */
13 unsigned long data_offset; /* 8 Per cpu data offset from linker
14 address */
15 unsigned long kernelstack; /* 16 top of kernel stack for current */
16 unsigned long oldrsp; /* 24 user rsp for system call */
17 int irqcount; /* 32 Irq nesting counter. Starts -1 */
18 unsigned int cpunumber; /* 36 Logical CPU number */
19#ifdef CONFIG_CC_STACKPROTECTOR
20 unsigned long stack_canary; /* 40 stack canary value */
21 /* gcc-ABI: this canary MUST be at
22 offset 40!!! */
23#endif
24 char *irqstackptr;
25 short nodenumber; /* number of current node (32k max) */
26 short in_bootmem; /* pda lives in bootmem */
27 unsigned int __softirq_pending;
28 unsigned int __nmi_count; /* number of NMIs on this CPU */
29 short mmu_state;
30 short isidle;
31 struct mm_struct *active_mm;
32 unsigned apic_timer_irqs;
33 unsigned irq0_irqs;
34 unsigned irq_resched_count;
35 unsigned irq_call_count;
36 unsigned irq_tlb_count;
37 unsigned irq_thermal_count;
38 unsigned irq_threshold_count;
39 unsigned irq_spurious_count;
40} ____cacheline_aligned_in_smp;
41
42extern struct x8664_pda **_cpu_pda;
43extern void pda_init(int);
44
45#define cpu_pda(i) (_cpu_pda[i])
46
47/*
48 * There is no fast way to get the base address of the PDA, all the accesses
49 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
50 */
51extern void __bad_pda_field(void) __attribute__((noreturn));
52
53/*
54 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
55 * all PDA accesses so it gets read/write dependencies right.
56 */
57extern struct x8664_pda _proxy_pda;
58
59#define pda_offset(field) offsetof(struct x8664_pda, field)
60
61#define pda_to_op(op, field, val) \
62do { \
63 typedef typeof(_proxy_pda.field) T__; \
64 if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
65 switch (sizeof(_proxy_pda.field)) { \
66 case 2: \
67 asm(op "w %1,%%gs:%c2" : \
68 "+m" (_proxy_pda.field) : \
69 "ri" ((T__)val), \
70 "i"(pda_offset(field))); \
71 break; \
72 case 4: \
73 asm(op "l %1,%%gs:%c2" : \
74 "+m" (_proxy_pda.field) : \
75 "ri" ((T__)val), \
76 "i" (pda_offset(field))); \
77 break; \
78 case 8: \
79 asm(op "q %1,%%gs:%c2": \
80 "+m" (_proxy_pda.field) : \
81 "ri" ((T__)val), \
82 "i"(pda_offset(field))); \
83 break; \
84 default: \
85 __bad_pda_field(); \
86 } \
87} while (0)
88
89#define pda_from_op(op, field) \
90({ \
91 typeof(_proxy_pda.field) ret__; \
92 switch (sizeof(_proxy_pda.field)) { \
93 case 2: \
94 asm(op "w %%gs:%c1,%0" : \
95 "=r" (ret__) : \
96 "i" (pda_offset(field)), \
97 "m" (_proxy_pda.field)); \
98 break; \
99 case 4: \
100 asm(op "l %%gs:%c1,%0": \
101 "=r" (ret__): \
102 "i" (pda_offset(field)), \
103 "m" (_proxy_pda.field)); \
104 break; \
105 case 8: \
106 asm(op "q %%gs:%c1,%0": \
107 "=r" (ret__) : \
108 "i" (pda_offset(field)), \
109 "m" (_proxy_pda.field)); \
110 break; \
111 default: \
112 __bad_pda_field(); \
113 } \
114 ret__; \
115})
116
117#define read_pda(field) pda_from_op("mov", field)
118#define write_pda(field, val) pda_to_op("mov", field, val)
119#define add_pda(field, val) pda_to_op("add", field, val)
120#define sub_pda(field, val) pda_to_op("sub", field, val)
121#define or_pda(field, val) pda_to_op("or", field, val)
122
123/* This is not atomic against other CPUs -- CPU preemption needs to be off */
124#define test_and_clear_bit_pda(bit, field) \
125({ \
126 int old__; \
127 asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
128 : "=r" (old__), "+m" (_proxy_pda.field) \
129 : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
130 old__; \
131})
132
133#endif
134
135#define PDA_STACKOFFSET (5*8)
136
137#endif /* _ASM_X86_PDA_H */
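For context on the removal, a sketch of how these accessors were typically used; the field names come from the struct x8664_pda above, while the wrapper functions are illustrative:

/* illustration only, mirroring pre-removal callers of the PDA ops */
static inline unsigned int demo_cpu_number(void)
{
        return read_pda(cpunumber);     /* one %gs-relative mov */
}

static inline void demo_count_nmi(void)
{
        /* no lock needed: the PDA is strictly per-CPU */
        add_pda(__nmi_count, 1);
}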
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index ece72053ba63..aee103b26d01 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -2,53 +2,12 @@
2#define _ASM_X86_PERCPU_H 2#define _ASM_X86_PERCPU_H
3 3
4#ifdef CONFIG_X86_64 4#ifdef CONFIG_X86_64
5#include <linux/compiler.h> 5#define __percpu_seg gs
6 6#define __percpu_mov_op movq
7/* Same as asm-generic/percpu.h, except that we store the per cpu offset 7#else
8 in the PDA. Longer term the PDA and every per cpu variable 8#define __percpu_seg fs
9 should just be put into a single section and referenced directly 9#define __percpu_mov_op movl
10 from %gs */
11
12#ifdef CONFIG_SMP
13#include <asm/pda.h>
14
15#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
16#define __my_cpu_offset read_pda(data_offset)
17
18#define per_cpu_offset(x) (__per_cpu_offset(x))
19
20#endif 10#endif
21#include <asm-generic/percpu.h>
22
23DECLARE_PER_CPU(struct x8664_pda, pda);
24
25/*
26 * These are supposed to be implemented as a single instruction which
27 * operates on the per-cpu data base segment. x86-64 doesn't have
28 * that yet, so this is a fairly inefficient workaround for the
29 * meantime. The single instruction is atomic with respect to
30 * preemption and interrupts, so we need to explicitly disable
31 * interrupts here to achieve the same effect. However, because it
32 * can be used from within interrupt-disable/enable, we can't actually
33 * disable interrupts; disabling preemption is enough.
34 */
35#define x86_read_percpu(var) \
36 ({ \
37 typeof(per_cpu_var(var)) __tmp; \
38 preempt_disable(); \
39 __tmp = __get_cpu_var(var); \
40 preempt_enable(); \
41 __tmp; \
42 })
43
44#define x86_write_percpu(var, val) \
45 do { \
46 preempt_disable(); \
47 __get_cpu_var(var) = (val); \
48 preempt_enable(); \
49 } while(0)
50
51#else /* CONFIG_X86_64 */
52 11
53#ifdef __ASSEMBLY__ 12#ifdef __ASSEMBLY__
54 13
@@ -65,47 +24,48 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
65 * PER_CPU(cpu_gdt_descr, %ebx) 24 * PER_CPU(cpu_gdt_descr, %ebx)
66 */ 25 */
67#ifdef CONFIG_SMP 26#ifdef CONFIG_SMP
68#define PER_CPU(var, reg) \ 27#define PER_CPU(var, reg) \
69 movl %fs:per_cpu__##this_cpu_off, reg; \ 28 __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
70 lea per_cpu__##var(reg), reg 29 lea per_cpu__##var(reg), reg
71#define PER_CPU_VAR(var) %fs:per_cpu__##var 30#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
72#else /* ! SMP */ 31#else /* ! SMP */
73#define PER_CPU(var, reg) \ 32#define PER_CPU(var, reg) \
74 movl $per_cpu__##var, reg 33 __percpu_mov_op $per_cpu__##var, reg
75#define PER_CPU_VAR(var) per_cpu__##var 34#define PER_CPU_VAR(var) per_cpu__##var
76#endif /* SMP */ 35#endif /* SMP */
77 36
37#ifdef CONFIG_X86_64_SMP
38#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
39#else
40#define INIT_PER_CPU_VAR(var) per_cpu__##var
41#endif
42
78#else /* ...!ASSEMBLY */ 43#else /* ...!ASSEMBLY */
79 44
45#include <linux/stringify.h>
46
47#ifdef CONFIG_SMP
48#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
49#define __my_cpu_offset percpu_read(this_cpu_off)
50#else
51#define __percpu_arg(x) "%" #x
52#endif
53
80/* 54/*
81 * PER_CPU finds an address of a per-cpu variable. 55 * Initialized pointers to per-cpu variables needed for the boot
56 * processor need to use these macros to get the proper address
57 * offset from __per_cpu_load on SMP.
82 * 58 *
83 * Args: 59 * There also must be an entry in vmlinux_64.lds.S
84 * var - variable name
85 * cpu - 32bit register containing the current CPU number
86 *
87 * The resulting address is stored in the "cpu" argument.
88 *
89 * Example:
90 * PER_CPU(cpu_gdt_descr, %ebx)
91 */ 60 */
92#ifdef CONFIG_SMP 61#define DECLARE_INIT_PER_CPU(var) \
93 62 extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
94#define __my_cpu_offset x86_read_percpu(this_cpu_off)
95
96/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
97#define __percpu_seg "%%fs:"
98
99#else /* !SMP */
100
101#define __percpu_seg ""
102
103#endif /* SMP */
104
105#include <asm-generic/percpu.h>
106 63
107/* We can use this directly for local CPU (faster). */ 64#ifdef CONFIG_X86_64_SMP
108DECLARE_PER_CPU(unsigned long, this_cpu_off); 65#define init_per_cpu_var(var) init_per_cpu__##var
66#else
67#define init_per_cpu_var(var) per_cpu_var(var)
68#endif
109 69
110/* For arch-specific code, we can use direct single-insn ops (they 70/* For arch-specific code, we can use direct single-insn ops (they
111 * don't give an lvalue though). */ 71 * don't give an lvalue though). */
@@ -120,20 +80,25 @@ do { \
120 } \ 80 } \
121 switch (sizeof(var)) { \ 81 switch (sizeof(var)) { \
122 case 1: \ 82 case 1: \
123 asm(op "b %1,"__percpu_seg"%0" \ 83 asm(op "b %1,"__percpu_arg(0) \
124 : "+m" (var) \ 84 : "+m" (var) \
125 : "ri" ((T__)val)); \ 85 : "ri" ((T__)val)); \
126 break; \ 86 break; \
127 case 2: \ 87 case 2: \
128 asm(op "w %1,"__percpu_seg"%0" \ 88 asm(op "w %1,"__percpu_arg(0) \
129 : "+m" (var) \ 89 : "+m" (var) \
130 : "ri" ((T__)val)); \ 90 : "ri" ((T__)val)); \
131 break; \ 91 break; \
132 case 4: \ 92 case 4: \
133 asm(op "l %1,"__percpu_seg"%0" \ 93 asm(op "l %1,"__percpu_arg(0) \
134 : "+m" (var) \ 94 : "+m" (var) \
135 : "ri" ((T__)val)); \ 95 : "ri" ((T__)val)); \
136 break; \ 96 break; \
97 case 8: \
98 asm(op "q %1,"__percpu_arg(0) \
99 : "+m" (var) \
100 : "re" ((T__)val)); \
101 break; \
137 default: __bad_percpu_size(); \ 102 default: __bad_percpu_size(); \
138 } \ 103 } \
139} while (0) 104} while (0)
@@ -143,17 +108,22 @@ do { \
143 typeof(var) ret__; \ 108 typeof(var) ret__; \
144 switch (sizeof(var)) { \ 109 switch (sizeof(var)) { \
145 case 1: \ 110 case 1: \
146 asm(op "b "__percpu_seg"%1,%0" \ 111 asm(op "b "__percpu_arg(1)",%0" \
147 : "=r" (ret__) \ 112 : "=r" (ret__) \
148 : "m" (var)); \ 113 : "m" (var)); \
149 break; \ 114 break; \
150 case 2: \ 115 case 2: \
151 asm(op "w "__percpu_seg"%1,%0" \ 116 asm(op "w "__percpu_arg(1)",%0" \
152 : "=r" (ret__) \ 117 : "=r" (ret__) \
153 : "m" (var)); \ 118 : "m" (var)); \
154 break; \ 119 break; \
155 case 4: \ 120 case 4: \
156 asm(op "l "__percpu_seg"%1,%0" \ 121 asm(op "l "__percpu_arg(1)",%0" \
122 : "=r" (ret__) \
123 : "m" (var)); \
124 break; \
125 case 8: \
126 asm(op "q "__percpu_arg(1)",%0" \
157 : "=r" (ret__) \ 127 : "=r" (ret__) \
158 : "m" (var)); \ 128 : "m" (var)); \
159 break; \ 129 break; \
@@ -162,13 +132,30 @@ do { \
162 ret__; \ 132 ret__; \
163}) 133})
164 134
165#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) 135#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
166#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val) 136#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
167#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val) 137#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
168#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) 138#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
169#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) 139#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
140#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
141#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
142
143/* This is not atomic against other CPUs -- CPU preemption needs to be off */
144#define x86_test_and_clear_bit_percpu(bit, var) \
145({ \
146 int old__; \
147 asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
148 : "=r" (old__), "+m" (per_cpu__##var) \
149 : "dIr" (bit)); \
150 old__; \
151})
152
153#include <asm-generic/percpu.h>
154
155/* We can use this directly for local CPU (faster). */
156DECLARE_PER_CPU(unsigned long, this_cpu_off);
157
170#endif /* !__ASSEMBLY__ */ 158#endif /* !__ASSEMBLY__ */
171#endif /* !CONFIG_X86_64 */
172 159
173#ifdef CONFIG_SMP 160#ifdef CONFIG_SMP
174 161
@@ -195,9 +182,9 @@ do { \
195#define early_per_cpu_ptr(_name) (_name##_early_ptr) 182#define early_per_cpu_ptr(_name) (_name##_early_ptr)
196#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) 183#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
197#define early_per_cpu(_name, _cpu) \ 184#define early_per_cpu(_name, _cpu) \
198 (early_per_cpu_ptr(_name) ? \ 185 *(early_per_cpu_ptr(_name) ? \
199 early_per_cpu_ptr(_name)[_cpu] : \ 186 &early_per_cpu_ptr(_name)[_cpu] : \
200 per_cpu(_name, _cpu)) 187 &per_cpu(_name, _cpu))
201 188
202#else /* !CONFIG_SMP */ 189#else /* !CONFIG_SMP */
203#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ 190#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
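The replacement accessors above compile to one segment-relative instruction each, so they are atomic with respect to preemption and interrupts on the local CPU — exactly the property the deleted x86_read_percpu()/x86_write_percpu() comment asked for. A minimal usage sketch (foo_count is a hypothetical per-cpu variable, not part of this patch):

	DEFINE_PER_CPU(unsigned long, foo_count);

	static void foo_tick(void)
	{
		percpu_add(foo_count, 1);		/* single add %seg:off instruction */
		if (percpu_read(foo_count) >= 1000)	/* single mov %seg:off,%reg */
			percpu_write(foo_count, 0);	/* single mov $0,%seg:off */
	}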
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index e0d199fe1d83..2334982b339e 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -26,13 +26,6 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 	native_set_pte(ptep, pte);
 }
 
-static inline void native_set_pte_present(struct mm_struct *mm,
-					  unsigned long addr,
-					  pte_t *ptep, pte_t pte)
-{
-	native_set_pte(ptep, pte);
-}
-
 static inline void native_pmd_clear(pmd_t *pmdp)
 {
 	native_set_pmd(pmdp, __pmd(0));
@@ -53,8 +46,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_none(x)		(!(x).pte_low)
-
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
  * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-2level-defs.h b/arch/x86/include/asm/pgtable-2level_types.h
index d77db8990eaa..daacc23e3fb9 100644
--- a/arch/x86/include/asm/pgtable-2level-defs.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -1,7 +1,23 @@
 #ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
 #define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
 
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef unsigned long	pteval_t;
+typedef unsigned long	pmdval_t;
+typedef unsigned long	pudval_t;
+typedef unsigned long	pgdval_t;
+typedef unsigned long	pgprotval_t;
+
+typedef union {
+	pteval_t pte;
+	pteval_t pte_low;
+} pte_t;
+#endif	/* !__ASSEMBLY__ */
+
 #define SHARED_KERNEL_PMD	0
+#define PAGETABLE_LEVELS	2
 
 /*
  * traditional i386 two-level paging structure:
@@ -10,6 +26,7 @@
 #define PGDIR_SHIFT	22
 #define PTRS_PER_PGD	1024
 
+
 /*
  * the i386 is two-level, so we don't really have any
  * PMD directory physically.
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 447da43cddb3..177b0165ea01 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -18,21 +18,6 @@
 	printk("%s:%d: bad pgd %p(%016Lx).\n",				\
 	       __FILE__, __LINE__, &(e), pgd_val(e))
 
-static inline int pud_none(pud_t pud)
-{
-	return pud_val(pud) == 0;
-}
-
-static inline int pud_bad(pud_t pud)
-{
-	return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
-}
-
-static inline int pud_present(pud_t pud)
-{
-	return pud_val(pud) & _PAGE_PRESENT;
-}
-
 /* Rules for using set_pte: the pte being assigned *must* be
  * either not present or in a state where the hardware will
  * not attempt to update the pte.  In places where this is
@@ -46,23 +31,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
 	ptep->pte_low = pte.pte_low;
 }
 
-/*
- * Since this is only called on user PTEs, and the page fault handler
- * must handle the already racy situation of simultaneous page faults,
- * we are justified in merely clearing the PTE present bit, followed
- * by a set.  The ordering here is important.
- */
-static inline void native_set_pte_present(struct mm_struct *mm,
-					  unsigned long addr,
-					  pte_t *ptep, pte_t pte)
-{
-	ptep->pte_low = 0;
-	smp_wmb();
-	ptep->pte_high = pte.pte_high;
-	smp_wmb();
-	ptep->pte_low = pte.pte_low;
-}
-
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
@@ -120,15 +88,6 @@ static inline void pud_clear(pud_t *pudp)
 	write_cr3(pgd);
 }
 
-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
-
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
-				  pmd_index(address))
-
 #ifdef CONFIG_SMP
 static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
@@ -145,17 +104,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 #define native_ptep_get_and_clear(xp)	native_local_ptep_get_and_clear(xp)
 #endif
 
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t a, pte_t b)
-{
-	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
-}
-
-static inline int pte_none(pte_t pte)
-{
-	return !pte.pte_low && !pte.pte_high;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
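Both the deleted native_set_pte_present() and the surviving native_set_pte() encode the same PAE ordering rule: a 64-bit PTE is stored as two 32-bit halves, so the half holding the present bit must become visible last. A sketch of the argument, using the pte_t union introduced in pgtable-3level_types.h below (the function name is hypothetical; it mirrors native_set_pte above):

	static inline void pae_set_pte_sketch(pte_t *ptep, pte_t pte)
	{
		ptep->pte_high = pte.pte_high;	/* new high half: PFN bits, NX */
		smp_wmb();			/* order the two 32-bit stores */
		ptep->pte_low = pte.pte_low;	/* present bit becomes visible last */
	}

If the low half were written first, another CPU could momentarily observe a present PTE whose high half still holds stale physical-address bits.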
diff --git a/arch/x86/include/asm/pgtable-3level-defs.h b/arch/x86/include/asm/pgtable-3level_types.h
index 62561367653c..1bd5876c8649 100644
--- a/arch/x86/include/asm/pgtable-3level-defs.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -1,12 +1,31 @@
 #ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
 #define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
 
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef u64	pteval_t;
+typedef u64	pmdval_t;
+typedef u64	pudval_t;
+typedef u64	pgdval_t;
+typedef u64	pgprotval_t;
+
+typedef union {
+	struct {
+		unsigned long pte_low, pte_high;
+	};
+	pteval_t pte;
+} pte_t;
+#endif	/* !__ASSEMBLY__ */
+
 #ifdef CONFIG_PARAVIRT
 #define SHARED_KERNEL_PMD	(pv_info.shared_kernel_pmd)
 #else
 #define SHARED_KERNEL_PMD	1
 #endif
 
+#define PAGETABLE_LEVELS	3
+
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
@@ -25,4 +44,5 @@
  */
 #define PTRS_PER_PTE	512
 
+
 #endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 4f5af8447d54..29d96d168bc0 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1,164 +1,9 @@
 #ifndef _ASM_X86_PGTABLE_H
 #define _ASM_X86_PGTABLE_H
 
-#define FIRST_USER_ADDRESS	0
-
-#define _PAGE_BIT_PRESENT	0	/* is present */
-#define _PAGE_BIT_RW		1	/* writeable */
-#define _PAGE_BIT_USER		2	/* userspace addressable */
-#define _PAGE_BIT_PWT		3	/* page write through */
-#define _PAGE_BIT_PCD		4	/* page cache disabled */
-#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
-#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
-#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
-#define _PAGE_BIT_PAT		7	/* on 4KB pages */
-#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
-#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3	11
-#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
-#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
-#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
-#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
-
-/* If _PAGE_BIT_PRESENT is clear, we use these: */
-/* - if the user mapped it with PROT_NONE; pte_present gives true */
-#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
-/* - set: nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY
-
-#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
-#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
-#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
-#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
-#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
-#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
-#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
-#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
-#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
-#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
-#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
-#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
-#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
-#define __HAVE_ARCH_PTE_SPECIAL
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#else
-#define _PAGE_NX	(_AT(pteval_t, 0))
-#endif
+#include <asm/page.h>
 
-#define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
-#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
-
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
-			 _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
-			 _PAGE_DIRTY)
-
-/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
-			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
-#define _PAGE_CACHE_WB		(0)
-#define _PAGE_CACHE_WC		(_PAGE_PWT)
-#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
-#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
-
-#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
-				 _PAGE_ACCESSED | _PAGE_NX)
-
-#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
-					 _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
-					 _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
-					 _PAGE_ACCESSED)
-#define PAGE_COPY		PAGE_COPY_NOEXEC
-#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
-					 _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
-					 _PAGE_ACCESSED)
-
-#define __PAGE_KERNEL_EXEC						\
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
-#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)
-
-#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
-#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)
-
-#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
-#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
-#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
-#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
-#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-
-#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
-#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
-#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)
-
-/*         xwr */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_EXEC
-#define __P101	PAGE_READONLY_EXEC
-#define __P110	PAGE_COPY_EXEC
-#define __P111	PAGE_COPY_EXEC
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_EXEC
-#define __S101	PAGE_READONLY_EXEC
-#define __S110	PAGE_SHARED_EXEC
-#define __S111	PAGE_SHARED_EXEC
-
-/*
- * early identity mapping pte attrib macros.
- */
-#ifdef CONFIG_X86_64
-#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
-#else
-/*
- * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
- * bits are combined, this will alow user to access the high address mapped
- * VDSO in the presence of CONFIG_COMPAT_VDSO
- */
-#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
-#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
-#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
-#endif
+#include <asm/pgtable_types.h>
 
 /*
  * Macro to mark a page protection value as UC-
@@ -170,9 +15,6 @@
 
 #ifndef __ASSEMBLY__
 
-#define pgprot_writecombine	pgprot_writecombine
-extern pgprot_t pgprot_writecombine(pgprot_t prot);
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -183,6 +25,64 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else  /* !CONFIG_PARAVIRT */
+#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
+#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
+
+#define set_pte_atomic(ptep, pte)					\
+	native_set_pte_atomic(ptep, pte)
+
+#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
+#define pgd_clear(pgd)			native_pgd_clear(pgd)
+#endif
+
+#ifndef set_pud
+# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pud_clear(pud)			native_pud_clear(pud)
+#endif
+
+#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
+#define pmd_clear(pmd)			native_pmd_clear(pmd)
+
+#define pte_update(mm, addr, ptep)              do { } while (0)
+#define pte_update_defer(mm, addr, ptep)        do { } while (0)
+
+static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
+{
+	native_pagetable_setup_start(base);
+}
+
+static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
+{
+	native_pagetable_setup_done(base);
+}
+
+#define pgd_val(x)	native_pgd_val(x)
+#define __pgd(x)	native_make_pgd(x)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_val(x)	native_pud_val(x)
+#define __pud(x)	native_make_pud(x)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_val(x)	native_pmd_val(x)
+#define __pmd(x)	native_make_pmd(x)
+#endif
+
+#define pte_val(x)	native_pte_val(x)
+#define __pte(x)	native_make_pte(x)
+
+#endif	/* CONFIG_PARAVIRT */
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -236,72 +136,84 @@ static inline unsigned long pte_pfn(pte_t pte)
 
 static inline int pmd_large(pmd_t pte)
 {
-	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
 		(_PAGE_PSE | _PAGE_PRESENT);
 }
 
+static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+{
+	pteval_t v = native_pte_val(pte);
+
+	return native_make_pte(v | set);
+}
+
+static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
+{
+	pteval_t v = native_pte_val(pte);
+
+	return native_make_pte(v & ~clear);
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+	return pte_clear_flags(pte, _PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+	return pte_clear_flags(pte, _PAGE_ACCESSED);
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_RW);
+	return pte_clear_flags(pte, _PAGE_RW);
 }
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_NX);
+	return pte_clear_flags(pte, _PAGE_NX);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_DIRTY);
+	return pte_set_flags(pte, _PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+	return pte_set_flags(pte, _PAGE_ACCESSED);
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_RW);
+	return pte_set_flags(pte, _PAGE_RW);
 }
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_PSE);
+	return pte_set_flags(pte, _PAGE_PSE);
 }
 
 static inline pte_t pte_clrhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_PSE);
+	return pte_clear_flags(pte, _PAGE_PSE);
}
 
 static inline pte_t pte_mkglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_GLOBAL);
+	return pte_set_flags(pte, _PAGE_GLOBAL);
 }
 
 static inline pte_t pte_clrglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
+	return pte_clear_flags(pte, _PAGE_GLOBAL);
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+	return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
-extern pteval_t __supported_pte_mask;
-
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
@@ -374,82 +286,197 @@ static inline int is_new_memtype_allowed(unsigned long flags,
 	return 1;
 }
 
-#ifndef __ASSEMBLY__
-/* Indicate that x86 has its own track and untrack pfn vma functions */
-#define __HAVE_PFNMAP_TRACKING
-
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-                              unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
-                              unsigned long size, pgprot_t *vma_prot);
-#endif
-
-/* Install a pte for a particular vaddr in kernel space. */
-void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+pmd_t *populate_extra_pmd(unsigned long vaddr);
+pte_t *populate_extra_pte(unsigned long vaddr);
+#endif	/* __ASSEMBLY__ */
 
 #ifdef CONFIG_X86_32
-extern void native_pagetable_setup_start(pgd_t *base);
-extern void native_pagetable_setup_done(pgd_t *base);
+# include "pgtable_32.h"
 #else
-static inline void native_pagetable_setup_start(pgd_t *base) {}
-static inline void native_pagetable_setup_done(pgd_t *base) {}
+# include "pgtable_64.h"
 #endif
 
-struct seq_file;
-extern void arch_report_meminfo(struct seq_file *m);
+#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
 
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else  /* !CONFIG_PARAVIRT */
-#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
+static inline int pte_none(pte_t pte)
+{
+	return !pte.pte;
+}
 
-#define set_pte_present(mm, addr, ptep, pte)				\
-	native_set_pte_present(mm, addr, ptep, pte)
-#define set_pte_atomic(ptep, pte)					\
-	native_set_pte_atomic(ptep, pte)
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t a, pte_t b)
+{
+	return a.pte == b.pte;
+}
 
-#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
+static inline int pte_present(pte_t a)
+{
+	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+}
 
-#ifndef __PAGETABLE_PUD_FOLDED
-#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
-#define pgd_clear(pgd)			native_pgd_clear(pgd)
-#endif
+static inline int pmd_present(pmd_t pmd)
+{
	return pmd_flags(pmd) & _PAGE_PRESENT;
+}
 
-#ifndef set_pud
-# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
-#endif
+static inline int pmd_none(pmd_t pmd)
+{
+	/* Only check low word on 32-bit platforms, since it might be
+	   out of sync with upper half. */
+	return (unsigned long)native_pmd_val(pmd) == 0;
+}
 
-#ifndef __PAGETABLE_PMD_FOLDED
-#define pud_clear(pud)			native_pud_clear(pud)
-#endif
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
+}
 
-#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
-#define pmd_clear(pmd)			native_pmd_clear(pmd)
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 
-#define pte_update(mm, addr, ptep)              do { } while (0)
-#define pte_update_defer(mm, addr, ptep)        do { } while (0)
+/*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+static inline unsigned pmd_index(unsigned long address)
+{
+	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
 
-static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * (Currently stuck as a macro because of indirect forward reference
+ * to linux/mm.h:page_to_nid())
+ */
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+
+/*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this function returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+static inline unsigned pte_index(unsigned long address)
 {
-	native_pagetable_setup_start(base);
+	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 }
 
-static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
 {
-	native_pagetable_setup_done(base);
+	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
 }
-#endif	/* CONFIG_PARAVIRT */
 
-#endif	/* __ASSEMBLY__ */
+static inline int pmd_bad(pmd_t pmd)
+{
+	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
 
-#ifdef CONFIG_X86_32
-# include "pgtable_32.h"
+static inline unsigned long pages_to_mb(unsigned long npg)
+{
+	return npg >> (20 - PAGE_SHIFT);
+}
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#if PAGETABLE_LEVELS > 2
+static inline int pud_none(pud_t pud)
+{
+	return native_pud_val(pud) == 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+	return pud_flags(pud) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pud_page(pud)		pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline int pud_large(pud_t pud)
+{
+	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+		(_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+}
 #else
-# include "pgtable_64.h"
-#endif
+static inline int pud_large(pud_t pud)
+{
+	return 0;
+}
+#endif	/* PAGETABLE_LEVELS > 2 */
+
+#if PAGETABLE_LEVELS > 3
+static inline int pgd_present(pgd_t pgd)
+{
+	return pgd_flags(pgd) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+
+/* to find an entry in a page-table-directory. */
+static inline unsigned pud_index(unsigned long address)
+{
+	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+	return !native_pgd_val(pgd);
+}
+#endif	/* PAGETABLE_LEVELS > 3 */
+
+#endif	/* __ASSEMBLY__ */
 
 /*
  * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
@@ -476,28 +503,6 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
 
 #ifndef __ASSEMBLY__
 
-enum {
-	PG_LEVEL_NONE,
-	PG_LEVEL_4K,
-	PG_LEVEL_2M,
-	PG_LEVEL_1G,
-	PG_LEVEL_NUM
-};
-
-#ifdef CONFIG_PROC_FS
-extern void update_page_count(int level, unsigned long pages);
-#else
-static inline void update_page_count(int level, unsigned long pages) { }
-#endif
-
-/*
- * Helper function that returns the kernel pagetable entry controlling
- * the virtual address 'address'. NULL means no pagetable entry present.
- * NOTE: the return type is pte_t but if the pmd is PSE then we return it
- * as a pte too.
- */
-extern pte_t *lookup_address(unsigned long address, unsigned int *level);
-
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 {
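With the pud/pmd/pte predicates and offset helpers now consolidated in this one header, a complete four-step walk can be written against it. A hedged sketch (walk_sketch() is hypothetical; it assumes the usual pgd_offset() macro defined from pgd_index() further down this header, and takes no page-table locks):

	static pte_t *walk_sketch(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);	/* level 4 (or folded) */
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);		/* level 3 */
		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);		/* level 2 */
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);	/* level 1 */
	}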
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 72b020deb46b..31bd120cf2a2 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_PGTABLE_32_H
 #define _ASM_X86_PGTABLE_32_H
 
+#include <asm/pgtable_32_types.h>
 
 /*
  * The Linux memory management assumes a three-level page table setup. On
@@ -33,47 +34,6 @@ void paging_init(void);
 
 extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 
-/*
- * The Linux x86 paging architecture is 'compile-time dual-mode', it
- * implements both the traditional 2-level x86 page tables and the
- * newer 3-level PAE-mode page tables.
- */
-#ifdef CONFIG_X86_PAE
-# include <asm/pgtable-3level-defs.h>
-# define PMD_SIZE	(1UL << PMD_SHIFT)
-# define PMD_MASK	(~(PMD_SIZE - 1))
-#else
-# include <asm/pgtable-2level-defs.h>
-#endif
-
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET	(8 * 1024 * 1024)
-#define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
-		    & PMD_MASK)
-
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
-#else
-# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
-#endif
-
-#define MAXMEM	(VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
 
 /*
  * Define this if things work differently on an i386 and an i486:
@@ -82,58 +42,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
  */
 #undef TEST_ACCESS_OK
 
-/* The boot page tables (all created as a single array) */
-extern unsigned long pg0[];
-
-#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x)	(!(unsigned long)pmd_val((x)))
-#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
-#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else
 # include <asm/pgtable-2level.h>
 #endif
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-
-
-static inline int pud_large(pud_t pud) { return 0; }
-
-/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_index(address)				\
-	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address)					\
-	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address)				\
-	((pte_t *)pmd_page_vaddr(*(dir)) +  pte_index((address)))
-
-#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_page_vaddr(pmd)					\
-	((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address)					\
 	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +		\
@@ -176,7 +90,4 @@ do { \
 #define kern_addr_valid(kaddr)	(0)
 #endif
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #endif /* _ASM_X86_PGTABLE_32_H */
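Under CONFIG_HIGHPTE the pte page may live in highmem, so pte_offset_map() shown above takes an atomic kmap that must be released with the matching pte_unmap(). A minimal sketch of the calling convention (pmd and addr are assumed to be valid; preemption must stay disabled between the two calls):

	pte_t *ptep = pte_offset_map(pmd, addr);	/* kmap_atomic(KM_PTE0) if HIGHPTE */
	pte_t val = *ptep;				/* examine while mapped */
	pte_unmap(ptep);				/* drop the atomic kmap */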
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
new file mode 100644
index 000000000000..2733fad45f98
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_X86_PGTABLE_32_DEFS_H
+#define _ASM_X86_PGTABLE_32_DEFS_H
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level_types.h>
+# define PMD_SIZE	(1UL << PMD_SHIFT)
+# define PMD_MASK	(~(PMD_SIZE - 1))
+#else
+# include <asm/pgtable-2level_types.h>
+#endif
+
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET	(8 * 1024 * 1024)
+
+#ifndef __ASSEMBLER__
+extern bool __vmalloc_start_set; /* set once high_memory is set */
+#endif
+
+#define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
+		    & PMD_MASK)
+
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
+#else
+# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
+#endif
+
+#define MAXMEM	(VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
+#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
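The layout constants above chain together, and a worked example makes the arithmetic concrete. Assuming the common PAGE_OFFSET of 0xc0000000, non-PAE paging and 896 MB of lowmem (so high_memory sits near 0xf8000000 — an assumption, not something fixed by this header):

	/*
	 * VMALLOC_START = 0xf8000000 + 8 MB           = 0xf8800000
	 * LAST_PKMAP    = 1024 entries (non-PAE)
	 * PKMAP_BASE    = FIXADDR_BOOT_START - (1024 + 1) * 4 KB,
	 *                 rounded down to a 4 MB PMD boundary
	 * VMALLOC_END   = PKMAP_BASE - 2 * PAGE_SIZE  (with CONFIG_HIGHMEM)
	 */

The two guard pages below PKMAP_BASE and the 8 MB hole above high_memory exist so that out-of-bounds accesses fault instead of silently hitting an adjacent region.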
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index ba09289accaa..6b87bc6d5018 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -2,6 +2,8 @@
 #define _ASM_X86_PGTABLE_64_H
 
 #include <linux/const.h>
+#include <asm/pgtable_64_types.h>
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -11,7 +13,6 @@
 #include <asm/processor.h>
 #include <linux/bitops.h>
 #include <linux/threads.h>
-#include <asm/pda.h>
 
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
@@ -26,32 +27,6 @@ extern void paging_init(void);
 
 #endif /* !__ASSEMBLY__ */
 
-#define SHARED_KERNEL_PMD	0
-
-/*
- * PGDIR_SHIFT determines what a top-level page table entry can map
- */
-#define PGDIR_SHIFT	39
-#define PTRS_PER_PGD	512
-
-/*
- * 3rd level page
- */
-#define PUD_SHIFT	30
-#define PTRS_PER_PUD	512
-
-/*
- * PMD_SHIFT determines the size of the area a middle-level
- * page table can map
- */
-#define PMD_SHIFT	21
-#define PTRS_PER_PMD	512
-
-/*
- * entries per page directory level
- */
-#define PTRS_PER_PTE	512
-
 #ifndef __ASSEMBLY__
 
 #define pte_ERROR(e)					\
@@ -67,9 +42,6 @@ extern void paging_init(void);
 	printk("%s:%d: bad pgd %p(%016lx).\n",		\
 	       __FILE__, __LINE__, &(e), pgd_val(e))
 
-#define pgd_none(x)	(!pgd_val(x))
-#define pud_none(x)	(!pud_val(x))
-
 struct mm_struct;
 
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -134,48 +106,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
 	native_set_pgd(pgd, native_make_pgd(0));
 }
 
-#define pte_same(a, b)		((a).pte == (b).pte)
-
-#endif /* !__ASSEMBLY__ */
-
-#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE - 1))
-#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
-#define PUD_MASK	(~(PUD_SIZE - 1))
-#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
-
-
-#define MAXMEM		 _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
-#define VMALLOC_START    _AC(0xffffc20000000000, UL)
-#define VMALLOC_END      _AC(0xffffe1ffffffffff, UL)
-#define VMEMMAP_START	 _AC(0xffffe20000000000, UL)
-#define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
-#define MODULES_END      _AC(0xffffffffff000000, UL)
-#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
-
-#ifndef __ASSEMBLY__
-
-static inline int pgd_bad(pgd_t pgd)
-{
-	return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pud_bad(pud_t pud)
-{
-	return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pmd_bad(pmd_t pmd)
-{
-	return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-#define pte_none(x)	(!pte_val((x)))
-#define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))	/* FIXME: is this right? */
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -184,41 +114,12 @@ static inline int pmd_bad(pmd_t pmd)
 /*
  * Level 4 access.
  */
-#define pgd_page_vaddr(pgd)						\
-	((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
-#define pgd_page(pgd)		(pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
-#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
 static inline int pgd_large(pgd_t pgd) { return 0; }
 #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
 
 /* PUD - Level3 access */
-/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud)						\
-	((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud)	(pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(pgd, address)					\
-	((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
-#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
-
-static inline int pud_large(pud_t pte)
-{
-	return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
-		(_PAGE_PSE | _PAGE_PRESENT);
-}
 
 /* PMD - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
-#define pmd_page(pmd)		(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
-				  pmd_index(address))
-#define pmd_none(x)	(!pmd_val((x)))
-#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
-#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
-#define pmd_pfn(x)  ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
 #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
 #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
 					     _PAGE_FILE })
@@ -226,13 +127,6 @@ static inline int pud_large(pud_t pte)
 
 /* PTE - Level 1 access. */
 
-/* page, protection -> pte */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))
-
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) +	\
-					 pte_index((address)))
-
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
 #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
@@ -266,9 +160,6 @@ extern int direct_gbpages;
 extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
new file mode 100644
index 000000000000..fbf42b8e0383
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -0,0 +1,63 @@
+#ifndef _ASM_X86_PGTABLE_64_DEFS_H
+#define _ASM_X86_PGTABLE_64_DEFS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long	pteval_t;
+typedef unsigned long	pmdval_t;
+typedef unsigned long	pudval_t;
+typedef unsigned long	pgdval_t;
+typedef unsigned long	pgprotval_t;
+
+typedef struct { pteval_t pte; } pte_t;
+
+#endif	/* !__ASSEMBLY__ */
+
+#define SHARED_KERNEL_PMD	0
+#define PAGETABLE_LEVELS	4
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT	39
+#define PTRS_PER_PGD	512
+
+/*
+ * 3rd level page
+ */
+#define PUD_SHIFT	30
+#define PTRS_PER_PUD	512
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT	21
+#define PTRS_PER_PMD	512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE	512
+
+#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE - 1))
+#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE - 1))
+#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
+
+
+#define MAXMEM		 _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define VMALLOC_START    _AC(0xffffc20000000000, UL)
+#define VMALLOC_END      _AC(0xffffe1ffffffffff, UL)
+#define VMEMMAP_START	 _AC(0xffffe20000000000, UL)
+#define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
+#define MODULES_END      _AC(0xffffffffff000000, UL)
+#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
+
+#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
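With PAGE_SHIFT = 12, the shifts above slice a 48-bit virtual address into four 9-bit table indices plus a 12-bit page offset (9 + 9 + 9 + 9 + 12 = 48 translated bits). A small illustrative sketch of the decomposition, mirroring the pgd_index()/pud_index()/pmd_index()/pte_index() helpers in pgtable.h (the function is hypothetical):

	static void decompose_sketch(unsigned long addr)
	{
		unsigned pgd_i = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); /* bits 47..39 */
		unsigned pud_i = (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);   /* bits 38..30 */
		unsigned pmd_i = (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);   /* bits 29..21 */
		unsigned pte_i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);  /* bits 20..12 */
	}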
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
new file mode 100644
index 000000000000..b8238dc8786d
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -0,0 +1,329 @@
1#ifndef _ASM_X86_PGTABLE_DEFS_H
2#define _ASM_X86_PGTABLE_DEFS_H
3
4#include <linux/const.h>
5#include <asm/page_types.h>
6
7#define FIRST_USER_ADDRESS 0
8
9#define _PAGE_BIT_PRESENT 0 /* is present */
10#define _PAGE_BIT_RW 1 /* writeable */
11#define _PAGE_BIT_USER 2 /* userspace addressable */
12#define _PAGE_BIT_PWT 3 /* page write through */
13#define _PAGE_BIT_PCD 4 /* page cache disabled */
14#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
15#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
16#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
17#define _PAGE_BIT_PAT 7 /* on 4KB pages */
18#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
19#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
20#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
21#define _PAGE_BIT_UNUSED3 11
22#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
23#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
24#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
25#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
26
27/* If _PAGE_BIT_PRESENT is clear, we use these: */
28/* - if the user mapped it with PROT_NONE; pte_present gives true */
29#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
30/* - set: nonlinear file mapping, saved PTE; unset:swap */
31#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
32
33#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
34#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
35#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
36#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
37#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
38#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
39#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
40#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
41#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
42#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
43#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
44#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
45#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
46#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
47#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
48#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
49#define __HAVE_ARCH_PTE_SPECIAL
50
51#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
52#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
53#else
54#define _PAGE_NX (_AT(pteval_t, 0))
55#endif
56
57#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
58#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
59
60#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
61 _PAGE_ACCESSED | _PAGE_DIRTY)
62#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
63 _PAGE_DIRTY)
64
65/* Set of bits not changed in pte_modify */
66#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
67 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
68
69#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
70#define _PAGE_CACHE_WB (0)
71#define _PAGE_CACHE_WC (_PAGE_PWT)
72#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
73#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
74
75#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
76#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
77 _PAGE_ACCESSED | _PAGE_NX)
78
79#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
80 _PAGE_USER | _PAGE_ACCESSED)
81#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
82 _PAGE_ACCESSED | _PAGE_NX)
83#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
84 _PAGE_ACCESSED)
85#define PAGE_COPY PAGE_COPY_NOEXEC
86#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
87 _PAGE_ACCESSED | _PAGE_NX)
88#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
89 _PAGE_ACCESSED)
90
91#define __PAGE_KERNEL_EXEC \
92 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
93#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
94
95#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
96#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
97#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
98#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
99#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
100#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
101#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
102#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
103#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
104#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
105#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
106
107#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
108#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
109#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
110#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
111
112#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
113#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
114#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
115#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
116#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
117#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
118#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
119#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
120#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
121#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
122#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
123#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
124#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
125
126#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
127#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
128#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
129#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
130
131/* xwr */
132#define __P000 PAGE_NONE
133#define __P001 PAGE_READONLY
134#define __P010 PAGE_COPY
135#define __P011 PAGE_COPY
136#define __P100 PAGE_READONLY_EXEC
137#define __P101 PAGE_READONLY_EXEC
138#define __P110 PAGE_COPY_EXEC
139#define __P111 PAGE_COPY_EXEC
140
141#define __S000 PAGE_NONE
142#define __S001 PAGE_READONLY
143#define __S010 PAGE_SHARED
144#define __S011 PAGE_SHARED
145#define __S100 PAGE_READONLY_EXEC
146#define __S101 PAGE_READONLY_EXEC
147#define __S110 PAGE_SHARED_EXEC
148#define __S111 PAGE_SHARED_EXEC
149
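The __P/__S tables above are indexed by a mapping's execute/write/read bits ("xwr" order), with private vs. shared mappings selecting the table. A hypothetical standalone sketch of the lookup (the array and its entries are illustrative stand-ins, not the kernel's protection_map):

#include <stdio.h>

#define PROT_READ  0x1 /* index bit 0 ("r") */
#define PROT_WRITE 0x2 /* index bit 1 ("w") */
#define PROT_EXEC  0x4 /* index bit 2 ("x") */

int main(void)
{
	static const char *private_map[8] = {	/* __P000 .. __P111 */
		"NONE", "READONLY", "COPY", "COPY",
		"READONLY_EXEC", "READONLY_EXEC", "COPY_EXEC", "COPY_EXEC",
	};
	int prot = PROT_READ | PROT_WRITE;	/* -> index 0b011 */

	printf("private rw mapping -> PAGE_%s\n", private_map[prot]);
	return 0;
}
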
150/*
151 * Early identity-mapping PTE attribute macros.
152 */
153#ifdef CONFIG_X86_64
154#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
155#else
156/*
157 * PDE_IDENT_ATTR includes the USER bit. Since the PDE and PTE protection
158 * bits are combined, this allows userspace to access the high-address-mapped
159 * VDSO when CONFIG_COMPAT_VDSO is enabled.
160 */
161#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
162#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
163#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
164#endif
165
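As a quick check of the identity-mapping values above, the hex constants are just the low flag bits OR'd together; a minimal standalone sketch of the arithmetic:

#include <assert.h>

#define P  0x001 /* _PAGE_PRESENT */
#define RW 0x002 /* _PAGE_RW */
#define US 0x004 /* _PAGE_USER */
#define A  0x020 /* _PAGE_ACCESSED */
#define D  0x040 /* _PAGE_DIRTY */

int main(void)
{
	assert((P | RW) == 0x003);		/* PTE_IDENT_ATTR */
	assert((P | RW | US | A | D) == 0x067);	/* PDE_IDENT_ATTR */
	assert(P == 0x001);			/* PGD_IDENT_ATTR */
	return 0;
}
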
166#ifdef CONFIG_X86_32
167# include "pgtable_32_types.h"
168#else
169# include "pgtable_64_types.h"
170#endif
171
172#ifndef __ASSEMBLY__
173
174#include <linux/types.h>
175
176/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
177#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
178
179/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
180#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
181
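A standalone sketch of the PFN/flags split these two masks implement, assuming 4 KiB pages and 52 physical address bits (the kernel derives the real mask from PHYSICAL_PAGE_MASK; the sample value is arbitrary):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define PFN_MASK   ((((pteval_t)1 << 52) - 1) & ~(pteval_t)0xfff)
#define FLAGS_MASK (~PFN_MASK)

int main(void)
{
	pteval_t pte = 0x1234567e7ULL;	/* arbitrary example value */

	printf("pfn   = 0x%llx\n",
	       (unsigned long long)((pte & PFN_MASK) >> 12));
	printf("flags = 0x%llx\n", (unsigned long long)(pte & FLAGS_MASK));
	return 0;
}
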
182typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
183
184typedef struct { pgdval_t pgd; } pgd_t;
185
186static inline pgd_t native_make_pgd(pgdval_t val)
187{
188 return (pgd_t) { val };
189}
190
191static inline pgdval_t native_pgd_val(pgd_t pgd)
192{
193 return pgd.pgd;
194}
195
196static inline pgdval_t pgd_flags(pgd_t pgd)
197{
198 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
199}
200
201#if PAGETABLE_LEVELS > 3
202typedef struct { pudval_t pud; } pud_t;
203
204static inline pud_t native_make_pud(pudval_t val)
205{
206 return (pud_t) { val };
207}
208
209static inline pudval_t native_pud_val(pud_t pud)
210{
211 return pud.pud;
212}
213#else
214#include <asm-generic/pgtable-nopud.h>
215
216static inline pudval_t native_pud_val(pud_t pud)
217{
218 return native_pgd_val(pud.pgd);
219}
220#endif
221
222#if PAGETABLE_LEVELS > 2
223typedef struct { pmdval_t pmd; } pmd_t;
224
225static inline pmd_t native_make_pmd(pmdval_t val)
226{
227 return (pmd_t) { val };
228}
229
230static inline pmdval_t native_pmd_val(pmd_t pmd)
231{
232 return pmd.pmd;
233}
234#else
235#include <asm-generic/pgtable-nopmd.h>
236
237static inline pmdval_t native_pmd_val(pmd_t pmd)
238{
239 return native_pgd_val(pmd.pud.pgd);
240}
241#endif
242
243static inline pudval_t pud_flags(pud_t pud)
244{
245 return native_pud_val(pud) & PTE_FLAGS_MASK;
246}
247
248static inline pmdval_t pmd_flags(pmd_t pmd)
249{
250 return native_pmd_val(pmd) & PTE_FLAGS_MASK;
251}
252
253static inline pte_t native_make_pte(pteval_t val)
254{
255 return (pte_t) { .pte = val };
256}
257
258static inline pteval_t native_pte_val(pte_t pte)
259{
260 return pte.pte;
261}
262
263static inline pteval_t pte_flags(pte_t pte)
264{
265 return native_pte_val(pte) & PTE_FLAGS_MASK;
266}
267
268#define pgprot_val(x) ((x).pgprot)
269#define __pgprot(x) ((pgprot_t) { (x) } )
270
271
272typedef struct page *pgtable_t;
273
274extern pteval_t __supported_pte_mask;
275extern int nx_enabled;
276extern void set_nx(void);
277
278#define pgprot_writecombine pgprot_writecombine
279extern pgprot_t pgprot_writecombine(pgprot_t prot);
280
281/* Indicate that x86 has its own track and untrack pfn vma functions */
282#define __HAVE_PFNMAP_TRACKING
283
284#define __HAVE_PHYS_MEM_ACCESS_PROT
285struct file;
286pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
287 unsigned long size, pgprot_t vma_prot);
288int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
289 unsigned long size, pgprot_t *vma_prot);
290
291/* Install a pte for a particular vaddr in kernel space. */
292void set_pte_vaddr(unsigned long vaddr, pte_t pte);
293
294#ifdef CONFIG_X86_32
295extern void native_pagetable_setup_start(pgd_t *base);
296extern void native_pagetable_setup_done(pgd_t *base);
297#else
298static inline void native_pagetable_setup_start(pgd_t *base) {}
299static inline void native_pagetable_setup_done(pgd_t *base) {}
300#endif
301
302struct seq_file;
303extern void arch_report_meminfo(struct seq_file *m);
304
305enum {
306 PG_LEVEL_NONE,
307 PG_LEVEL_4K,
308 PG_LEVEL_2M,
309 PG_LEVEL_1G,
310 PG_LEVEL_NUM
311};
312
313#ifdef CONFIG_PROC_FS
314extern void update_page_count(int level, unsigned long pages);
315#else
316static inline void update_page_count(int level, unsigned long pages) { }
317#endif
318
319/*
320 * Helper function that returns the kernel pagetable entry controlling
321 * the virtual address 'address'. NULL means no pagetable entry is present.
322 * NOTE: the return type is pte_t, but if the pmd is PSE (a large page)
323 * it is returned cast as a pte too.
324 */
325extern pte_t *lookup_address(unsigned long address, unsigned int *level);
326
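A minimal kernel-side usage sketch for lookup_address(); the helper name is made up and it assumes arch/x86 kernel context with this header included:

static bool kva_is_mapped(unsigned long vaddr)
{
	unsigned int level;
	pte_t *pte = lookup_address(vaddr, &level);

	/* For a PSE pmd/pud the entry comes back cast as a pte, so
	 * pte_flags() still reads the shared flag bits; level reports
	 * PG_LEVEL_4K, _2M or _1G for the mapping size. */
	return pte && (pte_flags(*pte) & _PAGE_PRESENT);
}
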
327#endif /* !__ASSEMBLY__ */
328
329#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h
index a8894647dd9a..3ac5032fae09 100644
--- a/arch/x86/include/asm/prctl.h
+++ b/arch/x86/include/asm/prctl.h
@@ -6,8 +6,4 @@
6#define ARCH_GET_FS 0x1003 6#define ARCH_GET_FS 0x1003
7#define ARCH_GET_GS 0x1004 7#define ARCH_GET_GS 0x1004
8 8
9#ifdef CONFIG_X86_64
10extern long sys_arch_prctl(int, unsigned long);
11#endif /* CONFIG_X86_64 */
12
13#endif /* _ASM_X86_PRCTL_H */ 9#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3bfd5235a9eb..ae85a8d66a30 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -16,6 +16,7 @@ struct mm_struct;
16#include <asm/cpufeature.h> 16#include <asm/cpufeature.h>
17#include <asm/system.h> 17#include <asm/system.h>
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/pgtable_types.h>
19#include <asm/percpu.h> 20#include <asm/percpu.h>
20#include <asm/msr.h> 21#include <asm/msr.h>
21#include <asm/desc_defs.h> 22#include <asm/desc_defs.h>
@@ -73,10 +74,10 @@ struct cpuinfo_x86 {
73 char pad0; 74 char pad0;
74#else 75#else
75 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ 76 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
76 int x86_tlbsize; 77 int x86_tlbsize;
78#endif
77 __u8 x86_virt_bits; 79 __u8 x86_virt_bits;
78 __u8 x86_phys_bits; 80 __u8 x86_phys_bits;
79#endif
80 /* CPUID returned core id bits: */ 81 /* CPUID returned core id bits: */
81 __u8 x86_coreid_bits; 82 __u8 x86_coreid_bits;
82 /* Max extended CPUID function supported: */ 83 /* Max extended CPUID function supported: */
@@ -247,7 +248,6 @@ struct x86_hw_tss {
247#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) 248#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
248#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) 249#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
249#define INVALID_IO_BITMAP_OFFSET 0x8000 250#define INVALID_IO_BITMAP_OFFSET 0x8000
250#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
251 251
252struct tss_struct { 252struct tss_struct {
253 /* 253 /*
@@ -262,11 +262,6 @@ struct tss_struct {
262 * be within the limit. 262 * be within the limit.
263 */ 263 */
264 unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; 264 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
265 /*
266 * Cache the current maximum and the last task that used the bitmap:
267 */
268 unsigned long io_bitmap_max;
269 struct thread_struct *io_bitmap_owner;
270 265
271 /* 266 /*
272 * .. and then another 0x100 bytes for the emergency kernel stack: 267 * .. and then another 0x100 bytes for the emergency kernel stack:
@@ -378,9 +373,33 @@ union thread_xstate {
378 373
379#ifdef CONFIG_X86_64 374#ifdef CONFIG_X86_64
380DECLARE_PER_CPU(struct orig_ist, orig_ist); 375DECLARE_PER_CPU(struct orig_ist, orig_ist);
376
377union irq_stack_union {
378 char irq_stack[IRQ_STACK_SIZE];
379 /*
380 * GCC hardcodes the stack canary as %gs:40. Since the
381 * irq_stack is the object at %gs:0, we reserve the bottom
382 * 48 bytes of the irq stack for the canary.
383 */
384 struct {
385 char gs_base[40];
386 unsigned long stack_canary;
387 };
388};
389
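A standalone sketch of the layout invariant the union above encodes: the canary must land exactly at %gs:40 (stackprotector.h enforces this with a BUILD_BUG_ON; the stack size below is a made-up stand-in for IRQ_STACK_SIZE):

#include <assert.h>
#include <stddef.h>

union irq_stack_union_sketch {
	char irq_stack[16384];
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

int main(void)
{
	assert(offsetof(union irq_stack_union_sketch, stack_canary) == 40);
	return 0;
}
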
390DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
391DECLARE_INIT_PER_CPU(irq_stack_union);
392
393DECLARE_PER_CPU(char *, irq_stack_ptr);
394DECLARE_PER_CPU(unsigned int, irq_count);
395extern unsigned long kernel_eflags;
396extern asmlinkage void ignore_sysret(void);
397#else /* X86_64 */
398#ifdef CONFIG_CC_STACKPROTECTOR
399DECLARE_PER_CPU(unsigned long, stack_canary);
381#endif 400#endif
401#endif /* X86_64 */
382 402
383extern void print_cpu_info(struct cpuinfo_x86 *);
384extern unsigned int xstate_size; 403extern unsigned int xstate_size;
385extern void free_thread_xstate(struct task_struct *); 404extern void free_thread_xstate(struct task_struct *);
386extern struct kmem_cache *task_xstate_cachep; 405extern struct kmem_cache *task_xstate_cachep;
@@ -752,9 +771,9 @@ extern int sysenter_setup(void);
752extern struct desc_ptr early_gdt_descr; 771extern struct desc_ptr early_gdt_descr;
753 772
754extern void cpu_set_gdt(int); 773extern void cpu_set_gdt(int);
755extern void switch_to_new_gdt(void); 774extern void switch_to_new_gdt(int);
775extern void load_percpu_segment(int);
756extern void cpu_init(void); 776extern void cpu_init(void);
757extern void init_gdt(int cpu);
758 777
759static inline unsigned long get_debugctlmsr(void) 778static inline unsigned long get_debugctlmsr(void)
760{ 779{
@@ -839,6 +858,7 @@ static inline void spin_lock_prefetch(const void *x)
839 * User space process size: 3GB (default). 858 * User space process size: 3GB (default).
840 */ 859 */
841#define TASK_SIZE PAGE_OFFSET 860#define TASK_SIZE PAGE_OFFSET
861#define TASK_SIZE_MAX TASK_SIZE
842#define STACK_TOP TASK_SIZE 862#define STACK_TOP TASK_SIZE
843#define STACK_TOP_MAX STACK_TOP 863#define STACK_TOP_MAX STACK_TOP
844 864
@@ -898,7 +918,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
898/* 918/*
899 * User space process size. 47 bits minus one guard page. 919 * User space process size. 47 bits minus one guard page.
900 */ 920 */
901#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) 921#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
902 922
903/* This decides where the kernel will search for a free chunk of vm 923/* This decides where the kernel will search for a free chunk of vm
904 * space during mmap's. 924 * space during mmap's.
@@ -907,12 +927,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
907 0xc0000000 : 0xFFFFe000) 927 0xc0000000 : 0xFFFFe000)
908 928
909#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ 929#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
910 IA32_PAGE_OFFSET : TASK_SIZE64) 930 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
911#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ 931#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
912 IA32_PAGE_OFFSET : TASK_SIZE64) 932 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
913 933
914#define STACK_TOP TASK_SIZE 934#define STACK_TOP TASK_SIZE
915#define STACK_TOP_MAX TASK_SIZE64 935#define STACK_TOP_MAX TASK_SIZE_MAX
916 936
917#define INIT_THREAD { \ 937#define INIT_THREAD { \
918 .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ 938 .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index d6a22f92ba77..49fb3ecf3bb3 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);
18 18
19extern void check_efer(void); 19extern void check_efer(void);
20 20
21#ifdef CONFIG_X86_BIOS_REBOOT
22extern int reboot_force; 21extern int reboot_force;
23#else
24static const int reboot_force = 0;
25#endif
26 22
27long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); 23long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
28 24
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6d34d954c228..e304b66abeea 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -28,7 +28,7 @@ struct pt_regs {
28 int xds; 28 int xds;
29 int xes; 29 int xes;
30 int xfs; 30 int xfs;
31 /* int gs; */ 31 int xgs;
32 long orig_eax; 32 long orig_eax;
33 long eip; 33 long eip;
34 int xcs; 34 int xcs;
@@ -50,7 +50,7 @@ struct pt_regs {
50 unsigned long ds; 50 unsigned long ds;
51 unsigned long es; 51 unsigned long es;
52 unsigned long fs; 52 unsigned long fs;
53 /* int gs; */ 53 unsigned long gs;
54 unsigned long orig_ax; 54 unsigned long orig_ax;
55 unsigned long ip; 55 unsigned long ip;
56 unsigned long cs; 56 unsigned long cs;
diff --git a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
index c8e9c8bed3d0..c8e9c8bed3d0 100644
--- a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h
+++ b/arch/x86/include/asm/rdc321x_defs.h
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 2b8c5160388f..1b7ee5d673c2 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -1 +1,8 @@
1#ifndef _ASM_X86_SECTIONS_H
2#define _ASM_X86_SECTIONS_H
3
1#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5
6extern char __brk_base[], __brk_limit[];
7
8#endif /* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 1dc1b51ac623..14e0ed86a6f9 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -61,7 +61,7 @@
61 * 61 *
62 * 26 - ESPFIX small SS 62 * 26 - ESPFIX small SS
63 * 27 - per-cpu [ offset to per-cpu data area ] 63 * 27 - per-cpu [ offset to per-cpu data area ]
64 * 28 - unused 64 * 28 - stack_canary-20 [ for stack protector ]
65 * 29 - unused 65 * 29 - unused
66 * 30 - unused 66 * 30 - unused
67 * 31 - TSS for double fault handler 67 * 31 - TSS for double fault handler
@@ -95,6 +95,13 @@
95#define __KERNEL_PERCPU 0 95#define __KERNEL_PERCPU 0
96#endif 96#endif
97 97
98#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE + 16)
99#ifdef CONFIG_CC_STACKPROTECTOR
100#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY * 8)
101#else
102#define __KERNEL_STACK_CANARY 0
103#endif
104
98#define GDT_ENTRY_DOUBLEFAULT_TSS 31 105#define GDT_ENTRY_DOUBLEFAULT_TSS 31
99 106
100/* 107/*
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ebe858cdc8a3..bdc2ada05ae0 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,33 +1,19 @@
1#ifndef _ASM_X86_SETUP_H 1#ifndef _ASM_X86_SETUP_H
2#define _ASM_X86_SETUP_H 2#define _ASM_X86_SETUP_H
3 3
4#ifdef __KERNEL__
5
4#define COMMAND_LINE_SIZE 2048 6#define COMMAND_LINE_SIZE 2048
5 7
6#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
7 9
8/* Interrupt control for vSMPowered x86_64 systems */
9void vsmp_init(void);
10
11
12void setup_bios_corruption_check(void);
13
14
15#ifdef CONFIG_X86_VISWS
16extern void visws_early_detect(void);
17extern int is_visws_box(void);
18#else
19static inline void visws_early_detect(void) { }
20static inline int is_visws_box(void) { return 0; }
21#endif
22
23extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
24extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
25/* 10/*
26 * Any setup quirks to be performed? 11 * Any setup quirks to be performed?
27 */ 12 */
28struct mpc_cpu; 13struct mpc_cpu;
29struct mpc_bus; 14struct mpc_bus;
30struct mpc_oemtable; 15struct mpc_oemtable;
16
31struct x86_quirks { 17struct x86_quirks {
32 int (*arch_pre_time_init)(void); 18 int (*arch_pre_time_init)(void);
33 int (*arch_time_init)(void); 19 int (*arch_time_init)(void);
@@ -43,20 +29,19 @@ struct x86_quirks {
43 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); 29 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
44 void (*mpc_oem_pci_bus)(struct mpc_bus *m); 30 void (*mpc_oem_pci_bus)(struct mpc_bus *m);
45 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, 31 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
46 unsigned short oemsize); 32 unsigned short oemsize);
47 int (*setup_ioapic_ids)(void); 33 int (*setup_ioapic_ids)(void);
48 int (*update_genapic)(void);
49}; 34};
50 35
51extern struct x86_quirks *x86_quirks; 36extern void x86_quirk_pre_intr_init(void);
52extern unsigned long saved_video_mode; 37extern void x86_quirk_intr_init(void);
53 38
54#ifndef CONFIG_PARAVIRT 39extern void x86_quirk_trap_init(void);
55#define paravirt_post_allocator_init() do {} while (0)
56#endif
57#endif /* __ASSEMBLY__ */
58 40
59#ifdef __KERNEL__ 41extern void x86_quirk_pre_time_init(void);
42extern void x86_quirk_time_init(void);
43
44#endif /* __ASSEMBLY__ */
60 45
61#ifdef __i386__ 46#ifdef __i386__
62 47
@@ -78,6 +63,30 @@ extern unsigned long saved_video_mode;
78#ifndef __ASSEMBLY__ 63#ifndef __ASSEMBLY__
79#include <asm/bootparam.h> 64#include <asm/bootparam.h>
80 65
66/* Interrupt control for vSMPowered x86_64 systems */
67#ifdef CONFIG_X86_64
68void vsmp_init(void);
69#else
70static inline void vsmp_init(void) { }
71#endif
72
73void setup_bios_corruption_check(void);
74
75#ifdef CONFIG_X86_VISWS
76extern void visws_early_detect(void);
77extern int is_visws_box(void);
78#else
79static inline void visws_early_detect(void) { }
80static inline int is_visws_box(void) { return 0; }
81#endif
82
83extern struct x86_quirks *x86_quirks;
84extern unsigned long saved_video_mode;
85
86#ifndef CONFIG_PARAVIRT
87#define paravirt_post_allocator_init() do {} while (0)
88#endif
89
81#ifndef _SETUP 90#ifndef _SETUP
82 91
83/* 92/*
@@ -91,21 +100,51 @@ extern struct boot_params boot_params;
91 */ 100 */
92#define LOWMEMSIZE() (0x9f000) 101#define LOWMEMSIZE() (0x9f000)
93 102
103/* exceedingly early brk-like allocator */
104extern unsigned long _brk_end;
105void *extend_brk(size_t size, size_t align);
106
107/*
108 * Reserve space in the brk section. The name must be unique within
109 * the file, and somewhat descriptive. The size is in bytes. Must be
110 * used at file scope.
111 *
112 * (This uses a temporary function to wrap the asm so we can pass it the
113 * size parameter; a bare file-scope asm could not take one. We can't use a
114 * "section" attribute on a normal variable because it always ends up
115 * being @progbits, which ends up allocating space in the vmlinux
116 * executable.)
117 */
118#define RESERVE_BRK(name,sz) \
119 static void __section(.discard) __used \
120 __brk_reservation_fn_##name##__(void) { \
121 asm volatile ( \
122 ".pushsection .brk_reservation,\"aw\",@nobits;" \
123 ".brk." #name ":" \
124 " 1:.skip %c0;" \
125 " .size .brk." #name ", . - 1b;" \
126 " .popsection" \
127 : : "i" (sz)); \
128 }
129
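A hypothetical usage sketch (the reservation name and size are made up): RESERVE_BRK() goes at file scope, and extend_brk() then carves allocations out of that space during early boot, before the unused remainder is released:

RESERVE_BRK(early_scratch, 2 * PAGE_SIZE);

static void * __init early_scratch_alloc(size_t size)
{
	/* extend_brk() BUGs if the reserved space is exhausted and
	 * returns zeroed memory otherwise. */
	return extend_brk(size, sizeof(long));
}
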
94#ifdef __i386__ 130#ifdef __i386__
95 131
96void __init i386_start_kernel(void); 132void __init i386_start_kernel(void);
97extern void probe_roms(void); 133extern void probe_roms(void);
98 134
99extern unsigned long init_pg_tables_start;
100extern unsigned long init_pg_tables_end;
101
102#else 135#else
103void __init x86_64_init_pda(void);
104void __init x86_64_start_kernel(char *real_mode); 136void __init x86_64_start_kernel(char *real_mode);
105void __init x86_64_start_reservations(char *real_mode_data); 137void __init x86_64_start_reservations(char *real_mode_data);
106 138
107#endif /* __i386__ */ 139#endif /* __i386__ */
108#endif /* _SETUP */ 140#endif /* _SETUP */
141#else
142#define RESERVE_BRK(name,sz) \
143 .pushsection .brk_reservation,"aw",@nobits; \
144.brk.name: \
1451: .skip sz; \
146 .size .brk.name,.-1b; \
147 .popsection
109#endif /* __ASSEMBLY__ */ 148#endif /* __ASSEMBLY__ */
110#endif /* __KERNEL__ */ 149#endif /* __KERNEL__ */
111 150
diff --git a/arch/x86/include/asm/mach-default/setup_arch.h b/arch/x86/include/asm/setup_arch.h
index 38846208b548..38846208b548 100644
--- a/arch/x86/include/asm/mach-default/setup_arch.h
+++ b/arch/x86/include/asm/setup_arch.h
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19953df61c52..47d0e21f2b9e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -15,34 +15,8 @@
15# include <asm/io_apic.h> 15# include <asm/io_apic.h>
16# endif 16# endif
17#endif 17#endif
18#include <asm/pda.h>
19#include <asm/thread_info.h> 18#include <asm/thread_info.h>
20 19#include <asm/cpumask.h>
21#ifdef CONFIG_X86_64
22
23extern cpumask_var_t cpu_callin_mask;
24extern cpumask_var_t cpu_callout_mask;
25extern cpumask_var_t cpu_initialized_mask;
26extern cpumask_var_t cpu_sibling_setup_mask;
27
28#else /* CONFIG_X86_32 */
29
30extern cpumask_t cpu_callin_map;
31extern cpumask_t cpu_callout_map;
32extern cpumask_t cpu_initialized;
33extern cpumask_t cpu_sibling_setup_map;
34
35#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
36#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
37#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
38#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
39
40#endif /* CONFIG_X86_32 */
41
42extern void (*mtrr_hook)(void);
43extern void zap_low_mappings(void);
44
45extern int __cpuinit get_local_pda(int cpu);
46 20
47extern int smp_num_siblings; 21extern int smp_num_siblings;
48extern unsigned int num_processors; 22extern unsigned int num_processors;
@@ -50,9 +24,7 @@ extern unsigned int num_processors;
50DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 24DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
51DECLARE_PER_CPU(cpumask_t, cpu_core_map); 25DECLARE_PER_CPU(cpumask_t, cpu_core_map);
52DECLARE_PER_CPU(u16, cpu_llc_id); 26DECLARE_PER_CPU(u16, cpu_llc_id);
53#ifdef CONFIG_X86_32
54DECLARE_PER_CPU(int, cpu_number); 27DECLARE_PER_CPU(int, cpu_number);
55#endif
56 28
57static inline struct cpumask *cpu_sibling_mask(int cpu) 29static inline struct cpumask *cpu_sibling_mask(int cpu)
58{ 30{
@@ -167,8 +139,6 @@ void play_dead_common(void);
167void native_send_call_func_ipi(const struct cpumask *mask); 139void native_send_call_func_ipi(const struct cpumask *mask);
168void native_send_call_func_single_ipi(int cpu); 140void native_send_call_func_single_ipi(int cpu);
169 141
170extern void prefill_possible_map(void);
171
172void smp_store_cpu_info(int id); 142void smp_store_cpu_info(int id);
173#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) 143#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
174 144
@@ -177,10 +147,6 @@ static inline int num_booting_cpus(void)
177{ 147{
178 return cpumask_weight(cpu_callout_mask); 148 return cpumask_weight(cpu_callout_mask);
179} 149}
180#else
181static inline void prefill_possible_map(void)
182{
183}
184#endif /* CONFIG_SMP */ 150#endif /* CONFIG_SMP */
185 151
186extern unsigned disabled_cpus __cpuinitdata; 152extern unsigned disabled_cpus __cpuinitdata;
@@ -191,11 +157,11 @@ extern unsigned disabled_cpus __cpuinitdata;
191 * from the initial startup. We map APIC_BASE very early in page_setup(), 157 * from the initial startup. We map APIC_BASE very early in page_setup(),
192 * so this is correct in the x86 case. 158 * so this is correct in the x86 case.
193 */ 159 */
194#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) 160#define raw_smp_processor_id() (percpu_read(cpu_number))
195extern int safe_smp_processor_id(void); 161extern int safe_smp_processor_id(void);
196 162
197#elif defined(CONFIG_X86_64_SMP) 163#elif defined(CONFIG_X86_64_SMP)
198#define raw_smp_processor_id() read_pda(cpunumber) 164#define raw_smp_processor_id() (percpu_read(cpu_number))
199 165
200#define stack_smp_processor_id() \ 166#define stack_smp_processor_id() \
201({ \ 167({ \
@@ -205,10 +171,6 @@ extern int safe_smp_processor_id(void);
205}) 171})
206#define safe_smp_processor_id() smp_processor_id() 172#define safe_smp_processor_id() smp_processor_id()
207 173
208#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
209#define cpu_physical_id(cpu) boot_cpu_physical_apicid
210#define safe_smp_processor_id() 0
211#define stack_smp_processor_id() 0
212#endif 174#endif
213 175
214#ifdef CONFIG_X86_LOCAL_APIC 176#ifdef CONFIG_X86_LOCAL_APIC
@@ -220,28 +182,9 @@ static inline int logical_smp_processor_id(void)
220 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); 182 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
221} 183}
222 184
223#include <mach_apicdef.h>
224static inline unsigned int read_apic_id(void)
225{
226 unsigned int reg;
227
228 reg = *(u32 *)(APIC_BASE + APIC_ID);
229
230 return GET_APIC_ID(reg);
231}
232#endif 185#endif
233 186
234
235# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
236extern int hard_smp_processor_id(void); 187extern int hard_smp_processor_id(void);
237# else
238#include <mach_apicdef.h>
239static inline int hard_smp_processor_id(void)
240{
241 /* we don't want to mark this access volatile - bad code generation */
242 return read_apic_id();
243}
244# endif /* APIC_DEFINITION */
245 188
246#else /* CONFIG_X86_LOCAL_APIC */ 189#else /* CONFIG_X86_LOCAL_APIC */
247 190
@@ -251,11 +194,5 @@ static inline int hard_smp_processor_id(void)
251 194
252#endif /* CONFIG_X86_LOCAL_APIC */ 195#endif /* CONFIG_X86_LOCAL_APIC */
253 196
254#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
255extern unsigned char boot_cpu_id;
256#else
257#define boot_cpu_id 0
258#endif
259
260#endif /* __ASSEMBLY__ */ 197#endif /* __ASSEMBLY__ */
261#endif /* _ASM_X86_SMP_H */ 198#endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 23bf52103b89..1def60114906 100644
--- a/arch/x86/include/asm/mach-default/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
13 CMOS_WRITE(0xa, 0xf); 13 CMOS_WRITE(0xa, 0xf);
14 local_flush_tlb(); 14 local_flush_tlb();
15 pr_debug("1.\n"); 15 pr_debug("1.\n");
16 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = 16 *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
17 start_eip >> 4; 17 start_eip >> 4;
18 pr_debug("2.\n"); 18 pr_debug("2.\n");
19 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 19 *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
20 start_eip & 0xf; 20 start_eip & 0xf;
21 pr_debug("3.\n"); 21 pr_debug("3.\n");
22} 22}
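The two halves written above form the real-mode segment:offset pair for the warm-reset vector: the CPU resumes at seg:off, which decodes to seg * 16 + off. A standalone check of the arithmetic (the trampoline address is an example value):

#include <assert.h>

int main(void)
{
	unsigned long start_eip = 0x9a000;	/* example trampoline addr */
	unsigned short seg = start_eip >> 4;	/* -> trampoline_phys_high */
	unsigned short off = start_eip & 0xf;	/* -> trampoline_phys_low */

	assert((unsigned long)seg * 16 + off == start_eip);
	return 0;
}
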
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
34 */ 34 */
35 CMOS_WRITE(0, 0xf); 35 CMOS_WRITE(0, 0xf);
36 36
37 *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; 37 *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
38} 38}
39 39
40static inline void __init smpboot_setup_io_apic(void) 40static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h
index 8ab9cc8b2ecc..ca8bf2cd0ba9 100644
--- a/arch/x86/include/asm/socket.h
+++ b/arch/x86/include/asm/socket.h
@@ -54,4 +54,7 @@
54 54
55#define SO_MARK 36 55#define SO_MARK 36
56 56
57#define SO_TIMESTAMPING 37
58#define SCM_TIMESTAMPING SO_TIMESTAMPING
59
57#endif /* _ASM_X86_SOCKET_H */ 60#endif /* _ASM_X86_SOCKET_H */
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 8247e94ac6b1..e5e6caffec87 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
172 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1; 172 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
173} 173}
174 174
175#ifdef CONFIG_PARAVIRT 175#ifndef CONFIG_PARAVIRT
176/*
177 * Define virtualization-friendly old-style lock byte lock, for use in
178 * pv_lock_ops if desired.
179 *
180 * This differs from the pre-2.6.24 spinlock by always using xchgb
181 * rather than decb to take the lock; this allows it to use a
182 * zero-initialized lock structure. It also maintains a 1-byte
183 * contention counter, so that we can implement
184 * __byte_spin_is_contended.
185 */
186struct __byte_spinlock {
187 s8 lock;
188 s8 spinners;
189};
190
191static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
192{
193 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
194 return bl->lock != 0;
195}
196 176
197static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
198{
199 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
200 return bl->spinners != 0;
201}
202
203static inline void __byte_spin_lock(raw_spinlock_t *lock)
204{
205 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
206 s8 val = 1;
207
208 asm("1: xchgb %1, %0\n"
209 " test %1,%1\n"
210 " jz 3f\n"
211 " " LOCK_PREFIX "incb %2\n"
212 "2: rep;nop\n"
213 " cmpb $1, %0\n"
214 " je 2b\n"
215 " " LOCK_PREFIX "decb %2\n"
216 " jmp 1b\n"
217 "3:"
218 : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
219}
220
221static inline int __byte_spin_trylock(raw_spinlock_t *lock)
222{
223 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
224 u8 old = 1;
225
226 asm("xchgb %1,%0"
227 : "+m" (bl->lock), "+q" (old) : : "memory");
228
229 return old == 0;
230}
231
232static inline void __byte_spin_unlock(raw_spinlock_t *lock)
233{
234 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
235 smp_wmb();
236 bl->lock = 0;
237}
238#else /* !CONFIG_PARAVIRT */
239static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 177static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
240{ 178{
241 return __ticket_spin_is_locked(lock); 179 return __ticket_spin_is_locked(lock);
@@ -268,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
268 __raw_spin_lock(lock); 206 __raw_spin_lock(lock);
269} 207}
270 208
271#endif /* CONFIG_PARAVIRT */ 209#endif
272 210
273static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 211static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
274{ 212{
@@ -330,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
330{ 268{
331 atomic_t *count = (atomic_t *)lock; 269 atomic_t *count = (atomic_t *)lock;
332 270
333 atomic_dec(count); 271 if (atomic_dec_return(count) >= 0)
334 if (atomic_read(count) >= 0)
335 return 1; 272 return 1;
336 atomic_inc(count); 273 atomic_inc(count);
337 return 0; 274 return 0;
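The change above closes a small race: decrementing and then re-reading the counter as two separate atomic operations meant the test could observe a value already perturbed by another CPU, whereas atomic_dec_return() ties the decision to this CPU's own decrement. A rough standalone analogue using C11 atomics (a sketch of the pattern, not the kernel's rwlock semantics):

#include <stdatomic.h>
#include <stdbool.h>

static bool read_trylock_sketch(atomic_int *count)
{
	/* atomic_fetch_sub() returns the old value; old - 1 is what
	 * atomic_dec_return() would have reported. */
	if (atomic_fetch_sub(count, 1) - 1 >= 0)
		return true;
	atomic_fetch_add(count, 1);	/* undo: a writer holds the lock */
	return false;
}
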
@@ -358,6 +295,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
358 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); 295 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
359} 296}
360 297
298#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
299#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
300
361#define _raw_spin_relax(lock) cpu_relax() 301#define _raw_spin_relax(lock) cpu_relax()
362#define _raw_read_relax(lock) cpu_relax() 302#define _raw_read_relax(lock) cpu_relax()
363#define _raw_write_relax(lock) cpu_relax() 303#define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
new file mode 100644
index 000000000000..c2d742c6e15f
--- /dev/null
+++ b/arch/x86/include/asm/stackprotector.h
@@ -0,0 +1,124 @@
1/*
2 * GCC stack protector support.
3 *
4 * Stack protector works by putting a predefined pattern at the start of
5 * the stack frame and verifying that it hasn't been overwritten when
6 * returning from the function. The pattern is called the stack canary,
7 * and unfortunately gcc requires it to be at a fixed offset from %gs.
8 * On x86_64 the offset is 40 bytes and on x86_32 it is 20 bytes. x86_64
9 * and x86_32 use segment registers differently and thus handle this
10 * requirement differently.
11 *
12 * On x86_64, %gs is shared by percpu area and stack canary. All
13 * percpu symbols are zero based and %gs points to the base of percpu
14 * area. The first occupant of the percpu area is always
15 * irq_stack_union which contains stack_canary at offset 40. Userland
16 * %gs is always saved and restored on kernel entry and exit using
17 * swapgs, so stack protector doesn't add any complexity there.
18 *
19 * On x86_32, it's slightly more complicated. As in x86_64, %gs is
20 * used for userland TLS. Unfortunately, some processors are much
21 * slower at loading segment registers with a different value when
22 * entering and leaving the kernel, so the kernel uses %fs for percpu
23 * area and manages %gs lazily so that %gs is switched only when
24 * necessary, usually during task switch.
25 *
26 * As gcc requires the stack canary at %gs:20, %gs can't be managed
27 * lazily if stack protector is enabled, so the kernel saves and
28 * restores userland %gs on kernel entry and exit. This behavior is
29 * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
30 * system.h to hide the details.
31 */
32
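For illustration, this is roughly the code gcc emits around a protected kernel function on x86_64 (a simplified sketch; the exact instruction sequence varies by compiler version):

	movq	%gs:40, %rax		# load the canary into the frame
	movq	%rax, -8(%rbp)
	...				# function body
	movq	-8(%rbp), %rax
	xorq	%gs:40, %rax		# recheck on return
	jne	__stack_chk_fail	# mismatch: the stack was smashed
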
33#ifndef _ASM_STACKPROTECTOR_H
34#define _ASM_STACKPROTECTOR_H 1
35
36#ifdef CONFIG_CC_STACKPROTECTOR
37
38#include <asm/tsc.h>
39#include <asm/processor.h>
40#include <asm/percpu.h>
41#include <asm/system.h>
42#include <asm/desc.h>
43#include <linux/random.h>
44
45/*
46 * 24-byte read-only segment initializer for the stack canary. The linker
47 * can't handle the address bit shifting, so the address is set in head_32
48 * for the boot CPU and in setup_per_cpu_areas() for the others.
49 */
50#define GDT_STACK_CANARY_INIT \
51 [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
52
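Decoding those two descriptor words against the standard GDT descriptor layout (a worked check of the comment above, not new code):

	0x00000018 -> limit[15:0] = 0x18 (24 bytes), base[15:0] = 0
	0x00409000 -> base[23:16] = 0, access byte 0x90 (present, DPL 0,
	              read-only data), flags 0x4 (32-bit, byte granular)

That is, a 24-byte, byte-granular, read-only data segment whose base is patched in at runtime, exactly as the comment says.
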
53/*
54 * Initialize the stackprotector canary value.
55 *
56 * NOTE: this must only be called from functions that never return,
57 * and it must always be inlined.
58 */
59static __always_inline void boot_init_stack_canary(void)
60{
61 u64 canary;
62 u64 tsc;
63
64#ifdef CONFIG_X86_64
65 BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
66#endif
67 /*
68 * We use both the random pool and the current TSC as sources
69 * of randomness. The TSC only matters for very early init,
70 * where it already has some randomness on most systems. Later
71 * on during bootup the random pool gains true entropy too.
72 */
73 get_random_bytes(&canary, sizeof(canary));
74 tsc = __native_read_tsc();
75 canary += tsc + (tsc << 32UL);
76
77 current->stack_canary = canary;
78#ifdef CONFIG_X86_64
79 percpu_write(irq_stack_union.stack_canary, canary);
80#else
81 percpu_write(stack_canary, canary);
82#endif
83}
84
85static inline void setup_stack_canary_segment(int cpu)
86{
87#ifdef CONFIG_X86_32
88 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
89 struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
90 struct desc_struct desc;
91
92 desc = gdt_table[GDT_ENTRY_STACK_CANARY];
93 desc.base0 = canary & 0xffff;
94 desc.base1 = (canary >> 16) & 0xff;
95 desc.base2 = (canary >> 24) & 0xff;
96 write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
97#endif
98}
99
100static inline void load_stack_canary_segment(void)
101{
102#ifdef CONFIG_X86_32
103 asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
104#endif
105}
106
107#else /* CC_STACKPROTECTOR */
108
109#define GDT_STACK_CANARY_INIT
110
111/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
112
113static inline void setup_stack_canary_segment(int cpu)
114{ }
115
116static inline void load_stack_canary_segment(void)
117{
118#ifdef CONFIG_X86_32
119 asm volatile ("mov %0, %%gs" : : "r" (0));
120#endif
121}
122
123#endif /* CC_STACKPROTECTOR */
124#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
deleted file mode 100644
index 93d2c8667cfe..000000000000
--- a/arch/x86/include/asm/summit/apic.h
+++ /dev/null
@@ -1,202 +0,0 @@
1#ifndef __ASM_SUMMIT_APIC_H
2#define __ASM_SUMMIT_APIC_H
3
4#include <asm/smp.h>
5#include <linux/gfp.h>
6
7#define esr_disable (1)
8#define NO_BALANCE_IRQ (0)
9
10/* In clustered mode, the high nibble of APIC ID is a cluster number.
11 * The low nibble is a 4-bit bitmap. */
12#define XAPIC_DEST_CPUS_SHIFT 4
13#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
14#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
15
16#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
17
18static inline const cpumask_t *target_cpus(void)
19{
20 /* CPU_MASK_ALL (0xff) has undefined behaviour with
21 * dest_LowestPrio mode logical clustered apic interrupt routing.
22 * Just start on cpu 0; IRQ balancing will spread the load.
23 */
24 return &cpumask_of_cpu(0);
25}
26
27#define INT_DELIVERY_MODE (dest_LowestPrio)
28#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
29
30static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
31{
32 return 0;
33}
34
35/* we don't use the phys_cpu_present_map to indicate apicid presence */
36static inline unsigned long check_apicid_present(int bit)
37{
38 return 1;
39}
40
41#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
42
43extern u8 cpu_2_logical_apicid[];
44
45static inline void init_apic_ldr(void)
46{
47 unsigned long val, id;
48 int count = 0;
49 u8 my_id = (u8)hard_smp_processor_id();
50 u8 my_cluster = (u8)apicid_cluster(my_id);
51#ifdef CONFIG_SMP
52 u8 lid;
53 int i;
54
55 /* Create logical APIC IDs by counting CPUs already in cluster. */
56 for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
57 lid = cpu_2_logical_apicid[i];
58 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
59 ++count;
60 }
61#endif
62 /* We only have a 4-wide bitmap in cluster mode. If a deranged
63 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
64 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
65 id = my_cluster | (1UL << count);
66 apic_write(APIC_DFR, APIC_DFR_VALUE);
67 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
68 val |= SET_APIC_LOGICAL_ID(id);
69 apic_write(APIC_LDR, val);
70}
71
72static inline int multi_timer_check(int apic, int irq)
73{
74 return 0;
75}
76
77static inline int apic_id_registered(void)
78{
79 return 1;
80}
81
82static inline void setup_apic_routing(void)
83{
84 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
85 nr_ioapics);
86}
87
88static inline int apicid_to_node(int logical_apicid)
89{
90#ifdef CONFIG_SMP
91 return apicid_2_node[hard_smp_processor_id()];
92#else
93 return 0;
94#endif
95}
96
97/* Mapping from cpu number to logical apicid */
98static inline int cpu_to_logical_apicid(int cpu)
99{
100#ifdef CONFIG_SMP
101 if (cpu >= nr_cpu_ids)
102 return BAD_APICID;
103 return (int)cpu_2_logical_apicid[cpu];
104#else
105 return logical_smp_processor_id();
106#endif
107}
108
109static inline int cpu_present_to_apicid(int mps_cpu)
110{
111 if (mps_cpu < nr_cpu_ids)
112 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
113 else
114 return BAD_APICID;
115}
116
117static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
118{
119 /* For clustered we don't have a good way to do this yet - hack */
120 return physids_promote(0x0F);
121}
122
123static inline physid_mask_t apicid_to_cpu_present(int apicid)
124{
125 return physid_mask_of_physid(0);
126}
127
128static inline void setup_portio_remap(void)
129{
130}
131
132static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
133{
134 return 1;
135}
136
137static inline void enable_apic_mode(void)
138{
139}
140
141static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
142{
143 int num_bits_set;
144 int cpus_found = 0;
145 int cpu;
146 int apicid;
147
148 num_bits_set = cpus_weight(*cpumask);
149 /* Return id to all */
150 if (num_bits_set >= nr_cpu_ids)
151 return (int) 0xFF;
152 /*
153 * The cpus in the mask must all be on the same apic cluster. If they
154 * are not, return the broadcast value 0xFF instead.
155 */
156 cpu = first_cpu(*cpumask);
157 apicid = cpu_to_logical_apicid(cpu);
158 while (cpus_found < num_bits_set) {
159 if (cpu_isset(cpu, *cpumask)) {
160 int new_apicid = cpu_to_logical_apicid(cpu);
161 if (apicid_cluster(apicid) !=
162 apicid_cluster(new_apicid)){
163 printk ("%s: Not a valid mask!\n", __func__);
164 return 0xFF;
165 }
166 apicid = apicid | new_apicid;
167 cpus_found++;
168 }
169 cpu++;
170 }
171 return apicid;
172}
173
174static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
175 const struct cpumask *andmask)
176{
177 int apicid = cpu_to_logical_apicid(0);
178 cpumask_var_t cpumask;
179
180 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
181 return apicid;
182
183 cpumask_and(cpumask, inmask, andmask);
184 cpumask_and(cpumask, cpumask, cpu_online_mask);
185 apicid = cpu_mask_to_apicid(cpumask);
186
187 free_cpumask_var(cpumask);
188 return apicid;
189}
190
191/* cpuid returns the value latched in the HW at reset, not the APIC ID
192 * register's value. For any box whose BIOS changes APIC IDs, like
193 * clustered APIC systems, we must use hard_smp_processor_id.
194 *
195 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
196 */
197static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
198{
199 return hard_smp_processor_id() >> index_msb;
200}
201
202#endif /* __ASM_SUMMIT_APIC_H */
diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h
deleted file mode 100644
index f3fbca1f61c1..000000000000
--- a/arch/x86/include/asm/summit/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_SUMMIT_APICDEF_H
2#define __ASM_SUMMIT_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (x>>24)&0xFF;
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
deleted file mode 100644
index a8a2c24f50cc..000000000000
--- a/arch/x86/include/asm/summit/ipi.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef __ASM_SUMMIT_IPI_H
2#define __ASM_SUMMIT_IPI_H
3
4void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
5void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
6
7static inline void send_IPI_mask(const cpumask_t *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 cpumask_t mask = cpu_online_map;
15 cpu_clear(smp_processor_id(), mask);
16
17 if (!cpus_empty(mask))
18 send_IPI_mask(&mask, vector);
19}
20
21static inline void send_IPI_all(int vector)
22{
23 send_IPI_mask(&cpu_online_map, vector);
24}
25
26#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
deleted file mode 100644
index 380e86c02363..000000000000
--- a/arch/x86/include/asm/summit/mpparse.h
+++ /dev/null
@@ -1,109 +0,0 @@
1#ifndef __ASM_SUMMIT_MPPARSE_H
2#define __ASM_SUMMIT_MPPARSE_H
3
4#include <asm/tsc.h>
5
6extern int use_cyclone;
7
8#ifdef CONFIG_X86_SUMMIT_NUMA
9extern void setup_summit(void);
10#else
11#define setup_summit() {}
12#endif
13
14static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
15 char *productid)
16{
17 if (!strncmp(oem, "IBM ENSW", 8) &&
18 (!strncmp(productid, "VIGIL SMP", 9)
19 || !strncmp(productid, "EXA", 3)
20 || !strncmp(productid, "RUTHLESS SMP", 12))){
21 mark_tsc_unstable("Summit based system");
22 use_cyclone = 1; /*enable cyclone-timer*/
23 setup_summit();
24 return 1;
25 }
26 return 0;
27}
28
29/* Hook from generic ACPI tables.c */
30static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
31{
32 if (!strncmp(oem_id, "IBM", 3) &&
33 (!strncmp(oem_table_id, "SERVIGIL", 8)
34 || !strncmp(oem_table_id, "EXA", 3))){
35 mark_tsc_unstable("Summit based system");
36 use_cyclone = 1; /*enable cyclone-timer*/
37 setup_summit();
38 return 1;
39 }
40 return 0;
41}
42
43struct rio_table_hdr {
44 unsigned char version; /* Version number of this data structure */
45 /* Version 3 adds chassis_num & WP_index */
46 unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
47 unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
48} __attribute__((packed));
49
50struct scal_detail {
51 unsigned char node_id; /* Scalability Node ID */
52 unsigned long CBAR; /* Address of 1MB register space */
53 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
54 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
55 unsigned char port1node; /* Node ID port connected to: 0xFF = None */
56 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
57 unsigned char port2node; /* Node ID port connected to: 0xFF = None */
58 unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
59 unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
60} __attribute__((packed));
61
62struct rio_detail {
63 unsigned char node_id; /* RIO Node ID */
64 unsigned long BBAR; /* Address of 1MB register space */
65 unsigned char type; /* Type of device */
66 unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
67 /* For CYC: Node ID of Twister that owns this CYC */
68 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
69 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
70 unsigned char port1node; /* Node ID port connected to: 0xFF=None */
71 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
72 unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
73 /* For CYC: 0 */
74 unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
75 /* = 0 : the XAPIC is not used, ie:*/
76 /* ints fwded to another XAPIC */
77 /* Bits1:7 Reserved */
78 /* For CYC: Bits0:7 Reserved */
79 unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
80 /* lower slot numbers/PCI bus numbers */
81 /* For CYC: No meaning */
82 unsigned char chassis_num; /* 1 based Chassis number */
83 /* For LookOut WPEGs this field indicates the */
84 /* Expansion Chassis #, enumerated from Boot */
85 /* Node WPEG external port, then Boot Node CYC */
86 /* external port, then Next Vigil chassis WPEG */
87 /* external port, etc. */
88 /* Shared Lookouts have only 1 chassis number (the */
89 /* first one assigned) */
90} __attribute__((packed));
91
92
93typedef enum {
94 CompatTwister = 0, /* Compatibility Twister */
95 AltTwister = 1, /* Alternate Twister of internal 8-way */
96 CompatCyclone = 2, /* Compatibility Cyclone */
97 AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
98 CompatWPEG = 4, /* Compatibility WPEG */
99 AltWPEG = 5, /* Second Planar WPEG */
100 LookOutAWPEG = 6, /* LookOut WPEG */
101 LookOutBWPEG = 7, /* LookOut WPEG */
102} node_type;
103
104static inline int is_WPEG(struct rio_detail *rio){
105 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
106 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
107}
108
109#endif /* __ASM_SUMMIT_MPPARSE_H */
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index a5074bd0f8be..48dcfa62ea07 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -24,28 +24,4 @@ struct saved_context {
24 unsigned long return_address; 24 unsigned long return_address;
25} __attribute__((packed)); 25} __attribute__((packed));
26 26
27#ifdef CONFIG_ACPI
28extern unsigned long saved_eip;
29extern unsigned long saved_esp;
30extern unsigned long saved_ebp;
31extern unsigned long saved_ebx;
32extern unsigned long saved_esi;
33extern unsigned long saved_edi;
34
35static inline void acpi_save_register_state(unsigned long return_point)
36{
37 saved_eip = return_point;
38 asm volatile("movl %%esp,%0" : "=m" (saved_esp));
39 asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
40 asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
41 asm volatile("movl %%edi,%0" : "=m" (saved_edi));
42 asm volatile("movl %%esi,%0" : "=m" (saved_esi));
43}
44
45#define acpi_restore_register_state() do {} while (0)
46
47/* routines for saving/restoring kernel state */
48extern int acpi_save_state_mem(void);
49#endif
50
51#endif /* _ASM_X86_SUSPEND_32_H */ 27#endif /* _ASM_X86_SUSPEND_32_H */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 1b8afa78e869..82ada75f3ebf 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -174,10 +174,6 @@ struct __attribute__ ((__packed__)) vmcb {
174#define SVM_CPUID_FEATURE_SHIFT 2 174#define SVM_CPUID_FEATURE_SHIFT 2
175#define SVM_CPUID_FUNC 0x8000000a 175#define SVM_CPUID_FUNC 0x8000000a
176 176
177#define MSR_EFER_SVME_MASK (1ULL << 12)
178#define MSR_VM_CR 0xc0010114
179#define MSR_VM_HSAVE_PA 0xc0010117ULL
180
181#define SVM_VM_CR_SVM_DISABLE 4 177#define SVM_VM_CR_SVM_DISABLE 4
182 178
183#define SVM_SELECTOR_S_SHIFT 4 179#define SVM_SELECTOR_S_SHIFT 4
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index ffb08be2a530..72a6dcd1299b 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -70,8 +70,6 @@ struct old_utsname;
70asmlinkage long sys32_olduname(struct oldold_utsname __user *); 70asmlinkage long sys32_olduname(struct oldold_utsname __user *);
71long sys32_uname(struct old_utsname __user *); 71long sys32_uname(struct old_utsname __user *);
72 72
73long sys32_ustat(unsigned, struct ustat32 __user *);
74
75asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, 73asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *,
76 compat_uptr_t __user *, struct pt_regs *); 74 compat_uptr_t __user *, struct pt_regs *);
77asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); 75asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index c0b0bda754ee..7043408f6904 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -29,21 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
29/* X86_32 only */ 29/* X86_32 only */
30#ifdef CONFIG_X86_32 30#ifdef CONFIG_X86_32
31/* kernel/process_32.c */ 31/* kernel/process_32.c */
32asmlinkage int sys_fork(struct pt_regs); 32int sys_fork(struct pt_regs *);
33asmlinkage int sys_clone(struct pt_regs); 33int sys_clone(struct pt_regs *);
34asmlinkage int sys_vfork(struct pt_regs); 34int sys_vfork(struct pt_regs *);
35asmlinkage int sys_execve(struct pt_regs); 35int sys_execve(struct pt_regs *);
36 36
37/* kernel/signal_32.c */ 37/* kernel/signal_32.c */
38asmlinkage int sys_sigsuspend(int, int, old_sigset_t); 38asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
39asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, 39asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
40 struct old_sigaction __user *); 40 struct old_sigaction __user *);
41asmlinkage int sys_sigaltstack(unsigned long); 41int sys_sigaltstack(struct pt_regs *);
42asmlinkage unsigned long sys_sigreturn(unsigned long); 42unsigned long sys_sigreturn(struct pt_regs *);
43asmlinkage int sys_rt_sigreturn(unsigned long); 43long sys_rt_sigreturn(struct pt_regs *);
44 44
45/* kernel/ioport.c */ 45/* kernel/ioport.c */
46asmlinkage long sys_iopl(unsigned long); 46long sys_iopl(struct pt_regs *);
47 47
48/* kernel/sys_i386_32.c */ 48/* kernel/sys_i386_32.c */
49asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, 49asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
@@ -59,8 +59,8 @@ struct oldold_utsname;
59asmlinkage int sys_olduname(struct oldold_utsname __user *); 59asmlinkage int sys_olduname(struct oldold_utsname __user *);
60 60
61/* kernel/vm86_32.c */ 61/* kernel/vm86_32.c */
62asmlinkage int sys_vm86old(struct pt_regs); 62int sys_vm86old(struct pt_regs *);
63asmlinkage int sys_vm86(struct pt_regs); 63int sys_vm86(struct pt_regs *);
64 64
65#else /* CONFIG_X86_32 */ 65#else /* CONFIG_X86_32 */
66 66
@@ -74,6 +74,7 @@ asmlinkage long sys_vfork(struct pt_regs *);
74asmlinkage long sys_execve(char __user *, char __user * __user *, 74asmlinkage long sys_execve(char __user *, char __user * __user *,
75 char __user * __user *, 75 char __user * __user *,
76 struct pt_regs *); 76 struct pt_regs *);
77long sys_arch_prctl(int, unsigned long);
77 78
78/* kernel/ioport.c */ 79/* kernel/ioport.c */
79asmlinkage long sys_iopl(unsigned int, struct pt_regs *); 80asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
@@ -81,7 +82,7 @@ asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
81/* kernel/signal_64.c */ 82/* kernel/signal_64.c */
82asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, 83asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
83 struct pt_regs *); 84 struct pt_regs *);
84asmlinkage long sys_rt_sigreturn(struct pt_regs *); 85long sys_rt_sigreturn(struct pt_regs *);
85 86
86/* kernel/sys_x86_64.c */ 87/* kernel/sys_x86_64.c */
87asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, 88asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 8e626ea33a1a..643c59b4bc6e 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -20,9 +20,26 @@
20struct task_struct; /* one of the stranger aspects of C forward declarations */ 20struct task_struct; /* one of the stranger aspects of C forward declarations */
21struct task_struct *__switch_to(struct task_struct *prev, 21struct task_struct *__switch_to(struct task_struct *prev,
22 struct task_struct *next); 22 struct task_struct *next);
23struct tss_struct;
24void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
25 struct tss_struct *tss);
23 26
24#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
25 28
29#ifdef CONFIG_CC_STACKPROTECTOR
30#define __switch_canary \
31 "movl %P[task_canary](%[next]), %%ebx\n\t" \
32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
33#define __switch_canary_oparam \
34 , [stack_canary] "=m" (per_cpu_var(stack_canary))
35#define __switch_canary_iparam \
36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
37#else /* CC_STACKPROTECTOR */
38#define __switch_canary
39#define __switch_canary_oparam
40#define __switch_canary_iparam
41#endif /* CC_STACKPROTECTOR */
42
26/* 43/*
27 * Saving eflags is important. It switches not only IOPL between tasks, 44 * Saving eflags is important. It switches not only IOPL between tasks,
28 * it also protects other tasks from NT leaking through sysenter etc. 45 * it also protects other tasks from NT leaking through sysenter etc.
@@ -44,6 +61,7 @@ do { \
44 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ 61 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
45 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ 62 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
46 "pushl %[next_ip]\n\t" /* restore EIP */ \ 63 "pushl %[next_ip]\n\t" /* restore EIP */ \
64 __switch_canary \
47 "jmp __switch_to\n" /* regparm call */ \ 65 "jmp __switch_to\n" /* regparm call */ \
48 "1:\t" \ 66 "1:\t" \
49 "popl %%ebp\n\t" /* restore EBP */ \ 67 "popl %%ebp\n\t" /* restore EBP */ \
@@ -58,6 +76,8 @@ do { \
58 "=b" (ebx), "=c" (ecx), "=d" (edx), \ 76 "=b" (ebx), "=c" (ecx), "=d" (edx), \
59 "=S" (esi), "=D" (edi) \ 77 "=S" (esi), "=D" (edi) \
60 \ 78 \
79 __switch_canary_oparam \
80 \
61 /* input parameters: */ \ 81 /* input parameters: */ \
62 : [next_sp] "m" (next->thread.sp), \ 82 : [next_sp] "m" (next->thread.sp), \
63 [next_ip] "m" (next->thread.ip), \ 83 [next_ip] "m" (next->thread.ip), \
@@ -66,6 +86,8 @@ do { \
66 [prev] "a" (prev), \ 86 [prev] "a" (prev), \
67 [next] "d" (next) \ 87 [next] "d" (next) \
68 \ 88 \
89 __switch_canary_iparam \
90 \
69 : /* reloaded segment registers */ \ 91 : /* reloaded segment registers */ \
70 "memory"); \ 92 "memory"); \
71} while (0) 93} while (0)
@@ -86,27 +108,44 @@ do { \
86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ 108 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
87 "r12", "r13", "r14", "r15" 109 "r12", "r13", "r14", "r15"
88 110
111#ifdef CONFIG_CC_STACKPROTECTOR
112#define __switch_canary \
113 "movq %P[task_canary](%%rsi),%%r8\n\t" \
114 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
115#define __switch_canary_oparam \
116 , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
117#define __switch_canary_iparam \
118 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
119#else /* CC_STACKPROTECTOR */
120#define __switch_canary
121#define __switch_canary_oparam
122#define __switch_canary_iparam
123#endif /* CC_STACKPROTECTOR */
124
89/* Save restore flags to clear handle leaking NT */ 125/* Save restore flags to clear handle leaking NT */
90#define switch_to(prev, next, last) \ 126#define switch_to(prev, next, last) \
91 asm volatile(SAVE_CONTEXT \ 127 asm volatile(SAVE_CONTEXT \
92 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ 128 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
93 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ 129 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
94 "call __switch_to\n\t" \ 130 "call __switch_to\n\t" \
95 ".globl thread_return\n" \ 131 ".globl thread_return\n" \
96 "thread_return:\n\t" \ 132 "thread_return:\n\t" \
97 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ 133 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
134 __switch_canary \
98 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 135 "movq %P[thread_info](%%rsi),%%r8\n\t" \
99 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
100 "movq %%rax,%%rdi\n\t" \ 136 "movq %%rax,%%rdi\n\t" \
101 "jc ret_from_fork\n\t" \ 137 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
138 "jnz ret_from_fork\n\t" \
102 RESTORE_CONTEXT \ 139 RESTORE_CONTEXT \
103 : "=a" (last) \ 140 : "=a" (last) \
141 __switch_canary_oparam \
104 : [next] "S" (next), [prev] "D" (prev), \ 142 : [next] "S" (next), [prev] "D" (prev), \
105 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ 143 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
106 [ti_flags] "i" (offsetof(struct thread_info, flags)), \ 144 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
107 [tif_fork] "i" (TIF_FORK), \ 145 [_tif_fork] "i" (_TIF_FORK), \
108 [thread_info] "i" (offsetof(struct task_struct, stack)), \ 146 [thread_info] "i" (offsetof(struct task_struct, stack)), \
109 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ 147 [current_task] "m" (per_cpu_var(current_task)) \
148 __switch_canary_iparam \
110 : "memory", "cc" __EXTRA_CLOBBER) 149 : "memory", "cc" __EXTRA_CLOBBER)
111#endif 150#endif
112 151
@@ -165,6 +204,25 @@ extern void native_load_gs_index(unsigned);
165#define savesegment(seg, value) \ 204#define savesegment(seg, value) \
166 asm("mov %%" #seg ",%0":"=r" (value) : : "memory") 205 asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
167 206
207/*
208 * x86_32 user gs accessors.
209 */
210#ifdef CONFIG_X86_32
211#ifdef CONFIG_X86_32_LAZY_GS
212#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
213#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
214#define task_user_gs(tsk) ((tsk)->thread.gs)
215#define lazy_save_gs(v) savesegment(gs, (v))
216#define lazy_load_gs(v) loadsegment(gs, (v))
217#else /* X86_32_LAZY_GS */
218#define get_user_gs(regs) (u16)((regs)->gs)
219#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
220#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
221#define lazy_save_gs(v) do { } while (0)
222#define lazy_load_gs(v) do { } while (0)
223#endif /* X86_32_LAZY_GS */
224#endif /* X86_32 */
225
168static inline unsigned long get_limit(unsigned long segment) 226static inline unsigned long get_limit(unsigned long segment)
169{ 227{
170 unsigned long __limit; 228 unsigned long __limit;
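[editor's example] The __switch_canary machinery added above copies the incoming task's stack_canary into the per-cpu slot that -fstack-protector code reads, before the new task starts running. A user-space model of the hand-off, with a fake task struct and a plain global standing in for the %gs-based per-cpu slot:

#include <stddef.h>
#include <stdio.h>

struct task { unsigned long stack_canary; };

static unsigned long percpu_stack_canary;	/* stands in for the per-cpu slot */

static void switch_canary(const struct task *next)
{
	/* same idea as the __switch_canary asm: load next->stack_canary
	 * at a compile-time offsetof and store it in the per-cpu slot
	 * before 'next' runs with its own stack frames */
	percpu_stack_canary = *(const unsigned long *)
		((const char *)next + offsetof(struct task, stack_canary));
}

int main(void)
{
	struct task a = { .stack_canary = 0x1234 }, b = { .stack_canary = 0x5678 };

	switch_canary(&a);
	printf("canary now %#lx\n", percpu_stack_canary);
	switch_canary(&b);
	printf("canary now %#lx\n", percpu_stack_canary);
	return 0;
}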
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 98789647baa9..df9d5f78385e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct thread_info {
40 */ 40 */
41 __u8 supervisor_stack[0]; 41 __u8 supervisor_stack[0];
42#endif 42#endif
43 int uaccess_err;
43}; 44};
44 45
45#define INIT_THREAD_INFO(tsk) \ 46#define INIT_THREAD_INFO(tsk) \
@@ -194,25 +195,21 @@ static inline struct thread_info *current_thread_info(void)
194 195
195#else /* X86_32 */ 196#else /* X86_32 */
196 197
197#include <asm/pda.h> 198#include <asm/percpu.h>
199#define KERNEL_STACK_OFFSET (5*8)
198 200
199/* 201/*
200 * macros/functions for gaining access to the thread information structure 202 * macros/functions for gaining access to the thread information structure
201 * preempt_count needs to be 1 initially, until the scheduler is functional. 203 * preempt_count needs to be 1 initially, until the scheduler is functional.
202 */ 204 */
203#ifndef __ASSEMBLY__ 205#ifndef __ASSEMBLY__
204static inline struct thread_info *current_thread_info(void) 206DECLARE_PER_CPU(unsigned long, kernel_stack);
205{
206 struct thread_info *ti;
207 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
208 return ti;
209}
210 207
211/* do not use in interrupt context */ 208static inline struct thread_info *current_thread_info(void)
212static inline struct thread_info *stack_thread_info(void)
213{ 209{
214 struct thread_info *ti; 210 struct thread_info *ti;
215 asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); 211 ti = (void *)(percpu_read(kernel_stack) +
212 KERNEL_STACK_OFFSET - THREAD_SIZE);
216 return ti; 213 return ti;
217} 214}
218 215
@@ -220,8 +217,8 @@ static inline struct thread_info *stack_thread_info(void)
220 217
221/* how to get the thread information struct from ASM */ 218/* how to get the thread information struct from ASM */
222#define GET_THREAD_INFO(reg) \ 219#define GET_THREAD_INFO(reg) \
223 movq %gs:pda_kernelstack,reg ; \ 220 movq PER_CPU_VAR(kernel_stack),reg ; \
224 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg 221 subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
225 222
226#endif 223#endif
227 224
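[editor's example] current_thread_info() above now derives the thread_info address from the per-cpu kernel_stack variable instead of the PDA. A stand-alone model of the arithmetic, with illustrative THREAD_SIZE/offset values and a global standing in for the per-cpu variable:

#include <stdio.h>

#define THREAD_SIZE		8192UL
#define KERNEL_STACK_OFFSET	(5 * 8)

static unsigned long kernel_stack;	/* per-cpu in the real kernel */

struct thread_info { int cpu; };

static struct thread_info *current_thread_info(void)
{
	/* thread_info sits at the bottom of the THREAD_SIZE stack;
	 * kernel_stack points KERNEL_STACK_OFFSET below the top */
	return (struct thread_info *)(kernel_stack +
				      KERNEL_STACK_OFFSET - THREAD_SIZE);
}

int main(void)
{
	static unsigned char stack[THREAD_SIZE] __attribute__((aligned(8192)));

	kernel_stack = (unsigned long)stack + THREAD_SIZE - KERNEL_STACK_OFFSET;
	printf("stack base %p, thread_info %p\n",
	       (void *)stack, (void *)current_thread_info());
	return 0;
}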
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 2bb6a835c453..bd37ed444a21 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -3,6 +3,7 @@
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/interrupt.h>
6 7
7#define TICK_SIZE (tick_nsec / 1000) 8#define TICK_SIZE (tick_nsec / 1000)
8 9
@@ -11,8 +12,9 @@ unsigned long native_calibrate_tsc(void);
11 12
12#ifdef CONFIG_X86_32 13#ifdef CONFIG_X86_32
13extern int timer_ack; 14extern int timer_ack;
14extern int recalibrate_cpu_khz(void); 15extern irqreturn_t timer_interrupt(int irq, void *dev_id);
15#endif /* CONFIG_X86_32 */ 16#endif /* CONFIG_X86_32 */
17extern int recalibrate_cpu_khz(void);
16 18
17extern int no_timer_check; 19extern int no_timer_check;
18 20
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 0e7bbb549116..d3539f998f88 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
113 __flush_tlb(); 113 __flush_tlb();
114} 114}
115 115
116static inline void native_flush_tlb_others(const cpumask_t *cpumask, 116static inline void native_flush_tlb_others(const struct cpumask *cpumask,
117 struct mm_struct *mm, 117 struct mm_struct *mm,
118 unsigned long va) 118 unsigned long va)
119{ 119{
@@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
142 flush_tlb_mm(vma->vm_mm); 142 flush_tlb_mm(vma->vm_mm);
143} 143}
144 144
145void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, 145void native_flush_tlb_others(const struct cpumask *cpumask,
146 unsigned long va); 146 struct mm_struct *mm, unsigned long va);
147 147
148#define TLBSTATE_OK 1 148#define TLBSTATE_OK 1
149#define TLBSTATE_LAZY 2 149#define TLBSTATE_LAZY 2
150 150
151#ifdef CONFIG_X86_32
152struct tlb_state { 151struct tlb_state {
153 struct mm_struct *active_mm; 152 struct mm_struct *active_mm;
154 int state; 153 int state;
155 char __cacheline_padding[L1_CACHE_BYTES-8];
156}; 154};
157DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); 155DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
158 156
159void reset_lazy_tlbstate(void);
160#else
161static inline void reset_lazy_tlbstate(void) 157static inline void reset_lazy_tlbstate(void)
162{ 158{
159 percpu_write(cpu_tlbstate.state, 0);
160 percpu_write(cpu_tlbstate.active_mm, &init_mm);
163} 161}
164#endif
165 162
166#endif /* SMP */ 163#endif /* SMP */
167 164
168#ifndef CONFIG_PARAVIRT 165#ifndef CONFIG_PARAVIRT
169#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va) 166#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
170#endif 167#endif
171 168
172static inline void flush_tlb_kernel_range(unsigned long start, 169static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
175 flush_tlb_all(); 172 flush_tlb_all();
176} 173}
177 174
175extern void zap_low_mappings(void);
176
178#endif /* _ASM_X86_TLBFLUSH_H */ 177#endif /* _ASM_X86_TLBFLUSH_H */
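[editor's example] With the #ifdef CONFIG_X86_32 guard gone, one tlb_state definition and one inline reset_lazy_tlbstate() serve both builds. A user-space model of the reset, with plain assignments standing in for percpu_write() and a placeholder init_mm:

#include <stdio.h>

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct mm_struct { const char *name; };

static struct mm_struct init_mm = { "init_mm" };

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};

static struct tlb_state cpu_tlbstate;	/* per-cpu in the real kernel */

static void reset_lazy_tlbstate(void)
{
	cpu_tlbstate.state = 0;
	cpu_tlbstate.active_mm = &init_mm;
}

int main(void)
{
	cpu_tlbstate.state = TLBSTATE_LAZY;
	reset_lazy_tlbstate();
	printf("state=%d active_mm=%s\n",
	       cpu_tlbstate.state, cpu_tlbstate.active_mm->name);
	return 0;
}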
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4e2f2e0aab27..744299c0b774 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
74 return &node_to_cpumask_map[node]; 74 return &node_to_cpumask_map[node];
75} 75}
76 76
77static inline void setup_node_to_cpumask_map(void) { }
78
77#else /* CONFIG_X86_64 */ 79#else /* CONFIG_X86_64 */
78 80
79/* Mappings between node number and cpus on that node. */ 81/* Mappings between node number and cpus on that node. */
@@ -83,7 +85,8 @@ extern cpumask_t *node_to_cpumask_map;
83DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); 85DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
84 86
85/* Returns the number of the current Node. */ 87/* Returns the number of the current Node. */
86#define numa_node_id() read_pda(nodenumber) 88DECLARE_PER_CPU(int, node_number);
89#define numa_node_id() percpu_read(node_number)
87 90
88#ifdef CONFIG_DEBUG_PER_CPU_MAPS 91#ifdef CONFIG_DEBUG_PER_CPU_MAPS
89extern int cpu_to_node(int cpu); 92extern int cpu_to_node(int cpu);
@@ -102,10 +105,7 @@ static inline int cpu_to_node(int cpu)
102/* Same function but used if called before per_cpu areas are setup */ 105/* Same function but used if called before per_cpu areas are setup */
103static inline int early_cpu_to_node(int cpu) 106static inline int early_cpu_to_node(int cpu)
104{ 107{
105 if (early_per_cpu_ptr(x86_cpu_to_node_map)) 108 return early_per_cpu(x86_cpu_to_node_map, cpu);
106 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
107
108 return per_cpu(x86_cpu_to_node_map, cpu);
109} 109}
110 110
111/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 111/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
@@ -122,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
122 122
123#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 123#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
124 124
125extern void setup_node_to_cpumask_map(void);
126
125/* 127/*
126 * Replace default node_to_cpumask_ptr with optimized version 128 * Replace default node_to_cpumask_ptr with optimized version
127 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 129 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -192,9 +194,20 @@ extern int __node_distance(int, int);
192 194
193#else /* !CONFIG_NUMA */ 195#else /* !CONFIG_NUMA */
194 196
195#define numa_node_id() 0 197static inline int numa_node_id(void)
196#define cpu_to_node(cpu) 0 198{
197#define early_cpu_to_node(cpu) 0 199 return 0;
200}
201
202static inline int cpu_to_node(int cpu)
203{
204 return 0;
205}
206
207static inline int early_cpu_to_node(int cpu)
208{
209 return 0;
210}
198 211
199static inline const cpumask_t *cpumask_of_node(int node) 212static inline const cpumask_t *cpumask_of_node(int node)
200{ 213{
@@ -204,10 +217,8 @@ static inline cpumask_t node_to_cpumask(int node)
204{ 217{
205 return cpu_online_map; 218 return cpu_online_map;
206} 219}
207static inline int node_to_first_cpu(int node) 220
208{ 221static inline void setup_node_to_cpumask_map(void) { }
209 return first_cpu(cpu_online_map);
210}
211 222
212/* 223/*
213 * Replace default node_to_cpumask_ptr with optimized version 224 * Replace default node_to_cpumask_ptr with optimized version
@@ -222,14 +233,6 @@ static inline int node_to_first_cpu(int node)
222 233
223#include <asm-generic/topology.h> 234#include <asm-generic/topology.h>
224 235
225#ifdef CONFIG_NUMA
226/* Returns the number of the first CPU on Node 'node'. */
227static inline int node_to_first_cpu(int node)
228{
229 return cpumask_first(cpumask_of_node(node));
230}
231#endif
232
233extern cpumask_t cpu_coregroup_map(int cpu); 236extern cpumask_t cpu_coregroup_map(int cpu);
234extern const struct cpumask *cpu_coregroup_mask(int cpu); 237extern const struct cpumask *cpu_coregroup_mask(int cpu);
235 238
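[editor's example] The !CONFIG_NUMA fallbacks above become static inlines rather than object-like macros, so arguments stay evaluated and type-checked and callers compile identically under either config. A stand-alone illustration:

#include <stdio.h>

static inline int cpu_to_node(int cpu)
{
	(void)cpu;	/* argument still parsed and typed, unlike a
			 * '#define cpu_to_node(cpu) 0' that drops it */
	return 0;
}

int main(void)
{
	int cpu = 3;	/* no 'set but not used' warning with the inline */

	printf("node=%d\n", cpu_to_node(cpu));
	return 0;
}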
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 780ba0ab94f9..90f06c25221d 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
13 13
14extern unsigned long init_rsp; 14extern unsigned long init_rsp;
15extern unsigned long initial_code; 15extern unsigned long initial_code;
16extern unsigned long initial_gs;
16 17
17#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) 18#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
18#define TRAMPOLINE_BASE 0x6000 19#define TRAMPOLINE_BASE 0x6000
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index cf3bb053da0b..0d5342515b86 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
41dotraplinkage void do_overflow(struct pt_regs *, long); 41dotraplinkage void do_overflow(struct pt_regs *, long);
42dotraplinkage void do_bounds(struct pt_regs *, long); 42dotraplinkage void do_bounds(struct pt_regs *, long);
43dotraplinkage void do_invalid_op(struct pt_regs *, long); 43dotraplinkage void do_invalid_op(struct pt_regs *, long);
44dotraplinkage void do_device_not_available(struct pt_regs); 44dotraplinkage void do_device_not_available(struct pt_regs *, long);
45dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long); 45dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
46dotraplinkage void do_invalid_TSS(struct pt_regs *, long); 46dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
47dotraplinkage void do_segment_not_present(struct pt_regs *, long); 47dotraplinkage void do_segment_not_present(struct pt_regs *, long);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 4340055b7559..b685ece89d5c 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
121 121
122#define __get_user_x(size, ret, x, ptr) \ 122#define __get_user_x(size, ret, x, ptr) \
123 asm volatile("call __get_user_" #size \ 123 asm volatile("call __get_user_" #size \
124 : "=a" (ret),"=d" (x) \ 124 : "=a" (ret), "=d" (x) \
125 : "0" (ptr)) \ 125 : "0" (ptr)) \
126 126
127/* Careful: we have to cast the result to the type of the pointer 127/* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
181 181
182#define __put_user_x(size, x, ptr, __ret_pu) \ 182#define __put_user_x(size, x, ptr, __ret_pu) \
183 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ 183 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
184 :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 184 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
185 185
186 186
187 187
188#ifdef CONFIG_X86_32 188#ifdef CONFIG_X86_32
189#define __put_user_u64(x, addr, err) \ 189#define __put_user_asm_u64(x, addr, err, errret) \
190 asm volatile("1: movl %%eax,0(%2)\n" \ 190 asm volatile("1: movl %%eax,0(%2)\n" \
191 "2: movl %%edx,4(%2)\n" \ 191 "2: movl %%edx,4(%2)\n" \
192 "3:\n" \ 192 "3:\n" \
@@ -197,14 +197,24 @@ extern int __get_user_bad(void);
197 _ASM_EXTABLE(1b, 4b) \ 197 _ASM_EXTABLE(1b, 4b) \
198 _ASM_EXTABLE(2b, 4b) \ 198 _ASM_EXTABLE(2b, 4b) \
199 : "=r" (err) \ 199 : "=r" (err) \
200 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) 200 : "A" (x), "r" (addr), "i" (errret), "0" (err))
201
202#define __put_user_asm_ex_u64(x, addr) \
203 asm volatile("1: movl %%eax,0(%1)\n" \
204 "2: movl %%edx,4(%1)\n" \
205 "3:\n" \
206 _ASM_EXTABLE(1b, 2b - 1b) \
207 _ASM_EXTABLE(2b, 3b - 2b) \
208 : : "A" (x), "r" (addr))
201 209
202#define __put_user_x8(x, ptr, __ret_pu) \ 210#define __put_user_x8(x, ptr, __ret_pu) \
203 asm volatile("call __put_user_8" : "=a" (__ret_pu) \ 211 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
204 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 212 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
205#else 213#else
206#define __put_user_u64(x, ptr, retval) \ 214#define __put_user_asm_u64(x, ptr, retval, errret) \
207 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) 215 __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
216#define __put_user_asm_ex_u64(x, addr) \
217 __put_user_asm_ex(x, addr, "q", "", "Zr")
208#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) 218#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
209#endif 219#endif
210 220
@@ -276,10 +286,32 @@ do { \
276 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ 286 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
277 break; \ 287 break; \
278 case 4: \ 288 case 4: \
279 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\ 289 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
280 break; \ 290 break; \
281 case 8: \ 291 case 8: \
282 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ 292 __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
293 errret); \
294 break; \
295 default: \
296 __put_user_bad(); \
297 } \
298} while (0)
299
300#define __put_user_size_ex(x, ptr, size) \
301do { \
302 __chk_user_ptr(ptr); \
303 switch (size) { \
304 case 1: \
305 __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
306 break; \
307 case 2: \
308 __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
309 break; \
310 case 4: \
311 __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
312 break; \
313 case 8: \
314 __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
283 break; \ 315 break; \
284 default: \ 316 default: \
285 __put_user_bad(); \ 317 __put_user_bad(); \
@@ -311,9 +343,12 @@ do { \
311 343
312#ifdef CONFIG_X86_32 344#ifdef CONFIG_X86_32
313#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad() 345#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
346#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
314#else 347#else
315#define __get_user_asm_u64(x, ptr, retval, errret) \ 348#define __get_user_asm_u64(x, ptr, retval, errret) \
316 __get_user_asm(x, ptr, retval, "q", "", "=r", errret) 349 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
350#define __get_user_asm_ex_u64(x, ptr) \
351 __get_user_asm_ex(x, ptr, "q", "", "=r")
317#endif 352#endif
318 353
319#define __get_user_size(x, ptr, size, retval, errret) \ 354#define __get_user_size(x, ptr, size, retval, errret) \
@@ -350,6 +385,33 @@ do { \
350 : "=r" (err), ltype(x) \ 385 : "=r" (err), ltype(x) \
351 : "m" (__m(addr)), "i" (errret), "0" (err)) 386 : "m" (__m(addr)), "i" (errret), "0" (err))
352 387
388#define __get_user_size_ex(x, ptr, size) \
389do { \
390 __chk_user_ptr(ptr); \
391 switch (size) { \
392 case 1: \
393 __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
394 break; \
395 case 2: \
396 __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
397 break; \
398 case 4: \
399 __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
400 break; \
401 case 8: \
402 __get_user_asm_ex_u64(x, ptr); \
403 break; \
404 default: \
405 (x) = __get_user_bad(); \
406 } \
407} while (0)
408
409#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
410 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
411 "2:\n" \
412 _ASM_EXTABLE(1b, 2b - 1b) \
413 : ltype(x) : "m" (__m(addr)))
414
353#define __put_user_nocheck(x, ptr, size) \ 415#define __put_user_nocheck(x, ptr, size) \
354({ \ 416({ \
355 int __pu_err; \ 417 int __pu_err; \
@@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
385 _ASM_EXTABLE(1b, 3b) \ 447 _ASM_EXTABLE(1b, 3b) \
386 : "=r"(err) \ 448 : "=r"(err) \
387 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) 449 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
450
451#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
452 asm volatile("1: mov"itype" %"rtype"0,%1\n" \
453 "2:\n" \
454 _ASM_EXTABLE(1b, 2b - 1b) \
455 : : ltype(x), "m" (__m(addr)))
456
457/*
458 * uaccess_try and catch
459 */
460#define uaccess_try do { \
461 int prev_err = current_thread_info()->uaccess_err; \
462 current_thread_info()->uaccess_err = 0; \
463 barrier();
464
465#define uaccess_catch(err) \
466 (err) |= current_thread_info()->uaccess_err; \
467 current_thread_info()->uaccess_err = prev_err; \
468} while (0)
469
388/** 470/**
389 * __get_user: - Get a simple variable from user space, with less checking. 471 * __get_user: - Get a simple variable from user space, with less checking.
390 * @x: Variable to store result. 472 * @x: Variable to store result.
@@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
408 490
409#define __get_user(x, ptr) \ 491#define __get_user(x, ptr) \
410 __get_user_nocheck((x), (ptr), sizeof(*(ptr))) 492 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
493
411/** 494/**
412 * __put_user: - Write a simple value into user space, with less checking. 495 * __put_user: - Write a simple value into user space, with less checking.
413 * @x: Value to copy to user space. 496 * @x: Value to copy to user space.
@@ -435,6 +518,45 @@ struct __large_struct { unsigned long buf[100]; };
435#define __put_user_unaligned __put_user 518#define __put_user_unaligned __put_user
436 519
437/* 520/*
521 * {get|put}_user_try and catch
522 *
523 * get_user_try {
524 * get_user_ex(...);
525 * } get_user_catch(err)
526 */
527#define get_user_try uaccess_try
528#define get_user_catch(err) uaccess_catch(err)
529
530#define get_user_ex(x, ptr) do { \
531 unsigned long __gue_val; \
532 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
533 (x) = (__force __typeof__(*(ptr)))__gue_val; \
534} while (0)
535
536#ifdef CONFIG_X86_WP_WORKS_OK
537
538#define put_user_try uaccess_try
539#define put_user_catch(err) uaccess_catch(err)
540
541#define put_user_ex(x, ptr) \
542 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
543
544#else /* !CONFIG_X86_WP_WORKS_OK */
545
546#define put_user_try do { \
547 int __uaccess_err = 0;
548
549#define put_user_catch(err) \
550 (err) |= __uaccess_err; \
551} while (0)
552
553#define put_user_ex(x, ptr) do { \
554 __uaccess_err |= __put_user(x, ptr); \
555} while (0)
556
557#endif /* CONFIG_X86_WP_WORKS_OK */
558
559/*
438 * movsl can be slow when source and dest are not both 8-byte aligned 560 * movsl can be slow when source and dest are not both 8-byte aligned
439 */ 561 */
440#ifdef CONFIG_X86_INTEL_USERCOPY 562#ifdef CONFIG_X86_INTEL_USERCOPY
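[editor's example] The *_try/*_ex/*_catch helpers introduced above batch several user accesses and report one accumulated error, per the usage comment in the hunk. A user-space model of the calling shape, built on the simple error-accumulator fallback (the primary implementation instead records faults in thread_info->uaccess_err via the exception table):

#include <errno.h>
#include <stdio.h>

#define get_user_try		do { int __uaccess_err = 0;
#define get_user_catch(err)	(err) |= __uaccess_err; } while (0)
#define get_user_ex(x, ptr)	do {					\
	__uaccess_err |= get_user_model(&(x), (ptr));			\
} while (0)

/* stand-in for a single checked fetch: "faults" on NULL */
static int get_user_model(int *dst, const int *src)
{
	if (!src)
		return -EFAULT;
	*dst = *src;
	return 0;
}

int main(void)
{
	int a = 0, b = 0, err = 0;
	int ua = 5, ub = 7;

	get_user_try {
		get_user_ex(a, &ua);
		get_user_ex(b, &ub);
	} get_user_catch(err);

	printf("a=%d b=%d err=%d\n", a, b, err);
	return 0;
}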
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 84210c479fca..8cc687326eb8 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -188,16 +188,16 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
188extern long __copy_user_nocache(void *dst, const void __user *src, 188extern long __copy_user_nocache(void *dst, const void __user *src,
189 unsigned size, int zerorest); 189 unsigned size, int zerorest);
190 190
191static inline int __copy_from_user_nocache(void *dst, const void __user *src, 191static inline int
192 unsigned size) 192__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
193{ 193{
194 might_sleep(); 194 might_sleep();
195 return __copy_user_nocache(dst, src, size, 1); 195 return __copy_user_nocache(dst, src, size, 1);
196} 196}
197 197
198static inline int __copy_from_user_inatomic_nocache(void *dst, 198static inline int
199 const void __user *src, 199__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
200 unsigned size) 200 unsigned size)
201{ 201{
202 return __copy_user_nocache(dst, src, size, 0); 202 return __copy_user_nocache(dst, src, size, 0);
203} 203}
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index f2bba78430a4..6e72d74cf8dc 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -338,6 +338,8 @@
338#define __NR_dup3 330 338#define __NR_dup3 330
339#define __NR_pipe2 331 339#define __NR_pipe2 331
340#define __NR_inotify_init1 332 340#define __NR_inotify_init1 332
341#define __NR_preadv 333
342#define __NR_pwritev 334
341 343
342#ifdef __KERNEL__ 344#ifdef __KERNEL__
343 345
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index d2e415e6666f..f81829462325 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -653,6 +653,10 @@ __SYSCALL(__NR_dup3, sys_dup3)
653__SYSCALL(__NR_pipe2, sys_pipe2) 653__SYSCALL(__NR_pipe2, sys_pipe2)
654#define __NR_inotify_init1 294 654#define __NR_inotify_init1 294
655__SYSCALL(__NR_inotify_init1, sys_inotify_init1) 655__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
656#define __NR_preadv 295
657__SYSCALL(__NR_preadv, sys_preadv)
658#define __NR_pwritev 296
659__SYSCALL(__NR_pwritev, sys_pwritev)
656 660
657 661
658#ifndef __NO_STUBS 662#ifndef __NO_STUBS
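[editor's example] The two unistd hunks register preadv/pwritev (333/334 on 32-bit, 295/296 on 64-bit). A small user-space caller, assuming a libc that already wraps preadv(2); on a libc without the wrapper, the same call can be issued through syscall(2) with the numbers above:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	char hdr[4], body[8];
	struct iovec iov[2] = {
		{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
		{ .iov_base = body, .iov_len = sizeof(body) },
	};
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0)
		return 1;
	/* one syscall fills both buffers, reading from offset 0
	 * without moving the file position */
	ssize_t n = preadv(fd, iov, 2, 0);
	printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}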
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
new file mode 100644
index 000000000000..c0a01b5d985b
--- /dev/null
+++ b/arch/x86/include/asm/uv/uv.h
@@ -0,0 +1,33 @@
1#ifndef _ASM_X86_UV_UV_H
2#define _ASM_X86_UV_UV_H
3
4enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
5
6struct cpumask;
7struct mm_struct;
8
9#ifdef CONFIG_X86_UV
10
11extern enum uv_system_type get_uv_system_type(void);
12extern int is_uv_system(void);
13extern void uv_cpu_init(void);
14extern void uv_system_init(void);
15extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
16 struct mm_struct *mm,
17 unsigned long va,
18 unsigned int cpu);
19
20#else /* X86_UV */
21
22static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
23static inline int is_uv_system(void) { return 0; }
24static inline void uv_cpu_init(void) { }
25static inline void uv_system_init(void) { }
26static inline const struct cpumask *
27uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
28 unsigned long va, unsigned int cpu)
29{ return cpumask; }
30
31#endif /* X86_UV */
32
33#endif /* _ASM_X86_UV_UV_H */
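[editor's example] The !CONFIG_X86_UV branch of the new header stubs everything out as static inlines, so callers stay unconditional and the dead branches fold away at compile time. A stand-alone model of the pattern, with the config option faked as a preprocessor symbol:

#include <stdio.h>

/* #define CONFIG_X86_UV */	/* flip to take the real-implementation path */

#ifdef CONFIG_X86_UV
extern int is_uv_system(void);
extern void uv_system_init(void);
#else
static inline int is_uv_system(void) { return 0; }
static inline void uv_system_init(void) { }
#endif

int main(void)
{
	uv_system_init();	/* always safe to call */
	if (is_uv_system())
		printf("SGI UV platform\n");
	else
		printf("not UV; the stub branch compiles out\n");
	return 0;
}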
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 50423c7b56b2..9b0e61bf7a88 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -325,7 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
325#define cpubit_isset(cpu, bau_local_cpumask) \ 325#define cpubit_isset(cpu, bau_local_cpumask) \
326 test_bit((cpu), (bau_local_cpumask).bits) 326 test_bit((cpu), (bau_local_cpumask).bits)
327 327
328extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
329extern void uv_bau_message_intr1(void); 328extern void uv_bau_message_intr1(void);
330extern void uv_bau_timeout_intr1(void); 329extern void uv_bau_timeout_intr1(void);
331 330
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 777327ef05c1..d3a98ea1062e 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -11,11 +11,13 @@
11#ifndef _ASM_X86_UV_UV_HUB_H 11#ifndef _ASM_X86_UV_UV_HUB_H
12#define _ASM_X86_UV_UV_HUB_H 12#define _ASM_X86_UV_UV_HUB_H
13 13
14#ifdef CONFIG_X86_64
14#include <linux/numa.h> 15#include <linux/numa.h>
15#include <linux/percpu.h> 16#include <linux/percpu.h>
16#include <linux/timer.h> 17#include <linux/timer.h>
17#include <asm/types.h> 18#include <asm/types.h>
18#include <asm/percpu.h> 19#include <asm/percpu.h>
20#include <asm/uv/uv_mmrs.h>
19 21
20 22
21/* 23/*
@@ -199,6 +201,10 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
199#define SCIR_CPU_ACTIVITY 0x02 /* not idle */ 201#define SCIR_CPU_ACTIVITY 0x02 /* not idle */
200#define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */ 202#define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */
201 203
204/* Loop through all installed blades */
205#define for_each_possible_blade(bid) \
206 for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)
207
202/* 208/*
203 * Macros for converting between kernel virtual addresses, socket local physical 209 * Macros for converting between kernel virtual addresses, socket local physical
204 * addresses, and UV global physical addresses. 210 * addresses, and UV global physical addresses.
@@ -393,6 +399,7 @@ static inline void uv_set_scir_bits(unsigned char value)
393 uv_write_local_mmr8(uv_hub_info->scir.offset, value); 399 uv_write_local_mmr8(uv_hub_info->scir.offset, value);
394 } 400 }
395} 401}
402
396static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) 403static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
397{ 404{
398 if (uv_cpu_hub_info(cpu)->scir.state != value) { 405 if (uv_cpu_hub_info(cpu)->scir.state != value) {
@@ -401,4 +408,15 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
401 } 408 }
402} 409}
403 410
411static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
412{
413 unsigned long val;
414
415 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
416 ((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) |
417 (vector << UVH_IPI_INT_VECTOR_SHFT);
418 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
419}
420
421#endif /* CONFIG_X86_64 */
404#endif /* _ASM_X86_UV_UV_HUB_H */ 422#endif /* _ASM_X86_UV_UV_HUB_H */
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index dd627793a234..db68ac8a5ac2 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -1,3 +1,4 @@
1
1/* 2/*
2 * This file is subject to the terms and conditions of the GNU General Public 3 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 4 * License. See the file "COPYING" in the main directory of this archive
@@ -243,6 +244,158 @@ union uvh_event_occurred0_u {
243#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0 244#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
244 245
245/* ========================================================================= */ 246/* ========================================================================= */
247/* UVH_GR0_TLB_INT0_CONFIG */
248/* ========================================================================= */
249#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
250
251#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
252#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
253#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
254#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
255#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
256#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
257#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
258#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
259#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
260#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
261#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
262#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
263#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
264#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
265#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
266#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
267
268union uvh_gr0_tlb_int0_config_u {
269 unsigned long v;
270 struct uvh_gr0_tlb_int0_config_s {
271 unsigned long vector_ : 8; /* RW */
272 unsigned long dm : 3; /* RW */
273 unsigned long destmode : 1; /* RW */
274 unsigned long status : 1; /* RO */
275 unsigned long p : 1; /* RO */
276 unsigned long rsvd_14 : 1; /* */
277 unsigned long t : 1; /* RO */
278 unsigned long m : 1; /* RW */
279 unsigned long rsvd_17_31: 15; /* */
280 unsigned long apic_id : 32; /* RW */
281 } s;
282};
283
284/* ========================================================================= */
285/* UVH_GR0_TLB_INT1_CONFIG */
286/* ========================================================================= */
287#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
288
289#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
290#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
291#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
292#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
293#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
294#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
295#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
296#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
297#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
298#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
299#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
300#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
301#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
302#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
303#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
304#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
305
306union uvh_gr0_tlb_int1_config_u {
307 unsigned long v;
308 struct uvh_gr0_tlb_int1_config_s {
309 unsigned long vector_ : 8; /* RW */
310 unsigned long dm : 3; /* RW */
311 unsigned long destmode : 1; /* RW */
312 unsigned long status : 1; /* RO */
313 unsigned long p : 1; /* RO */
314 unsigned long rsvd_14 : 1; /* */
315 unsigned long t : 1; /* RO */
316 unsigned long m : 1; /* RW */
317 unsigned long rsvd_17_31: 15; /* */
318 unsigned long apic_id : 32; /* RW */
319 } s;
320};
321
322/* ========================================================================= */
323/* UVH_GR1_TLB_INT0_CONFIG */
324/* ========================================================================= */
325#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
326
327#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
328#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
329#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
330#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
331#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
332#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
333#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
334#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
335#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
336#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
337#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
338#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
339#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
340#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
341#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
342#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
343
344union uvh_gr1_tlb_int0_config_u {
345 unsigned long v;
346 struct uvh_gr1_tlb_int0_config_s {
347 unsigned long vector_ : 8; /* RW */
348 unsigned long dm : 3; /* RW */
349 unsigned long destmode : 1; /* RW */
350 unsigned long status : 1; /* RO */
351 unsigned long p : 1; /* RO */
352 unsigned long rsvd_14 : 1; /* */
353 unsigned long t : 1; /* RO */
354 unsigned long m : 1; /* RW */
355 unsigned long rsvd_17_31: 15; /* */
356 unsigned long apic_id : 32; /* RW */
357 } s;
358};
359
360/* ========================================================================= */
361/* UVH_GR1_TLB_INT1_CONFIG */
362/* ========================================================================= */
363#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
364
365#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
366#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
367#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
368#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
369#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
370#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
371#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
372#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
373#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
374#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
375#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
376#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
377#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
378#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
379#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
380#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
381
382union uvh_gr1_tlb_int1_config_u {
383 unsigned long v;
384 struct uvh_gr1_tlb_int1_config_s {
385 unsigned long vector_ : 8; /* RW */
386 unsigned long dm : 3; /* RW */
387 unsigned long destmode : 1; /* RW */
388 unsigned long status : 1; /* RO */
389 unsigned long p : 1; /* RO */
390 unsigned long rsvd_14 : 1; /* */
391 unsigned long t : 1; /* RO */
392 unsigned long m : 1; /* RW */
393 unsigned long rsvd_17_31: 15; /* */
394 unsigned long apic_id : 32; /* RW */
395 } s;
396};
397
398/* ========================================================================= */
246/* UVH_INT_CMPB */ 399/* UVH_INT_CMPB */
247/* ========================================================================= */ 400/* ========================================================================= */
248#define UVH_INT_CMPB 0x22080UL 401#define UVH_INT_CMPB 0x22080UL
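[editor's example] The *_config_u unions above pair a raw 64-bit 'v' with named bitfields, so an MMR value can be read whole and then picked apart field by field. A stand-alone demo of the pattern, trimmed to three fields; the LSB-first bitfield layout assumed here is what the kernel header relies on for x86:

#include <stdio.h>

#define VECTOR_SHFT	0
#define VECTOR_MASK	0x00000000000000ffUL
#define APIC_ID_SHFT	32

union tlb_int_config_u {
	unsigned long v;
	struct {
		unsigned long vector_  : 8;	/* RW */
		unsigned long dm       : 3;	/* RW */
		unsigned long destmode : 1;	/* RW */
		unsigned long rsvd     : 20;
		unsigned long apic_id  : 32;	/* RW */
	} s;
};

int main(void)
{
	union tlb_int_config_u cfg = { .v = 0 };

	cfg.s.vector_ = 0xf7;
	cfg.s.apic_id = 0x12;

	/* bitfield access and the SHFT/MASK macro style agree */
	printf("vector=%#lx (masked: %#lx)\n",
	       (unsigned long)cfg.s.vector_,
	       (cfg.v & VECTOR_MASK) >> VECTOR_SHFT);
	printf("apic_id=%#lx (shifted: %#lx)\n",
	       (unsigned long)cfg.s.apic_id, cfg.v >> APIC_ID_SHFT);
	return 0;
}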
diff --git a/arch/x86/include/asm/vic.h b/arch/x86/include/asm/vic.h
deleted file mode 100644
index 53100f353612..000000000000
--- a/arch/x86/include/asm/vic.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * Standard include definitions for the NCR Voyager Interrupt Controller */
6
7/* The eight CPI vectors. To activate a CPI, you write a bit mask
8 * corresponding to the processor set to be interrupted into the
9 * relevant register. That set of CPUs will then be interrupted with
10 * the CPI */
11static const int VIC_CPI_Registers[] =
12 {0xFC00, 0xFC01, 0xFC08, 0xFC09,
13 0xFC10, 0xFC11, 0xFC18, 0xFC19 };
14
15#define VIC_PROC_WHO_AM_I 0xfc29
16# define QUAD_IDENTIFIER 0xC0
17# define EIGHT_SLOT_IDENTIFIER 0xE0
18#define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72
19#define VIC_CPI_BASE_REGISTER 0xFC41
20#define VIC_PROCESSOR_ID 0xFC21
21# define VIC_CPU_MASQUERADE_ENABLE 0x8
22
23#define VIC_CLAIM_REGISTER_0 0xFC38
24#define VIC_CLAIM_REGISTER_1 0xFC39
25#define VIC_REDIRECT_REGISTER_0 0xFC60
26#define VIC_REDIRECT_REGISTER_1 0xFC61
27#define VIC_PRIORITY_REGISTER 0xFC20
28
29#define VIC_PRIMARY_MC_BASE 0xFC48
30#define VIC_SECONDARY_MC_BASE 0xFC49
31
32#define QIC_PROCESSOR_ID 0xFC71
33# define QIC_CPUID_ENABLE 0x08
34
35#define QIC_VIC_CPI_BASE_REGISTER 0xFC79
36#define QIC_CPI_BASE_REGISTER 0xFC7A
37
38#define QIC_MASK_REGISTER0 0xFC80
39/* NOTE: these are masked high, enabled low */
40# define QIC_PERF_TIMER 0x01
41# define QIC_LPE 0x02
42# define QIC_SYS_INT 0x04
43# define QIC_CMN_INT 0x08
44/* at the moment, just enable CMN_INT, disable SYS_INT */
45# define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */))
46#define QIC_MASK_REGISTER1 0xFC81
47# define QIC_BOOT_CPI_MASK 0xFE
48/* Enable CPI's 1-6 inclusive */
49# define QIC_CPI_ENABLE 0x81
50
51#define QIC_INTERRUPT_CLEAR0 0xFC8A
52#define QIC_INTERRUPT_CLEAR1 0xFC8B
53
54/* this is where we place the CPI vectors */
55#define VIC_DEFAULT_CPI_BASE 0xC0
56/* this is where we place the QIC CPI vectors */
57#define QIC_DEFAULT_CPI_BASE 0xD0
58
59#define VIC_BOOT_INTERRUPT_MASK 0xfe
60
61extern void smp_vic_timer_interrupt(void);
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 593636275238..e0f9aa16358b 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -118,7 +118,7 @@ static inline void cpu_svm_disable(void)
118 118
119 wrmsrl(MSR_VM_HSAVE_PA, 0); 119 wrmsrl(MSR_VM_HSAVE_PA, 0);
120 rdmsrl(MSR_EFER, efer); 120 rdmsrl(MSR_EFER, efer);
121 wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); 121 wrmsrl(MSR_EFER, efer & ~EFER_SVME);
122} 122}
123 123
124/** Makes sure SVM is disabled, if it is supported on the CPU 124/** Makes sure SVM is disabled, if it is supported on the CPU
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index d0238e6151d8..498f944010b9 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -270,8 +270,9 @@ enum vmcs_field {
270 270
271#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ 271#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
272#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ 272#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
273#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ 273#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
274#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ 274#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
275#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
275 276
276/* GUEST_INTERRUPTIBILITY_INFO flags. */ 277/* GUEST_INTERRUPTIBILITY_INFO flags. */
277#define GUEST_INTR_STATE_STI 0x00000001 278#define GUEST_INTR_STATE_STI 0x00000001
@@ -311,7 +312,7 @@ enum vmcs_field {
311#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ 312#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */
312#define TYPE_MOV_TO_DR (0 << 4) 313#define TYPE_MOV_TO_DR (0 << 4)
313#define TYPE_MOV_FROM_DR (1 << 4) 314#define TYPE_MOV_FROM_DR (1 << 4)
314#define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. */ 315#define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */
315 316
316 317
317/* segment AR */ 318/* segment AR */
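[editor's example] DEBUG_REG_ACCESS_REG above changes from a bare 11:8 mask into an extractor macro. A stand-alone decode of a made-up VM-exit qualification, reusing the macro bodies from the hunk:

#include <stdio.h>

#define DEBUG_REG_ACCESS_TYPE	0x10		/* bit 4: direction of access */
#define TYPE_MOV_FROM_DR	(1 << 4)
#define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf)	/* bits 11:8 */

int main(void)
{
	unsigned long eq = (3UL << 8) | TYPE_MOV_FROM_DR; /* e.g. mov %drN,%rbx */

	printf("gp reg #%lu, %s\n", DEBUG_REG_ACCESS_REG(eq),
	       (eq & DEBUG_REG_ACCESS_TYPE) == TYPE_MOV_FROM_DR ?
	       "mov from DR" : "mov to DR");
	return 0;
}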
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
deleted file mode 100644
index b3e647307625..000000000000
--- a/arch/x86/include/asm/voyager.h
+++ /dev/null
@@ -1,529 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * Standard include definitions for the NCR Voyager system */
6
7#undef VOYAGER_DEBUG
8#undef VOYAGER_CAT_DEBUG
9
10#ifdef VOYAGER_DEBUG
11#define VDEBUG(x) printk x
12#else
13#define VDEBUG(x)
14#endif
15
16/* There are three levels of voyager machine: 3,4 and 5. The rule is
17 * if it's less than 3435 it's a Level 3 except for a 3360 which is
18 * a level 4. A 3435 or above is a Level 5 */
19#define VOYAGER_LEVEL5_AND_ABOVE 0x3435
20#define VOYAGER_LEVEL4 0x3360
21
22/* The L4 DINO ASIC */
23#define VOYAGER_DINO 0x43
24
25/* voyager ports in standard I/O space */
26#define VOYAGER_MC_SETUP 0x96
27
28
29#define VOYAGER_CAT_CONFIG_PORT 0x97
30# define VOYAGER_CAT_DESELECT 0xff
31#define VOYAGER_SSPB_RELOCATION_PORT 0x98
32
33/* Valid CAT controller commands */
34/* start instruction register cycle */
35#define VOYAGER_CAT_IRCYC 0x01
36/* start data register cycle */
37#define VOYAGER_CAT_DRCYC 0x02
38/* move to execute state */
39#define VOYAGER_CAT_RUN 0x0F
40/* end operation */
41#define VOYAGER_CAT_END 0x80
42/* hold in idle state */
43#define VOYAGER_CAT_HOLD 0x90
44/* single step an "intest" vector */
45#define VOYAGER_CAT_STEP 0xE0
46/* return cat controller to CLEMSON mode */
47#define VOYAGER_CAT_CLEMSON 0xFF
48
49/* the default cat command header */
50#define VOYAGER_CAT_HEADER 0x7F
51
52/* the range of possible CAT module ids in the system */
53#define VOYAGER_MIN_MODULE 0x10
54#define VOYAGER_MAX_MODULE 0x1f
55
56/* The voyager registers per asic */
57#define VOYAGER_ASIC_ID_REG 0x00
58#define VOYAGER_ASIC_TYPE_REG 0x01
59/* the sub address registers can be made auto incrementing on reads */
60#define VOYAGER_AUTO_INC_REG 0x02
61# define VOYAGER_AUTO_INC 0x04
62# define VOYAGER_NO_AUTO_INC 0xfb
63#define VOYAGER_SUBADDRDATA 0x03
64#define VOYAGER_SCANPATH 0x05
65# define VOYAGER_CONNECT_ASIC 0x01
66# define VOYAGER_DISCONNECT_ASIC 0xfe
67#define VOYAGER_SUBADDRLO 0x06
68#define VOYAGER_SUBADDRHI 0x07
69#define VOYAGER_SUBMODSELECT 0x08
70#define VOYAGER_SUBMODPRESENT 0x09
71
72#define VOYAGER_SUBADDR_LO 0xff
73#define VOYAGER_SUBADDR_HI 0xffff
74
75/* the maximum size of a scan path -- used to form instructions */
76#define VOYAGER_MAX_SCAN_PATH 0x100
77/* the biggest possible register size (in bytes) */
78#define VOYAGER_MAX_REG_SIZE 4
79
80/* Total number of possible modules (including submodules) */
81#define VOYAGER_MAX_MODULES 16
82/* Largest number of asics per module */
83#define VOYAGER_MAX_ASICS_PER_MODULE 7
84
85/* the CAT asic of each module is always the first one */
86#define VOYAGER_CAT_ID 0
87#define VOYAGER_PSI 0x1a
88
89/* voyager instruction operations and registers */
90#define VOYAGER_READ_CONFIG 0x1
91#define VOYAGER_WRITE_CONFIG 0x2
92#define VOYAGER_BYPASS 0xff
93
94typedef struct voyager_asic {
95 __u8 asic_addr; /* ASIC address; Level 4 */
96 __u8 asic_type; /* ASIC type */
97 __u8 asic_id; /* ASIC id */
98 __u8 jtag_id[4]; /* JTAG id */
99 __u8 asic_location; /* Location within scan path; start w/ 0 */
100 __u8 bit_location; /* Location within bit stream; start w/ 0 */
101 __u8 ireg_length; /* Instruction register length */
102 __u16 subaddr; /* Amount of sub address space */
103 struct voyager_asic *next; /* Next asic in linked list */
104} voyager_asic_t;
105
106typedef struct voyager_module {
107 __u8 module_addr; /* Module address */
108 __u8 scan_path_connected; /* Scan path connected */
109 __u16 ee_size; /* Size of the EEPROM */
110 __u16 num_asics; /* Number of Asics */
111 __u16 inst_bits; /* Instruction bits in the scan path */
112 __u16 largest_reg; /* Largest register in the scan path */
113 __u16 smallest_reg; /* Smallest register in the scan path */
114 voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */
115 struct voyager_module *submodule; /* Submodule pointer */
116 struct voyager_module *next; /* Next module in linked list */
117} voyager_module_t;
118
119typedef struct voyager_eeprom_hdr {
120 __u8 module_id[4];
121 __u8 version_id;
122 __u8 config_id;
123 __u16 boundry_id; /* boundary scan id */
124 __u16 ee_size; /* size of EEPROM */
125 __u8 assembly[11]; /* assembly # */
126 __u8 assembly_rev; /* assembly rev */
127 __u8 tracer[4]; /* tracer number */
128 __u16 assembly_cksum; /* asm checksum */
129 __u16 power_consump; /* pwr requirements */
130 __u16 num_asics; /* number of asics */
131 __u16 bist_time; /* min. bist time */
132 __u16 err_log_offset; /* error log offset */
133 __u16 scan_path_offset;/* scan path offset */
134 __u16 cct_offset;
135 __u16 log_length; /* length of err log */
136 __u16 xsum_end; /* offset to end of
137 checksum */
138 __u8 reserved[4];
139 __u8 sflag; /* starting sentinal */
140 __u8 part_number[13]; /* prom part number */
141 __u8 version[10]; /* version number */
142 __u8 signature[8];
143 __u16 eeprom_chksum;
144 __u32 data_stamp_offset;
145 __u8 eflag ; /* ending sentinal */
146} __attribute__((packed)) voyager_eprom_hdr_t;
147
148
149
150#define VOYAGER_EPROM_SIZE_OFFSET \
151 ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
152#define VOYAGER_XSUM_END_OFFSET 0x2a
153
154/* the following three definitions are for internal table layouts
155 * in the module EPROMs. We really only care about the IDs and
156 * offsets */
157typedef struct voyager_sp_table {
158 __u8 asic_id;
159 __u8 bypass_flag;
160 __u16 asic_data_offset;
161 __u16 config_data_offset;
162} __attribute__((packed)) voyager_sp_table_t;
163
164typedef struct voyager_jtag_table {
165 __u8 icode[4];
166 __u8 runbist[4];
167 __u8 intest[4];
168 __u8 samp_preld[4];
169 __u8 ireg_len;
170} __attribute__((packed)) voyager_jtt_t;
171
172typedef struct voyager_asic_data_table {
173 __u8 jtag_id[4];
174 __u16 length_bsr;
175 __u16 length_bist_reg;
176 __u32 bist_clk;
177 __u16 subaddr_bits;
178 __u16 seed_bits;
179 __u16 sig_bits;
180 __u16 jtag_offset;
181} __attribute__((packed)) voyager_at_t;
182
183/* Voyager Interrupt Controller (VIC) registers */
184
185/* Base to add to Cross Processor Interrupts (CPIs) when triggering
186 * the CPU IRQ line */
187/* register defines for the WCBICs (one per processor) */
188#define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */
189#define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */
190#define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */
191#define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */
192#define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */
193#define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */
194#define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */
195#define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */
196
197
198/* top of memory registers */
199#define VOYAGER_WCBIC_TOM_L 0x4
200#define VOYAGER_WCBIC_TOM_H 0x5
201
202/* register defines for Voyager Memory Contol (VMC)
203 * these are present on L4 machines only */
204#define VOYAGER_VMC1 0x81
205#define VOYAGER_VMC2 0x91
206#define VOYAGER_VMC3 0xa1
207#define VOYAGER_VMC4 0xb1
208
209/* VMC Ports */
210#define VOYAGER_VMC_MEMORY_SETUP 0x9
211# define VMC_Interleaving 0x01
212# define VMC_4Way 0x02
213# define VMC_EvenCacheLines 0x04
214# define VMC_HighLine 0x08
215# define VMC_Start0_Enable 0x20
216# define VMC_Start1_Enable 0x40
217# define VMC_Vremap 0x80
218#define VOYAGER_VMC_BANK_DENSITY 0xa
219# define VMC_BANK_EMPTY 0
220# define VMC_BANK_4MB 1
221# define VMC_BANK_16MB 2
222# define VMC_BANK_64MB 3
223# define VMC_BANK0_MASK 0x03
224# define VMC_BANK1_MASK 0x0C
225# define VMC_BANK2_MASK 0x30
226# define VMC_BANK3_MASK 0xC0
227
228/* Magellan Memory Controller (MMC) defines - present on L5 */
229#define VOYAGER_MMC_ASIC_ID 1
230/* the two memory modules corresponding to memory cards in the system */
231#define VOYAGER_MMC_MEMORY0_MODULE 0x14
232#define VOYAGER_MMC_MEMORY1_MODULE 0x15
233/* the Magellan Memory Address (MMA) defines */
234#define VOYAGER_MMA_ASIC_ID 2
235
236/* Submodule number for the Quad Baseboard */
237#define VOYAGER_QUAD_BASEBOARD 1
238
239/* ASIC defines for the Quad Baseboard */
240#define VOYAGER_QUAD_QDATA0 1
241#define VOYAGER_QUAD_QDATA1 2
242#define VOYAGER_QUAD_QABC 3
243
244/* Useful areas in extended CMOS */
245#define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a
246#define VOYAGER_MEMORY_CLICKMAP 0xa23
247#define VOYAGER_DUMP_LOCATION 0xb1a
248
249/* SUS In Control bit - used to tell SUS that we don't need to be
250 * babysat anymore */
251#define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff
252# define VOYAGER_IN_CONTROL_FLAG 0x80
253
254/* Voyager PSI defines */
255#define VOYAGER_PSI_STATUS_REG 0x08
256# define PSI_DC_FAIL 0x01
257# define PSI_MON 0x02
258# define PSI_FAULT 0x04
259# define PSI_ALARM 0x08
260# define PSI_CURRENT 0x10
261# define PSI_DVM 0x20
262# define PSI_PSCFAULT 0x40
263# define PSI_STAT_CHG 0x80
264
265#define VOYAGER_PSI_SUPPLY_REG 0x8000
266 /* read */
267# define PSI_FAIL_DC 0x01
268# define PSI_FAIL_AC 0x02
269# define PSI_MON_INT 0x04
270# define PSI_SWITCH_OFF 0x08
271# define PSI_HX_OFF 0x10
272# define PSI_SECURITY 0x20
273# define PSI_CMOS_BATT_LOW 0x40
274# define PSI_CMOS_BATT_FAIL 0x80
275 /* write */
276# define PSI_CLR_SWITCH_OFF 0x13
277# define PSI_CLR_HX_OFF 0x14
278# define PSI_CLR_CMOS_BATT_FAIL 0x17
279
280#define VOYAGER_PSI_MASK 0x8001
281# define PSI_MASK_MASK 0x10
282
283#define VOYAGER_PSI_AC_FAIL_REG 0x8004
284#define AC_FAIL_STAT_CHANGE 0x80
285
286#define VOYAGER_PSI_GENERAL_REG 0x8007
287 /* read */
288# define PSI_SWITCH_ON 0x01
289# define PSI_SWITCH_ENABLED 0x02
290# define PSI_ALARM_ENABLED 0x08
291# define PSI_SECURE_ENABLED 0x10
292# define PSI_COLD_RESET 0x20
293# define PSI_COLD_START 0x80
294 /* write */
295# define PSI_POWER_DOWN 0x10
296# define PSI_SWITCH_DISABLE 0x01
297# define PSI_SWITCH_ENABLE 0x11
298# define PSI_CLEAR 0x12
299# define PSI_ALARM_DISABLE 0x03
300# define PSI_ALARM_ENABLE 0x13
301# define PSI_CLEAR_COLD_RESET 0x05
302# define PSI_SET_COLD_RESET 0x15
303# define PSI_CLEAR_COLD_START 0x07
304# define PSI_SET_COLD_START 0x17
305
306
307
308struct voyager_bios_info {
309 __u8 len;
310 __u8 major;
311 __u8 minor;
312 __u8 debug;
313 __u8 num_classes;
314 __u8 class_1;
315 __u8 class_2;
316};
317
318/* The following structures and definitions are for the Kernel/SUS
319 * interface these are needed to find out how SUS initialised any Quad
320 * boards in the system */
321
322#define NUMBER_OF_MC_BUSSES 2
323#define SLOTS_PER_MC_BUS 8
324#define MAX_CPUS 16 /* 16 way CPU system */
325#define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */
326#define MAX_CACHE_LEVELS 4 /* # of cache levels supported */
327#define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */
328#define NUMBER_OF_POS_REGS 8
329
330typedef struct {
331 __u8 MC_Slot;
332 __u8 POS_Values[NUMBER_OF_POS_REGS];
333} __attribute__((packed)) MC_SlotInformation_t;
334
335struct QuadDescription {
336 __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields
337 * will be zero except for slot */
338 __u8 StructureVersion;
339 __u32 CPI_BaseAddress;
340 __u32 LARC_BankSize;
341 __u32 LocalMemoryStateBits;
342 __u8 Slot; /* Processor slots 1 - 4 */
343} __attribute__((packed));
344
345struct ProcBoardInfo {
346 __u8 Type;
347 __u8 StructureVersion;
348 __u8 NumberOfBoards;
349 struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS];
350} __attribute__((packed));
351
352struct CacheDescription {
353 __u8 Level;
354 __u32 TotalSize;
355 __u16 LineSize;
356 __u8 Associativity;
357 __u8 CacheType;
358 __u8 WriteType;
359 __u8 Number_CPUs_SharedBy;
360 __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS];
361
362} __attribute__((packed));
363
364struct CPU_Description {
365 __u8 CPU_HardwareId;
366 char *FRU_String;
367 __u8 NumberOfCacheLevels;
368 struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS];
369} __attribute__((packed));
370
371struct CPU_Info {
372 __u8 Type;
373 __u8 StructureVersion;
374 __u8 NumberOf_CPUs;
375 struct CPU_Description CPU_Data[MAX_CPUS];
376} __attribute__((packed));
377
378
379/*
380 * This structure will be used by SUS and the OS.
381 * The assumption about this structure is that no blank space is
382 * packed in it by our friend the compiler.
383 */
typedef struct {
	__u8 Mailbox_SUS;		/* Written by SUS to pass
					   commands/responses to the OS */
	__u8 Mailbox_OS;		/* Written by the OS to pass
					   commands/responses to SUS */
	__u8 SUS_MailboxVersion;	/* Tells the OS which iteration of
					   the interface SUS supports */
	__u8 OS_MailboxVersion;		/* Tells SUS which iteration of
					   the interface the OS supports */
	__u32 OS_Flags;			/* Flags set by the OS as info
					   for SUS */
	__u32 SUS_Flags;		/* Flags set by SUS as info
					   for the OS */
	__u32 WatchDogPeriod;		/* Watchdog period (in seconds);
					   the DP uses it to decide whether
					   the OS is dead */
	__u32 WatchDogCount;		/* Updated by the OS on every tick */
	__u32 MemoryFor_SUS_ErrorLog;	/* Flat 32-bit address telling SUS
					   where to put the SUS error log
					   on a dump */
	MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];
					/* Storage for MCA POS data */
	/* All new SECOND_PASS_INTERFACE fields added from this point */
	struct ProcBoardInfo *BoardData;
	struct CPU_Info *CPU_Data;
	/* All new fields must be added from this point */
} Voyager_KernelSUS_Mbox_t;
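
The watchdog fields above imply a simple contract: the OS must keep
WatchDogCount moving or the DP assumes it is dead. A hypothetical sketch of
the OS side (the function name and how the mailbox pointer is obtained are
assumptions):

/* Hypothetical: called from the timer path.  The DP declares the OS
 * dead when WatchDogCount stops changing for WatchDogPeriod seconds. */
static void voyager_pet_watchdog(volatile Voyager_KernelSUS_Mbox_t *mbox)
{
	mbox->WatchDogCount++;
}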

/* Structure for finding the right memory address to send a QIC CPI to */
struct voyager_qic_cpi {
	/* Each cache line (32 bytes) can trigger a cpi.  The cpi
	 * read/write may occur anywhere in the cache line --- pick the
	 * middle to be safe */
	struct {
		__u32 pad1[3];
		__u32 cpi;
		__u32 pad2[4];
	} qic_cpi[8];
};
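
Per the comment above, touching any word in one of these 32-byte lines raises
the corresponding CPI. A hedged sketch of a trigger, using the
voyager_quad_cpi_addr[] array declared further down in this header (the
wrapper name and the volatile cast are assumptions; cpi must be 0-7 to stay
within qic_cpi[]):

/* Hedged sketch, not the kernel's implementation: fire CPI `cpi'
 * (0-7) at `cpu'.  The volatile cast forces the read to happen. */
static inline void voyager_trigger_qic_cpi(int cpu, int cpi)
{
	(void)*(volatile __u32 *)
		&voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}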

struct voyager_status {
	__u32 power_fail:1;
	__u32 switch_off:1;
	__u32 request_from_kernel:1;
};

struct voyager_psi_regs {
	__u8 cat_id;
	__u8 cat_dev;
	__u8 cat_control;
	__u8 subaddr;
	__u8 dummy4;
	__u8 checkbit;
	__u8 subaddr_low;
	__u8 subaddr_high;
	__u8 intstatus;
	__u8 stat1;
	__u8 stat3;
	__u8 fault;
	__u8 tms;
	__u8 gen;
	__u8 sysconf;
	__u8 dummy15;
};

struct voyager_psi_subregs {
	__u8 supply;
	__u8 mask;
	__u8 present;
	__u8 DCfail;
	__u8 ACfail;
	__u8 fail;
	__u8 UPSfail;
	__u8 genstatus;
};

struct voyager_psi {
	struct voyager_psi_regs regs;
	struct voyager_psi_subregs subregs;
};

struct voyager_SUS {
#define VOYAGER_DUMP_BUTTON_NMI		0x1
#define VOYAGER_SUS_VALID		0x2
#define VOYAGER_SYSINT_COMPLETE		0x3
	__u8 SUS_mbox;
#define VOYAGER_NO_COMMAND		0x0
#define VOYAGER_IGNORE_DUMP		0x1
#define VOYAGER_DO_DUMP			0x2
#define VOYAGER_SYSINT_HANDSHAKE	0x3
#define VOYAGER_DO_MEM_DUMP		0x4
#define VOYAGER_SYSINT_WAS_RECOVERED	0x5
	__u8 kernel_mbox;
#define VOYAGER_MAILBOX_VERSION		0x10
	__u8 SUS_version;
	__u8 kernel_version;
#define VOYAGER_OS_HAS_SYSINT		0x1
#define VOYAGER_OS_IN_PROGRESS		0x2
#define VOYAGER_UPDATING_WDPERIOD	0x4
	__u32 kernel_flags;
#define VOYAGER_SUS_BOOTING		0x1
#define VOYAGER_SUS_IN_PROGRESS		0x2
	__u32 SUS_flags;
	__u32 watchdog_period;
	__u32 watchdog_count;
	__u32 SUS_errorlog;
	/* lots of system configuration stuff under here */
};
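
A hypothetical sketch of the mailbox handshake these codes describe: the
kernel reads a command from SUS_mbox and replies through kernel_mbox (the
helper name and its caller are assumptions):

/* Hypothetical: answer a dump-button NMI by declining the dump. */
static void voyager_answer_dump_request(struct voyager_SUS *sus)
{
	if (sus->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI)
		sus->kernel_mbox = VOYAGER_IGNORE_DUMP;
}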

/* Variables exported by voyager_smp */
extern __u32 voyager_extended_vic_processors;
extern __u32 voyager_allowed_boot_processors;
extern __u32 voyager_quad_processors;
extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS];
extern struct voyager_SUS *voyager_SUS;

/* Variables exported always */
extern struct task_struct *voyager_thread;
extern int voyager_level;
extern struct voyager_status voyager_status;

/* Functions exported by the voyager and voyager_smp modules */
extern int voyager_cat_readb(__u8 module, __u8 asic, int reg);
extern void voyager_cat_init(void);
extern void voyager_detect(struct voyager_bios_info *);
extern void voyager_trap_init(void);
extern void voyager_setup_irqs(void);
extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length);
extern void voyager_smp_intr_init(void);
extern __u8 voyager_extended_cmos_read(__u16 cmos_address);
extern void voyager_smp_dump(void);
extern void voyager_timer_interrupt(void);
extern void smp_local_timer_interrupt(void);
extern void voyager_power_off(void);
extern void smp_voyager_power_off(void *dummy);
extern void voyager_restart(void);
extern void voyager_cat_power_off(void);
extern void voyager_cat_do_common_interrupt(void);
extern void voyager_handle_nmi(void);

/* Command codes for the voyager_cat_psi() call below */
#define VOYAGER_PSI_READ	0
#define VOYAGER_PSI_WRITE	1
#define VOYAGER_PSI_SUBREAD	2
#define VOYAGER_PSI_SUBWRITE	3
extern void voyager_cat_psi(__u8, __u16, __u8 *);
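
The prototype gives no parameter names, but the placement of the
VOYAGER_PSI_* codes suggests the first argument selects the command; a hedged
usage sketch under that assumption (helper name illustrative):

/* Hypothetical: read one PSI register via the CAT bus. */
static __u8 psi_read_reg(__u16 subaddr)
{
	__u8 data = 0;

	voyager_cat_psi(VOYAGER_PSI_READ, subaddr, &data);
	return data;
}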
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 19144184983a..1df35417c412 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 	return raw_irqs_disabled_flags(regs->flags);
 }
 
-static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
-{
-	regs->orig_ax = ~irq;
-	do_IRQ(regs);
-}
-
 #endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 5e79ca694326..9c371e4a9fa6 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -296,6 +296,8 @@ HYPERVISOR_get_debugreg(int reg)
 static inline int
 HYPERVISOR_update_descriptor(u64 ma, u64 desc)
 {
+	if (sizeof(u64) == sizeof(long))
+		return _hypercall2(int, update_descriptor, ma, desc);
 	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 
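The added fast path relies on sizeof(u64) == sizeof(long) being a
compile-time constant: on 64-bit builds the whole u64 fits in one
register-sized argument and _hypercall2() suffices, while 32-bit builds still
split each u64 into low/high halves for _hypercall4(). A small userspace
analogue of the same pattern (illustrative only):

#include <stdint.h>
#include <stdio.h>

static void show_u64_passing(uint64_t v)
{
	if (sizeof(uint64_t) == sizeof(long)) {
		/* 64-bit: the value fits one register-sized argument. */
		printf("single arg: %#llx\n", (unsigned long long)v);
	} else {
		/* 32-bit: pass low and high 32-bit halves separately,
		 * exactly as the _hypercall4() fallback above does. */
		printf("lo=%#lx hi=%#lx\n",
		       (unsigned long)v, (unsigned long)(v >> 32));
	}
}

int main(void)
{
	show_u64_passing(0x123456789abcdef0ULL);
	return 0;
}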
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 81fbd735aec4..d5b7e90c0edf 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -38,22 +38,30 @@ extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
 enum xen_domain_type {
-	XEN_NATIVE,
-	XEN_PV_DOMAIN,
-	XEN_HVM_DOMAIN,
+	XEN_NATIVE,		/* running on bare hardware */
+	XEN_PV_DOMAIN,		/* running in a PV domain */
+	XEN_HVM_DOMAIN,		/* running in a Xen hvm domain */
 };
 
-extern enum xen_domain_type xen_domain_type;
-
 #ifdef CONFIG_XEN
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
+extern enum xen_domain_type xen_domain_type;
 #else
-#define xen_domain()		(0)
+#define xen_domain_type	XEN_NATIVE
 #endif
 
-#define xen_pv_domain()		(xen_domain() && xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() && xen_domain_type == XEN_HVM_DOMAIN)
+#define xen_domain()		(xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain()		(xen_domain() &&			\
+				 xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain()	(xen_domain() &&			\
+				 xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#include <xen/interface/xen.h>
 
-#define xen_initial_domain()	(xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
+#define xen_initial_domain()	(xen_pv_domain() && \
+				 xen_start_info->flags & SIF_INITDOMAIN)
+#else  /* !CONFIG_XEN_DOM0 */
+#define xen_initial_domain()	(0)
+#endif /* CONFIG_XEN_DOM0 */
 
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
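With the !CONFIG_XEN stub mapping xen_domain_type to XEN_NATIVE, all four
predicates collapse to compile-time constants, so callers can test them
unconditionally. A hedged sketch (the helper is illustrative; note that
xen_initial_domain() implies xen_pv_domain(), so dom0 must be tested first):

/* Illustrative helper; not part of the patch. */
static const char *xen_mode_string(void)
{
	if (xen_initial_domain())
		return "Xen dom0";	/* PV with SIF_INITDOMAIN set */
	if (xen_pv_domain())
		return "Xen PV domU";
	if (xen_hvm_domain())
		return "Xen HVM guest";
	return "bare metal";		/* or CONFIG_XEN disabled */
}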
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 4bd990ee43df..1a918dde46b5 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -164,6 +164,7 @@ static inline pte_t __pte_ma(pteval_t x)
 
 
 xmaddr_t arbitrary_virt_to_machine(void *address);
+unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
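A hedged sketch contrasting the new helper with the existing one:
arbitrary_virt_to_mfn() returns only the machine frame number, while
arbitrary_virt_to_machine() returns a full machine address. The xmaddr_t
`maddr' field name is an assumption, as is the helper itself:

#include <linux/kernel.h>
#include <asm/xen/page.h>

/* Illustrative helper; not part of the patch. */
static void describe_buffer(void *vaddr)
{
	unsigned long mfn = arbitrary_virt_to_mfn(vaddr);
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	/* The mfn is just the frame number; maddr.maddr also carries
	 * the byte offset within the page (assumed field name). */
	printk(KERN_DEBUG "mfn=%#lx maddr=%#llx\n",
	       mfn, (unsigned long long)maddr.maddr);
}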