author    Ingo Molnar <mingo@elte.hu>	2008-07-28 18:07:55 -0400
committer Ingo Molnar <mingo@elte.hu>	2008-07-28 18:07:55 -0400
commit    cb28a1bbdb4790378e7366d6c9ee1d2340b84f92 (patch)
tree      316436f77dac75335fd2c3ef5f109e71606c50d3 /include/asm-x86
parent    b6d4f7e3ef25beb8c658c97867d98883e69dc544 (diff)
parent    f934fb19ef34730263e6afc01e8ec27a8a71470f (diff)
Merge branch 'linus' into core/generic-dma-coherent
Conflicts:

	arch/x86/Kconfig

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/Kbuild | 2
-rw-r--r--  include/asm-x86/amd_iommu_types.h | 114
-rw-r--r--  include/asm-x86/apic.h | 28
-rw-r--r--  include/asm-x86/arch_hooks.h | 1
-rw-r--r--  include/asm-x86/bitops.h | 2
-rw-r--r--  include/asm-x86/calling.h | 6
-rw-r--r--  include/asm-x86/cpufeature.h | 1
-rw-r--r--  include/asm-x86/device.h | 3
-rw-r--r--  include/asm-x86/dma-mapping.h | 100
-rw-r--r--  include/asm-x86/e820.h | 8
-rw-r--r--  include/asm-x86/fixmap_32.h | 6
-rw-r--r--  include/asm-x86/ftrace.h | 2
-rw-r--r--  include/asm-x86/gart.h | 1
-rw-r--r--  include/asm-x86/gpio.h | 52
-rw-r--r--  include/asm-x86/hugetlb.h | 10
-rw-r--r--  include/asm-x86/i387.h | 54
-rw-r--r--  include/asm-x86/ide.h | 65
-rw-r--r--  include/asm-x86/io_32.h | 2
-rw-r--r--  include/asm-x86/io_64.h | 2
-rw-r--r--  include/asm-x86/iommu.h | 11
-rw-r--r--  include/asm-x86/ipi.h | 2
-rw-r--r--  include/asm-x86/kexec.h | 18
-rw-r--r--  include/asm-x86/kvm.h | 1
-rw-r--r--  include/asm-x86/kvm_host.h | 72
-rw-r--r--  include/asm-x86/kvm_x86_emulate.h | 11
-rw-r--r--  include/asm-x86/mach-bigsmp/mach_apic.h | 4
-rw-r--r--  include/asm-x86/mach-default/mach_apic.h | 4
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h | 6
-rw-r--r--  include/asm-x86/mach-es7000/mach_apic.h | 4
-rw-r--r--  include/asm-x86/mach-generic/mach_mpspec.h | 2
-rw-r--r--  include/asm-x86/mach-summit/mach_apic.h | 6
-rw-r--r--  include/asm-x86/mach-visws/entry_arch.h | 5
-rw-r--r--  include/asm-x86/mach-visws/mach_apic.h | 1
-rw-r--r--  include/asm-x86/mach-visws/mach_apicdef.h | 1
-rw-r--r--  include/asm-x86/mach-visws/setup_arch.h | 1
-rw-r--r--  include/asm-x86/mach-visws/smpboot_hooks.h | 1
-rw-r--r--  include/asm-x86/namei.h | 11
-rw-r--r--  include/asm-x86/page.h | 17
-rw-r--r--  include/asm-x86/paravirt.h | 78
-rw-r--r--  include/asm-x86/percpu.h | 26
-rw-r--r--  include/asm-x86/pgtable-3level.h | 8
-rw-r--r--  include/asm-x86/pgtable.h | 31
-rw-r--r--  include/asm-x86/pgtable_32.h | 19
-rw-r--r--  include/asm-x86/pgtable_64.h | 12
-rw-r--r--  include/asm-x86/processor-flags.h | 2
-rw-r--r--  include/asm-x86/processor.h | 4
-rw-r--r--  include/asm-x86/ptrace-abi.h | 6
-rw-r--r--  include/asm-x86/segment.h | 9
-rw-r--r--  include/asm-x86/semaphore.h | 1
-rw-r--r--  include/asm-x86/setup.h | 30
-rw-r--r--  include/asm-x86/signal.h | 4
-rw-r--r--  include/asm-x86/smp.h | 2
-rw-r--r--  include/asm-x86/spinlock.h | 118
-rw-r--r--  include/asm-x86/spinlock_types.h | 2
-rw-r--r--  include/asm-x86/swiotlb.h | 8
-rw-r--r--  include/asm-x86/thread_info.h | 25
-rw-r--r--  include/asm-x86/traps.h | 66
-rw-r--r--  include/asm-x86/uaccess.h | 1
-rw-r--r--  include/asm-x86/unistd_32.h | 6
-rw-r--r--  include/asm-x86/unistd_64.h | 14
-rw-r--r--  include/asm-x86/uv/bios.h | 68
-rw-r--r--  include/asm-x86/vdso.h | 8
-rw-r--r--  include/asm-x86/xen/events.h | 1
-rw-r--r--  include/asm-x86/xen/hypercall.h | 263
-rw-r--r--  include/asm-x86/xen/interface.h | 139
-rw-r--r--  include/asm-x86/xen/interface_32.h | 97
-rw-r--r--  include/asm-x86/xen/interface_64.h | 159
-rw-r--r--  include/asm-x86/xen/page.h | 8
68 files changed, 1327 insertions(+), 525 deletions(-)
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 1e3554596f72..4a8e80cdcfa5 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm
 header-y += boot.h
 header-y += bootparam.h
 header-y += debugreg.h
-header-y += kvm.h
 header-y += ldt.h
 header-y += msr-index.h
 header-y += prctl.h
@@ -19,7 +18,6 @@ unifdef-y += msr.h
 unifdef-y += mtrr.h
 unifdef-y += posix_types_32.h
 unifdef-y += posix_types_64.h
-unifdef-y += ptrace.h
 unifdef-y += unistd_32.h
 unifdef-y += unistd_64.h
 unifdef-y += vm86.h
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 7bfcb47cc452..22aa58ca1991 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -27,13 +27,12 @@
 /*
  * some size calculation constants
  */
-#define DEV_TABLE_ENTRY_SIZE		256
+#define DEV_TABLE_ENTRY_SIZE		32
 #define ALIAS_TABLE_ENTRY_SIZE		2
 #define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))
 
 /* helper macros */
 #define LOW_U32(x) ((x) & ((1ULL << 32)-1))
-#define HIGH_U32(x) (LOW_U32((x) >> 32))
 
 /* Length of the MMIO region for the AMD IOMMU */
 #define MMIO_REGION_LENGTH	0x4000
@@ -158,78 +157,170 @@
 
 #define MAX_DOMAIN_ID 65536
 
+/*
+ * This structure contains generic data for IOMMU protection domains
+ * independent of their use.
+ */
 struct protection_domain {
-	spinlock_t lock;
-	u16 id;
-	int mode;
-	u64 *pt_root;
-	void *priv;
+	spinlock_t lock;	/* mostly used to lock the page table*/
+	u16 id;			/* the domain id written to the device table */
+	int mode;		/* paging mode (0-6 levels) */
+	u64 *pt_root;		/* page table root pointer */
+	void *priv;		/* private data */
 };
 
+/*
+ * Data container for a dma_ops specific protection domain
+ */
 struct dma_ops_domain {
 	struct list_head list;
+
+	/* generic protection domain information */
 	struct protection_domain domain;
+
+	/* size of the aperture for the mappings */
 	unsigned long aperture_size;
+
+	/* address we start to search for free addresses */
 	unsigned long next_bit;
+
+	/* address allocation bitmap */
 	unsigned long *bitmap;
+
+	/*
+	 * Array of PTE pages for the aperture. In this array we save all the
+	 * leaf pages of the domain page table used for the aperture. This way
+	 * we don't need to walk the page table to find a specific PTE. We can
+	 * just calculate its address in constant time.
+	 */
 	u64 **pte_pages;
 };
 
+/*
+ * Structure where we save information about one hardware AMD IOMMU in the
+ * system.
+ */
 struct amd_iommu {
 	struct list_head list;
+
+	/* locks the accesses to the hardware */
 	spinlock_t lock;
 
+	/* device id of this IOMMU */
 	u16 devid;
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
 	u16 cap_ptr;
 
+	/* physical address of MMIO space */
 	u64 mmio_phys;
+	/* virtual address of MMIO space */
 	u8 *mmio_base;
+
+	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
+
+	/* first device this IOMMU handles. read from PCI */
 	u16 first_device;
+	/* last device this IOMMU handles. read from PCI */
 	u16 last_device;
+
+	/* start of exclusion range of that IOMMU */
 	u64 exclusion_start;
+	/* length of exclusion range of that IOMMU */
 	u64 exclusion_length;
 
+	/* command buffer virtual address */
 	u8 *cmd_buf;
+	/* size of command buffer */
 	u32 cmd_buf_size;
 
+	/* if one, we need to send a completion wait command */
 	int need_sync;
 
+	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
 
+/*
+ * List with all IOMMUs in the system. This list is not locked because it is
+ * only written and read at driver initialization or suspend time
+ */
 extern struct list_head amd_iommu_list;
 
+/*
+ * Structure defining one entry in the device table
+ */
 struct dev_table_entry {
 	u32 data[8];
 };
 
+/*
+ * One entry for unity mappings parsed out of the ACPI table.
+ */
 struct unity_map_entry {
 	struct list_head list;
+
+	/* starting device id this entry is used for (including) */
 	u16 devid_start;
+	/* end device id this entry is used for (including) */
 	u16 devid_end;
+
+	/* start address to unity map (including) */
 	u64 address_start;
+	/* end address to unity map (including) */
 	u64 address_end;
+
+	/* required protection */
 	int prot;
 };
 
+/*
+ * List of all unity mappings. It is not locked because as runtime it is only
+ * read. It is created at ACPI table parsing time.
+ */
 extern struct list_head amd_iommu_unity_map;
 
-/* data structures for device handling */
+/*
+ * Data structures for device handling
+ */
+
+/*
+ * Device table used by hardware. Read and write accesses by software are
+ * locked with the amd_iommu_pd_table lock.
+ */
 extern struct dev_table_entry *amd_iommu_dev_table;
+
+/*
+ * Alias table to find requestor ids to device ids. Not locked because only
+ * read on runtime.
+ */
 extern u16 *amd_iommu_alias_table;
+
+/*
+ * Reverse lookup table to find the IOMMU which translates a specific device.
+ */
 extern struct amd_iommu **amd_iommu_rlookup_table;
 
+/* size of the dma_ops aperture as power of 2 */
 extern unsigned amd_iommu_aperture_order;
 
+/* largest PCI device id we expect translation requests for */
 extern u16 amd_iommu_last_bdf;
 
 /* data structures for protection domain handling */
 extern struct protection_domain **amd_iommu_pd_table;
+
+/* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
+/* will be 1 if device isolation is enabled */
 extern int amd_iommu_isolate;
 
+/* takes a PCI device id and prints it out in a readable form */
 static inline void print_devid(u16 devid, int nl)
 {
 	int bus = devid >> 8;
@@ -241,4 +332,11 @@ static inline void print_devid(u16 devid, int nl)
 	printk("\n");
 }
 
+/* takes bus and device/function and returns the device id
+ * FIXME: should that be in generic PCI code? */
+static inline u16 calc_devid(u8 bus, u8 devfn)
+{
+	return (((u16)bus) << 8) | devfn;
+}
+
 #endif
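[Editor's note] The calc_devid() helper added above packs a PCI bus number and devfn byte into the 16-bit device id used to index the device, alias and rlookup tables. A minimal standalone sketch of the round trip, outside this commit, with hypothetical values:

	#include <stdio.h>
	#include <stdint.h>

	/* Mirrors calc_devid(): bus in the high byte, devfn in the low byte. */
	static uint16_t calc_devid(uint8_t bus, uint8_t devfn)
	{
		return ((uint16_t)bus << 8) | devfn;
	}

	int main(void)
	{
		uint8_t devfn = (2 << 3) | 1;	/* hypothetical device 00:02.1 */
		uint16_t devid = calc_devid(0x00, devfn);

		/* print_devid() unpacks the same way: bus = devid >> 8, ... */
		printf("%02x:%02x.%x -> devid 0x%04x\n",
		       devid >> 8, (devid >> 3) & 0x1f, devid & 0x7, devid);
		return 0;
	}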
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 4e2c1e517f06..133c998161ca 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -3,6 +3,8 @@
 
 #include <linux/pm.h>
 #include <linux/delay.h>
+
+#include <asm/alternative.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
 #include <asm/processor.h>
@@ -10,8 +12,6 @@
 
 #define ARCH_APICTIMER_STOPS_ON_C3	1
 
-#define Dprintk(x...)
-
 /*
  * Debugging macros
  */
@@ -35,7 +35,7 @@ extern void generic_apic_probe(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-extern int apic_verbosity;
+extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int ioapic_force;
@@ -48,7 +48,6 @@ extern int disable_apic;
 #include <asm/paravirt.h>
 #else
 #define apic_write native_apic_write
-#define apic_write_atomic native_apic_write_atomic
 #define apic_read native_apic_read
 #define setup_boot_clock setup_boot_APIC_clock
 #define setup_secondary_clock setup_secondary_APIC_clock
@@ -58,12 +57,11 @@ extern int is_vsmp_box(void);
 
 static inline void native_apic_write(unsigned long reg, u32 v)
 {
-	*((volatile u32 *)(APIC_BASE + reg)) = v;
-}
+	volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
 
-static inline void native_apic_write_atomic(unsigned long reg, u32 v)
-{
-	(void)xchg((u32 *)(APIC_BASE + reg), v);
+	alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP,
+		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
+		       ASM_OUTPUT2("0" (v), "m" (*addr)));
 }
 
 static inline u32 native_apic_read(unsigned long reg)
@@ -75,16 +73,6 @@ extern void apic_wait_icr_idle(void);
 extern u32 safe_apic_wait_icr_idle(void);
 extern int get_physical_broadcast(void);
 
-#ifdef CONFIG_X86_GOOD_APIC
-# define FORCE_READ_AROUND_WRITE 0
-# define apic_read_around(x)
-# define apic_write_around(x, y) apic_write((x), (y))
-#else
-# define FORCE_READ_AROUND_WRITE 1
-# define apic_read_around(x) apic_read(x)
-# define apic_write_around(x, y) apic_write_atomic((x), (y))
-#endif
-
 static inline void ack_APIC_irq(void)
 {
 	/*
@@ -95,7 +83,7 @@ static inline void ack_APIC_irq(void)
 	 */
 
 	/* Docs say use 0 for future compatibility */
-	apic_write_around(APIC_EOI, 0);
+	apic_write(APIC_EOI, 0);
 }
 
 extern int lapic_get_maxlvt(void);
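[Editor's note] With apic_write_atomic() and apic_write_around() removed, native_apic_write() above now self-patches: the alternatives mechanism emits a plain movl and rewrites it to xchgl at boot only on CPUs flagged with X86_FEATURE_11AP (the "bad local APIC" erratum). A runtime-equivalent C sketch of the selection, assuming a hypothetical cpu_has_11ap_bug flag; the real code patches the instruction in place, so there is no branch at runtime:

	#include <stdint.h>

	/* Sketch only: models what the patched instruction does, not how
	 * the alternatives mechanism installs it. */
	static inline void apic_write_sketch(volatile uint32_t *addr, uint32_t v,
					     int cpu_has_11ap_bug)
	{
		if (cpu_has_11ap_bug)
			__atomic_exchange_n(addr, v, __ATOMIC_SEQ_CST);	/* xchgl */
		else
			*addr = v;					/* movl */
	}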
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h
index 768aee8a04ef..8411750ceb63 100644
--- a/include/asm-x86/arch_hooks.h
+++ b/include/asm-x86/arch_hooks.h
@@ -21,6 +21,7 @@ extern void intr_init_hook(void);
 extern void pre_intr_init_hook(void);
 extern void pre_setup_arch_hook(void);
 extern void trap_init_hook(void);
+extern void pre_time_init_hook(void);
 extern void time_init_hook(void);
 extern void mca_nmi_hook(void);
 
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 96b1829cea15..cfb2b64f76e7 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -356,7 +356,7 @@ static inline unsigned long ffz(unsigned long word)
  * __fls: find last set bit in word
  * @word: The word to search
  *
- * Undefined if no zero exists, so code should check against ~0UL first.
+ * Undefined if no set bit exists, so code should check against 0 first.
  */
 static inline unsigned long __fls(unsigned long word)
 {
diff --git a/include/asm-x86/calling.h b/include/asm-x86/calling.h
index f13e62e2cb3e..2bc162e0ec6e 100644
--- a/include/asm-x86/calling.h
+++ b/include/asm-x86/calling.h
@@ -104,7 +104,7 @@
 	.endif
 	.endm
 
-	.macro LOAD_ARGS offset
+	.macro LOAD_ARGS offset, skiprax=0
 	movq \offset(%rsp), %r11
 	movq \offset+8(%rsp), %r10
 	movq \offset+16(%rsp), %r9
@@ -113,7 +113,10 @@
 	movq \offset+48(%rsp), %rdx
 	movq \offset+56(%rsp), %rsi
 	movq \offset+64(%rsp), %rdi
+	.if \skiprax
+	.else
 	movq \offset+72(%rsp), %rax
+	.endif
 	.endm
 
 #define REST_SKIP 6*8
@@ -165,4 +168,3 @@
 	.macro icebp
 	.byte 0xf1
 	.endm
-
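[Editor's note] The new skiprax flag lets a caller reload every saved argument register except %rax, preserving a return value that ptrace may have rewritten on syscall restart. A hedged usage sketch (the real consumer is the syscall-return path in entry_64.S; the comment is illustrative):

		/* reload args from the stack, but keep the live %rax */
		LOAD_ARGS ARGOFFSET, 1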
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 75ef959db329..2f5a792b0acc 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -79,6 +79,7 @@
 #define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well on this CPU */
 #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
+#define X86_FEATURE_11AP	(3*32+19) /* Bad local APIC aka 11AP */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 87a715367a1b..3c034f48fdb0 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
 #ifdef CONFIG_ACPI
 	void *acpi_handle;
 #endif
+#ifdef CONFIG_X86_64
+struct dma_mapping_ops *dma_ops;
+#endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c68f360ef564..ad9cd6d49bfc 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -14,11 +14,11 @@ extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
 extern struct device fallback_dev;
 extern int panic_on_overflow;
-extern int forbid_dac;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-	int (*mapping_error)(dma_addr_t dma_addr);
+	int (*mapping_error)(struct device *dev,
+			dma_addr_t dma_addr);
 	void* (*alloc_coherent)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp);
 	void (*free_coherent)(struct device *dev, size_t size,
@@ -57,14 +57,32 @@ struct dma_mapping_ops {
 	int is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+	return dma_ops;
+#else
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return dma_ops;
+	else
+		return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+	return 0;
+#else
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 
 	return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -84,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 		 int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 	   int nents, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
+	return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 	     int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_sg)
-		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+	if (ops->unmap_sg)
+		ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
+	if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -129,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
 			   size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
+	if (ops->sync_single_for_device)
+		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -140,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			      unsigned long offset, size_t size, int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-						   size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_cpu)
+		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+					       size, direction);
 	flush_write_buffers();
 }
 
@@ -153,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
 				 int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_device)
+		ops->sync_single_range_for_device(hwdev, dma_handle,
+						  offset, size, direction);
 	flush_write_buffers();
 }
 
@@ -165,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 		    int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
@@ -175,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 		       int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
 	flush_write_buffers();
 }
@@ -186,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
 				      int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(dev, page_to_phys(page)+offset,
-				   size, direction);
+	return ops->map_single(dev, page_to_phys(page) + offset,
+			       size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
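[Editor's note] The pattern above routes every dma_* helper through get_dma_ops(), so a 64-bit kernel can give each device its own dma_mapping_ops via dev->archdata.dma_ops (e.g. an IOMMU-backed implementation) while everything else falls back to the global dma_ops table. A simplified sketch of just the dispatch rule, with stand-in types:

	struct dma_mapping_ops;

	extern struct dma_mapping_ops *dma_ops;	/* global default table */

	struct device_sketch {
		struct dma_mapping_ops *archdata_dma_ops;	/* may be NULL */
	};

	/* Per-device ops win; the global table is the fallback. */
	static struct dma_mapping_ops *get_dma_ops_sketch(struct device_sketch *dev)
	{
		if (!dev || !dev->archdata_dma_ops)
			return dma_ops;
		return dev->archdata_dma_ops;
	}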
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 06633b01dd5b..16a31e2c7c57 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -90,6 +90,14 @@ static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
 }
 #endif
 
+#ifdef CONFIG_MEMTEST
+extern void early_memtest(unsigned long start, unsigned long end);
+#else
+static inline void early_memtest(unsigned long start, unsigned long end)
+{
+}
+#endif
+
 extern unsigned long end_user_pfn;
 
 extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index aae2f0501a40..f1ac2b2167d7 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -90,13 +90,13 @@ enum fixed_addresses {
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
 	 *
-	 * We round it up to the next 512 pages boundary so that we
+	 * We round it up to the next 256 pages boundary so that we
 	 * can have a single pgd entry and a single pte table:
 	 */
 #define NR_FIX_BTMAPS		64
 #define FIX_BTMAPS_NESTING	4
-	FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 -
-			(__end_of_permanent_fixed_addresses & 511),
+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
+			(__end_of_permanent_fixed_addresses & 255),
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
 	FIX_WP_TEST,
 #ifdef CONFIG_ACPI
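[Editor's note] The comment fix matches the arithmetic: NR_FIX_BTMAPS (64) slots times FIX_BTMAPS_NESTING (4) nesting levels is 256 pages, so FIX_BTMAP_END only needs rounding up to a 256-page boundary, not 512. A standalone check of the rounding expression, with a hypothetical starting index:

	#include <assert.h>

	#define NR_FIX_BTMAPS		64
	#define FIX_BTMAPS_NESTING	4

	int main(void)
	{
		/* hypothetical __end_of_permanent_fixed_addresses */
		unsigned long end_perm = 300;
		unsigned long btmap_end = end_perm + 256 - (end_perm & 255);

		assert(NR_FIX_BTMAPS * FIX_BTMAPS_NESTING == 256);
		assert((btmap_end & 255) == 0);	/* 256-page aligned */
		return 0;
	}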
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h
index c184441133f2..5c68b32ee1c8 100644
--- a/include/asm-x86/ftrace.h
+++ b/include/asm-x86/ftrace.h
@@ -1,5 +1,5 @@
 #ifndef _ASM_X86_FTRACE
-#define _ASM_SPARC64_FTRACE
+#define _ASM_X86_FTRACE
 
 #ifdef CONFIG_FTRACE
 #define MCOUNT_ADDR		((long)(mcount))
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 33b9aeeb35a2..3f62a83887f3 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -2,7 +2,6 @@
 #define _ASM_X8664_GART_H	1
 
 #include <asm/e820.h>
-#include <asm/iommu.h>
 
 extern void set_up_gart_resume(u32, u32);
 
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index ff87fca0caf9..c4c91b37c104 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -1,6 +1,56 @@
+/*
+ * Generic GPIO API implementation for x86.
+ *
+ * Derived from the generic GPIO API for powerpc:
+ *
+ * Copyright (c) 2007-2008  MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
 #ifndef _ASM_I386_GPIO_H
 #define _ASM_I386_GPIO_H
 
-#include <gpio.h>
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+/*
+ * Just call gpiolib.
+ */
+static inline int gpio_get_value(unsigned int gpio)
+{
+	return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+	__gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+	return __gpio_cansleep(gpio);
+}
+
+/*
+ * Not implemented, yet.
+ */
+static inline int gpio_to_irq(unsigned int gpio)
+{
+	return -ENOSYS;
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+	return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
 
 #endif /* _ASM_I386_GPIO_H */
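[Editor's note] Since the header now forwards to gpiolib, generic GPIO consumers compile unchanged on x86 when CONFIG_GPIOLIB is set. A usage sketch with the integer-based gpiolib API of this era; the GPIO number and label are hypothetical:

	#include <linux/gpio.h>

	static int demo_led_on(void)
	{
		int ret = gpio_request(42, "demo-led");	/* hypothetical GPIO */
		if (ret)
			return ret;
		ret = gpio_direction_output(42, 0);	/* output, initially low */
		if (ret == 0)
			gpio_set_value(42, 1);	/* lands in __gpio_set_value() above */
		gpio_free(42);
		return ret;
	}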
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
index 14171a4924f6..439a9acc132d 100644
--- a/include/asm-x86/hugetlb.h
+++ b/include/asm-x86/hugetlb.h
@@ -14,11 +14,13 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+static inline int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
 {
-	if (len & ~HPAGE_MASK)
+	struct hstate *h = hstate_file(file);
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
+	if (addr & ~huge_page_mask(h))
 		return -EINVAL;
 	return 0;
 }
@@ -26,7 +28,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
 }
 
-static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr, unsigned long end,
 					  unsigned long floor,
 					  unsigned long ceiling)
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 37672f79dcc8..96fa8449ff11 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -137,60 +137,6 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
-/*
- * Signal frame handlers.
- */
-
-static inline int save_i387(struct _fpstate __user *buf)
-{
-	struct task_struct *tsk = current;
-	int err = 0;
-
-	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
-			sizeof(tsk->thread.xstate->fxsave));
-
-	if ((unsigned long)buf % 16)
-		printk("save_i387: bad fpstate %p\n", buf);
-
-	if (!used_math())
-		return 0;
-	clear_used_math(); /* trigger finit */
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		err = save_i387_checking((struct i387_fxsave_struct __user *)
-					 buf);
-		if (err)
-			return err;
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
-	} else {
-		if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
-				   sizeof(struct i387_fxsave_struct)))
-			return -1;
-	}
-	return 1;
-}
-
-/*
- * This restores directly out of user space. Exceptions are handled.
- */
-static inline int restore_i387(struct _fpstate __user *buf)
-{
-	struct task_struct *tsk = current;
-	int err;
-
-	if (!used_math()) {
-		err = init_fpu(tsk);
-		if (err)
-			return err;
-	}
-
-	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
-		clts();
-		task_thread_info(current)->status |= TS_USEDFPU;
-	}
-	return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
-}
-
 #else /* CONFIG_X86_32 */
 
 extern void finit(void);
diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h
deleted file mode 100644
index cf9c98e5bdb5..000000000000
--- a/include/asm-x86/ide.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *  Copyright (C) 1994-1996  Linus Torvalds & authors
- */
-
-/*
- *  This file contains the i386 architecture specific IDE code.
- */
-
-#ifndef __ASMi386_IDE_H
-#define __ASMi386_IDE_H
-
-#ifdef __KERNEL__
-
-
-#ifndef MAX_HWIFS
-# ifdef CONFIG_BLK_DEV_IDEPCI
-#define MAX_HWIFS	10
-# else
-#define MAX_HWIFS	6
-# endif
-#endif
-
-static __inline__ int ide_default_irq(unsigned long base)
-{
-	switch (base) {
-	case 0x1f0: return 14;
-	case 0x170: return 15;
-	case 0x1e8: return 11;
-	case 0x168: return 10;
-	case 0x1e0: return 8;
-	case 0x160: return 12;
-	default:
-		return 0;
-	}
-}
-
-static __inline__ unsigned long ide_default_io_base(int index)
-{
-	/*
-	 * If PCI is present then it is not safe to poke around
-	 * the other legacy IDE ports. Only 0x1f0 and 0x170 are
-	 * defined compatibility mode ports for PCI. A user can
-	 * override this using ide= but we must default safe.
-	 */
-	if (no_pci_devices()) {
-		switch(index) {
-		case 2: return 0x1e8;
-		case 3: return 0x168;
-		case 4: return 0x1e0;
-		case 5: return 0x160;
-		}
-	}
-	switch (index) {
-	case 0: return 0x1f0;
-	case 1: return 0x170;
-	default:
-		return 0;
-	}
-}
-
-#include <asm-generic/ide_iops.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASMi386_IDE_H */
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 4df44ed54077..e876d89ac156 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -110,6 +110,8 @@ static inline void *phys_to_virt(unsigned long address)
  */
 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+				unsigned long prot_val);
 
 /*
  * The default ioremap() behavior is non-cached:
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index ddd8058a5026..22995c5c5adc 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -175,6 +175,8 @@ extern void early_iounmap(void *addr, unsigned long size);
  */
 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+				unsigned long prot_val);
 
 /*
  * The default ioremap() behavior is non-cached:
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 068c9a40aa5b..ecc8061904a9 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -3,6 +3,7 @@
 
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
+extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
@@ -25,10 +26,18 @@ extern void gart_iommu_hole_init(void);
 static inline void early_gart_iommu_check(void)
 {
 }
-
+static inline void gart_iommu_init(void)
+{
+}
 static inline void gart_iommu_shutdown(void)
 {
 }
+static inline void gart_parse_options(char *options)
+{
+}
+static inline void gart_iommu_hole_init(void)
+{
+}
 #endif
 
 #endif
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index 196d63c28aa4..bb1c09f7a76c 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -122,7 +122,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask(query_cpu, mask) {
+	for_each_cpu_mask_nr(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index 8f855a15f64d..c0e52a14fd4d 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -10,14 +10,15 @@
 # define VA_PTE_0		5
 # define PA_PTE_1		6
 # define VA_PTE_1		7
+# define PA_SWAP_PAGE		8
 # ifdef CONFIG_X86_PAE
-#  define PA_PMD_0		8
-#  define VA_PMD_0		9
-#  define PA_PMD_1		10
-#  define VA_PMD_1		11
-#  define PAGES_NR		12
+#  define PA_PMD_0		9
+#  define VA_PMD_0		10
+#  define PA_PMD_1		11
+#  define VA_PMD_1		12
+#  define PAGES_NR		13
 # else
-#  define PAGES_NR		8
+#  define PAGES_NR		9
 # endif
 #else
 # define PA_CONTROL_PAGE	0
@@ -152,11 +153,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage NORET_TYPE void
+asmlinkage unsigned long
 relocate_kernel(unsigned long indirection_page,
 		unsigned long control_page,
 		unsigned long start_address,
-		unsigned int has_pae) ATTRIB_NORET;
+		unsigned int has_pae,
+		unsigned int preserve_context);
 #else
 NORET_TYPE void
 relocate_kernel(unsigned long indirection_page,
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 80eefef2cc76..6f1840812e59 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -228,5 +228,6 @@ struct kvm_pit_state {
 #define KVM_TRC_CLTS             (KVM_TRC_HANDLER + 0x12)
 #define KVM_TRC_LMSW             (KVM_TRC_HANDLER + 0x13)
 #define KVM_TRC_APIC_ACCESS      (KVM_TRC_HANDLER + 0x14)
+#define KVM_TRC_TDP_FAULT        (KVM_TRC_HANDLER + 0x15)
 
 #endif
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 844f2a89afbc..bc34dc21f178 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -27,6 +27,7 @@
 #define KVM_PRIVATE_MEM_SLOTS 4
 
 #define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
@@ -79,6 +80,7 @@
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_NR_VAR_MTRR 8
 
 extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
@@ -109,12 +111,12 @@ enum {
 };
 
 enum {
+	VCPU_SREG_ES,
 	VCPU_SREG_CS,
+	VCPU_SREG_SS,
 	VCPU_SREG_DS,
-	VCPU_SREG_ES,
 	VCPU_SREG_FS,
 	VCPU_SREG_GS,
-	VCPU_SREG_SS,
 	VCPU_SREG_TR,
 	VCPU_SREG_LDTR,
 };
@@ -243,6 +245,7 @@ struct kvm_vcpu_arch {
 	gfn_t last_pt_write_gfn;
 	int last_pt_write_count;
 	u64 *last_pte_updated;
+	gfn_t last_pte_gfn;
 
 	struct {
 		gfn_t gfn; /* presumed gfn during guest pte update */
@@ -287,6 +290,10 @@ struct kvm_vcpu_arch {
 	unsigned int hv_clock_tsc_khz;
 	unsigned int time_offset;
 	struct page *time_page;
+
+	bool nmi_pending;
+
+	u64 mtrr[0x100];
 };
 
 struct kvm_mem_alias {
@@ -344,6 +351,7 @@ struct kvm_vcpu_stat {
 	u32 mmio_exits;
 	u32 signal_exits;
 	u32 irq_window_exits;
+	u32 nmi_window_exits;
 	u32 halt_exits;
 	u32 halt_wakeup;
 	u32 request_irq_exits;
@@ -379,7 +387,6 @@ struct kvm_x86_ops {
 	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
-	void (*vcpu_decache)(struct kvm_vcpu *vcpu);
 
 	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
 			       struct kvm_debug_guest *dbg);
@@ -497,6 +504,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long value);
 
+void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+				int type_bits, int seg);
+
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
@@ -515,6 +526,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
 			   u32 error_code);
 
+void kvm_inject_nmi(struct kvm_vcpu *vcpu);
+
 void fx_init(struct kvm_vcpu *vcpu);
 
 int emulator_read_std(unsigned long addr,
@@ -543,6 +556,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
 
 void kvm_enable_tdp(void);
+void kvm_disable_tdp(void);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
@@ -554,55 +568,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 	return (struct kvm_mmu_page *)page_private(page);
 }
 
-static inline u16 read_fs(void)
+static inline u16 kvm_read_fs(void)
 {
 	u16 seg;
 	asm("mov %%fs, %0" : "=g"(seg));
 	return seg;
 }
 
-static inline u16 read_gs(void)
+static inline u16 kvm_read_gs(void)
 {
 	u16 seg;
 	asm("mov %%gs, %0" : "=g"(seg));
 	return seg;
 }
 
-static inline u16 read_ldt(void)
+static inline u16 kvm_read_ldt(void)
 {
 	u16 ldt;
 	asm("sldt %0" : "=g"(ldt));
 	return ldt;
 }
 
-static inline void load_fs(u16 sel)
+static inline void kvm_load_fs(u16 sel)
 {
 	asm("mov %0, %%fs" : : "rm"(sel));
 }
 
-static inline void load_gs(u16 sel)
+static inline void kvm_load_gs(u16 sel)
 {
 	asm("mov %0, %%gs" : : "rm"(sel));
 }
 
-#ifndef load_ldt
-static inline void load_ldt(u16 sel)
+static inline void kvm_load_ldt(u16 sel)
 {
 	asm("lldt %0" : : "rm"(sel));
 }
-#endif
 
-static inline void get_idt(struct descriptor_table *table)
+static inline void kvm_get_idt(struct descriptor_table *table)
 {
 	asm("sidt %0" : "=m"(*table));
 }
 
-static inline void get_gdt(struct descriptor_table *table)
+static inline void kvm_get_gdt(struct descriptor_table *table)
 {
 	asm("sgdt %0" : "=m"(*table));
 }
 
-static inline unsigned long read_tr_base(void)
+static inline unsigned long kvm_read_tr_base(void)
 {
 	u16 tr;
 	asm("str %0" : "=g"(tr));
@@ -619,17 +631,17 @@ static inline unsigned long read_msr(unsigned long msr)
 }
 #endif
 
-static inline void fx_save(struct i387_fxsave_struct *image)
+static inline void kvm_fx_save(struct i387_fxsave_struct *image)
 {
 	asm("fxsave (%0)":: "r" (image));
 }
 
-static inline void fx_restore(struct i387_fxsave_struct *image)
+static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
 {
 	asm("fxrstor (%0)":: "r" (image));
 }
 
-static inline void fx_finit(void)
+static inline void kvm_fx_finit(void)
 {
 	asm("finit");
 }
@@ -691,4 +703,30 @@ enum {
 	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
 		   vcpu, 0, 0, 0, 0, 0, 0)
 
+#ifdef CONFIG_64BIT
+# define KVM_EX_ENTRY ".quad"
+# define KVM_EX_PUSH "pushq"
+#else
+# define KVM_EX_ENTRY ".long"
+# define KVM_EX_PUSH "pushl"
+#endif
+
+/*
+ * Hardware virtualization extension instructions may fault if a
+ * reboot turns off virtualization while processes are running.
+ * Trap the fault and ignore the instruction if that happens.
+ */
+asmlinkage void kvm_handle_fault_on_reboot(void);
+
+#define __kvm_handle_fault_on_reboot(insn) \
+	"666: " insn "\n\t" \
+	".pushsection .text.fixup, \"ax\" \n" \
+	"667: \n\t" \
+	KVM_EX_PUSH " $666b \n\t" \
+	"jmp kvm_handle_fault_on_reboot \n\t" \
+	".popsection \n\t" \
+	".pushsection __ex_table, \"a\" \n\t" \
+	KVM_EX_ENTRY " 666b, 667b \n\t" \
+	".popsection"
+
 #endif
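[Editor's note] __kvm_handle_fault_on_reboot() wraps a virtualization instruction with an exception-table entry: label 666 carries the instruction, and the fixup stub at 667 pushes 666's address and jumps to kvm_handle_fault_on_reboot(), so a fault taken because VMX/SVM was already disabled during reboot is swallowed instead of oopsing. A sketch of how a caller would wrap an instruction; the actual call sites live in vmx.c/svm.c, and "vmxoff" here is illustrative:

	/* Sketch: emit "vmxoff" with a fixup that ignores a fault taken
	 * when virtualization was already turned off by a reboot. */
	static inline void hardware_disable_sketch(void)
	{
		asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
	}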
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index b877bbd2d3a7..4e8c1e48d91d 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -124,7 +124,8 @@ struct decode_cache {
 	u8 rex_prefix;
 	struct operand src;
 	struct operand dst;
-	unsigned long *override_base;
+	bool has_seg_override;
+	u8 seg_override;
 	unsigned int d;
 	unsigned long regs[NR_VCPU_REGS];
 	unsigned long eip;
@@ -134,6 +135,7 @@ struct decode_cache {
 	u8 modrm_reg;
 	u8 modrm_rm;
 	u8 use_modrm_ea;
+	bool rip_relative;
 	unsigned long modrm_ea;
 	void *modrm_ptr;
 	unsigned long modrm_val;
@@ -150,12 +152,7 @@ struct x86_emulate_ctxt {
 	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
 	int mode;
 
-	unsigned long cs_base;
-	unsigned long ds_base;
-	unsigned long es_base;
-	unsigned long ss_base;
-	unsigned long gs_base;
-	unsigned long fs_base;
+	u32 cs_base;
 
 	/* decode cache */
 
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h
index 017c8c19ad8f..c3b9dc6970c9 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/mach-bigsmp/mach_apic.h
@@ -63,9 +63,9 @@ static inline void init_apic_ldr(void)
 	unsigned long val;
 	int cpu = smp_processor_id();
 
-	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	apic_write(APIC_DFR, APIC_DFR_VALUE);
 	val = calculate_ldr(cpu);
-	apic_write_around(APIC_LDR, val);
+	apic_write(APIC_LDR, val);
 }
 
 static inline void setup_apic_routing(void)
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index 0b2cde5e1b74..f3226b9a6b82 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -46,10 +46,10 @@ static inline void init_apic_ldr(void)
 {
 	unsigned long val;
 
-	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	apic_write(APIC_DFR, APIC_DFR_VALUE);
 	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
 	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
-	apic_write_around(APIC_LDR, val);
+	apic_write(APIC_LDR, val);
 }
 
 static inline int apic_id_registered(void)
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 56d001b9dce4..dbab36d64d48 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -12,11 +12,11 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 {
 	CMOS_WRITE(0xa, 0xf);
 	local_flush_tlb();
-	Dprintk("1.\n");
+	pr_debug("1.\n");
 	*((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
-	Dprintk("2.\n");
+	pr_debug("2.\n");
 	*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
-	Dprintk("3.\n");
+	pr_debug("3.\n");
 }
 
 static inline void smpboot_restore_warm_reset_vector(void)
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h
index fbc8ad256f5a..0a3fdf930672 100644
--- a/include/asm-x86/mach-es7000/mach_apic.h
+++ b/include/asm-x86/mach-es7000/mach_apic.h
@@ -66,9 +66,9 @@ static inline void init_apic_ldr(void)
 	unsigned long val;
 	int cpu = smp_processor_id();
 
-	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	apic_write(APIC_DFR, APIC_DFR_VALUE);
 	val = calculate_ldr(cpu);
-	apic_write_around(APIC_LDR, val);
+	apic_write(APIC_LDR, val);
 }
 
 #ifndef CONFIG_X86_GENERICARCH
diff --git a/include/asm-x86/mach-generic/mach_mpspec.h b/include/asm-x86/mach-generic/mach_mpspec.h
index 9ef0b941bb22..c83c120be538 100644
--- a/include/asm-x86/mach-generic/mach_mpspec.h
+++ b/include/asm-x86/mach-generic/mach_mpspec.h
@@ -7,4 +7,6 @@
 /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
 #define MAX_MP_BUSSES 260
 
+extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
+				char *productid);
 #endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index 1f76c2e70232..c47e2ab5c5ca 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -63,10 +63,10 @@ static inline void init_apic_ldr(void)
63 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ 63 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
64 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); 64 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
65 id = my_cluster | (1UL << count); 65 id = my_cluster | (1UL << count);
66 apic_write_around(APIC_DFR, APIC_DFR_VALUE); 66 apic_write(APIC_DFR, APIC_DFR_VALUE);
67 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; 67 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
68 val |= SET_APIC_LOGICAL_ID(id); 68 val |= SET_APIC_LOGICAL_ID(id);
69 apic_write_around(APIC_LDR, val); 69 apic_write(APIC_LDR, val);
70} 70}
71 71
72static inline int multi_timer_check(int apic, int irq) 72static inline int multi_timer_check(int apic, int irq)
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
122 122
123static inline physid_mask_t apicid_to_cpu_present(int apicid) 123static inline physid_mask_t apicid_to_cpu_present(int apicid)
124{ 124{
125 return physid_mask_of_physid(0); 125 return physid_mask_of_physid(apicid);
126} 126}
127 127
128static inline void setup_portio_remap(void) 128static inline void setup_portio_remap(void)
diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h
deleted file mode 100644
index 86be554342d4..000000000000
--- a/include/asm-x86/mach-visws/entry_arch.h
+++ /dev/null
@@ -1,5 +0,0 @@
1/*
2 * VISWS uses the standard Linux entry points:
3 */
4
5#include "../mach-default/entry_arch.h"
diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h
deleted file mode 100644
index 6943e7a1d0e6..000000000000
--- a/include/asm-x86/mach-visws/mach_apic.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../mach-default/mach_apic.h"
diff --git a/include/asm-x86/mach-visws/mach_apicdef.h b/include/asm-x86/mach-visws/mach_apicdef.h
deleted file mode 100644
index 42711d152a93..000000000000
--- a/include/asm-x86/mach-visws/mach_apicdef.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../mach-default/mach_apicdef.h"
diff --git a/include/asm-x86/mach-visws/setup_arch.h b/include/asm-x86/mach-visws/setup_arch.h
deleted file mode 100644
index fa4766ca2d10..000000000000
--- a/include/asm-x86/mach-visws/setup_arch.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../mach-default/setup_arch.h"
diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h
deleted file mode 100644
index e4433ca88715..000000000000
--- a/include/asm-x86/mach-visws/smpboot_hooks.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../mach-default/smpboot_hooks.h"
diff --git a/include/asm-x86/namei.h b/include/asm-x86/namei.h
deleted file mode 100644
index 415ef5d9550e..000000000000
--- a/include/asm-x86/namei.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_X86_NAMEI_H
2#define _ASM_X86_NAMEI_H
3
4/* This dummy routine maybe changed to something useful
5 * for /usr/gnemul/ emulation stuff.
6 * Look at asm-sparc/namei.h for details.
7 */
8
9#define __emul_prefix() NULL
10
11#endif /* _ASM_X86_NAMEI_H */
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 28d7b4533b1a..49982110e4d9 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -18,8 +18,11 @@
18 (ie, 32-bit PAE). */ 18 (ie, 32-bit PAE). */
19#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) 19#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
20 20
21/* PTE_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ 21/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
22#define PTE_MASK ((pteval_t)PHYSICAL_PAGE_MASK) 22#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
23
24/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
25#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
23 26
24#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 27#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
25#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 28#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
@@ -29,8 +32,7 @@
29#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 32#define HPAGE_MASK (~(HPAGE_SIZE - 1))
30#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 33#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
31 34
32/* to align the pointer to the (next) page boundary */ 35#define HUGE_MAX_HSTATE 2
33#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
34 36
35#ifndef __ASSEMBLY__ 37#ifndef __ASSEMBLY__
36#include <linux/types.h> 38#include <linux/types.h>
@@ -144,6 +146,11 @@ static inline pteval_t native_pte_val(pte_t pte)
144 return pte.pte; 146 return pte.pte;
145} 147}
146 148
149static inline pteval_t native_pte_flags(pte_t pte)
150{
151 return native_pte_val(pte) & PTE_FLAGS_MASK;
152}
153
147#define pgprot_val(x) ((x).pgprot) 154#define pgprot_val(x) ((x).pgprot)
148#define __pgprot(x) ((pgprot_t) { (x) } ) 155#define __pgprot(x) ((pgprot_t) { (x) } )
149 156
@@ -165,7 +172,7 @@ static inline pteval_t native_pte_val(pte_t pte)
165#endif 172#endif
166 173
167#define pte_val(x) native_pte_val(x) 174#define pte_val(x) native_pte_val(x)
168#define pte_flags(x) native_pte_val(x) 175#define pte_flags(x) native_pte_flags(x)
169#define __pte(x) native_make_pte(x) 176#define __pte(x) native_make_pte(x)
170 177
171#endif /* CONFIG_PARAVIRT */ 178#endif /* CONFIG_PARAVIRT */
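
The rename above is the point of this hunk: a pte value is now split by two complementary masks, PTE_PFN_MASK for the frame number and PTE_FLAGS_MASK for everything else. A minimal, self-contained sketch of that arithmetic (assuming 4 KB pages and a 64-bit pteval_t; the kernel's real PTE_PFN_MASK additionally clips PAGE_MASK to __PHYSICAL_MASK):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Simplified: the kernel also masks with __PHYSICAL_MASK. */
#define PTE_PFN_MASK	((pteval_t)PAGE_MASK)
#define PTE_FLAGS_MASK	(~PTE_PFN_MASK)

int main(void)
{
	pteval_t pte = 0x00000000deadb063ULL;	/* pfn 0xdeadb, flags 0x063 */

	printf("pfn   = %llx\n",
	       (unsigned long long)((pte & PTE_PFN_MASK) >> PAGE_SHIFT));
	printf("flags = %llx\n",
	       (unsigned long long)(pte & PTE_FLAGS_MASK));
	return 0;
}
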
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index ef5e8ec6a6ab..fbbde93f12d6 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -205,7 +205,6 @@ struct pv_apic_ops {
205 * these shouldn't be in this interface. 205 * these shouldn't be in this interface.
206 */ 206 */
207 void (*apic_write)(unsigned long reg, u32 v); 207 void (*apic_write)(unsigned long reg, u32 v);
208 void (*apic_write_atomic)(unsigned long reg, u32 v);
209 u32 (*apic_read)(unsigned long reg); 208 u32 (*apic_read)(unsigned long reg);
210 void (*setup_boot_clock)(void); 209 void (*setup_boot_clock)(void);
211 void (*setup_secondary_clock)(void); 210 void (*setup_secondary_clock)(void);
@@ -326,6 +325,15 @@ struct pv_mmu_ops {
326 unsigned long phys, pgprot_t flags); 325 unsigned long phys, pgprot_t flags);
327}; 326};
328 327
328struct raw_spinlock;
329struct pv_lock_ops {
330 int (*spin_is_locked)(struct raw_spinlock *lock);
331 int (*spin_is_contended)(struct raw_spinlock *lock);
332 void (*spin_lock)(struct raw_spinlock *lock);
333 int (*spin_trylock)(struct raw_spinlock *lock);
334 void (*spin_unlock)(struct raw_spinlock *lock);
335};
336
329/* This contains all the paravirt structures: we get a convenient 337/* This contains all the paravirt structures: we get a convenient
330 * number for each function using the offset which we use to indicate 338 * number for each function using the offset which we use to indicate
331 * what to patch. */ 339 * what to patch. */
@@ -336,6 +344,7 @@ struct paravirt_patch_template {
336 struct pv_irq_ops pv_irq_ops; 344 struct pv_irq_ops pv_irq_ops;
337 struct pv_apic_ops pv_apic_ops; 345 struct pv_apic_ops pv_apic_ops;
338 struct pv_mmu_ops pv_mmu_ops; 346 struct pv_mmu_ops pv_mmu_ops;
347 struct pv_lock_ops pv_lock_ops;
339}; 348};
340 349
341extern struct pv_info pv_info; 350extern struct pv_info pv_info;
@@ -345,6 +354,7 @@ extern struct pv_cpu_ops pv_cpu_ops;
345extern struct pv_irq_ops pv_irq_ops; 354extern struct pv_irq_ops pv_irq_ops;
346extern struct pv_apic_ops pv_apic_ops; 355extern struct pv_apic_ops pv_apic_ops;
347extern struct pv_mmu_ops pv_mmu_ops; 356extern struct pv_mmu_ops pv_mmu_ops;
357extern struct pv_lock_ops pv_lock_ops;
348 358
349#define PARAVIRT_PATCH(x) \ 359#define PARAVIRT_PATCH(x) \
350 (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) 360 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
@@ -896,11 +906,6 @@ static inline void apic_write(unsigned long reg, u32 v)
896 PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); 906 PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
897} 907}
898 908
899static inline void apic_write_atomic(unsigned long reg, u32 v)
900{
901 PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
902}
903
904static inline u32 apic_read(unsigned long reg) 909static inline u32 apic_read(unsigned long reg)
905{ 910{
906 return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); 911 return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
@@ -1083,6 +1088,9 @@ static inline pteval_t pte_flags(pte_t pte)
1083 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, 1088 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
1084 pte.pte); 1089 pte.pte);
1085 1090
1091#ifdef CONFIG_PARAVIRT_DEBUG
1092 BUG_ON(ret & PTE_PFN_MASK);
1093#endif
1086 return ret; 1094 return ret;
1087} 1095}
1088 1096
@@ -1374,6 +1382,37 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1374void _paravirt_nop(void); 1382void _paravirt_nop(void);
1375#define paravirt_nop ((void *)_paravirt_nop) 1383#define paravirt_nop ((void *)_paravirt_nop)
1376 1384
1385void paravirt_use_bytelocks(void);
1386
1387#ifdef CONFIG_SMP
1388
1389static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1390{
1391 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1392}
1393
1394static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1395{
1396 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1397}
1398
1399static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1400{
1401 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1402}
1403
1404static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1405{
1406 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1407}
1408
1409static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1410{
1411 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1412}
1413
1414#endif
1415
1377/* These all sit in the .parainstructions section to tell us what to patch. */ 1416/* These all sit in the .parainstructions section to tell us what to patch. */
1378struct paravirt_patch_site { 1417struct paravirt_patch_site {
1379 u8 *instr; /* original instructions */ 1418 u8 *instr; /* original instructions */
@@ -1396,8 +1435,8 @@ extern struct paravirt_patch_site __parainstructions[],
1396 * caller saved registers but the argument parameter */ 1435 * caller saved registers but the argument parameter */
1397#define PV_SAVE_REGS "pushq %%rdi;" 1436#define PV_SAVE_REGS "pushq %%rdi;"
1398#define PV_RESTORE_REGS "popq %%rdi;" 1437#define PV_RESTORE_REGS "popq %%rdi;"
1399#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx" 1438#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
1400#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx" 1439#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
1401#define PV_FLAGS_ARG "D" 1440#define PV_FLAGS_ARG "D"
1402#endif 1441#endif
1403 1442
@@ -1458,6 +1497,7 @@ static inline unsigned long __raw_local_irq_save(void)
1458 return f; 1497 return f;
1459} 1498}
1460 1499
1500
1461/* Make sure as little as possible of this mess escapes. */ 1501/* Make sure as little as possible of this mess escapes. */
1462#undef PARAVIRT_CALL 1502#undef PARAVIRT_CALL
1463#undef __PVOP_CALL 1503#undef __PVOP_CALL
@@ -1489,8 +1529,26 @@ static inline unsigned long __raw_local_irq_save(void)
1489 1529
1490 1530
1491#ifdef CONFIG_X86_64 1531#ifdef CONFIG_X86_64
1492#define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx 1532#define PV_SAVE_REGS \
1493#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax 1533 push %rax; \
1534 push %rcx; \
1535 push %rdx; \
1536 push %rsi; \
1537 push %rdi; \
1538 push %r8; \
1539 push %r9; \
1540 push %r10; \
1541 push %r11
1542#define PV_RESTORE_REGS \
1543 pop %r11; \
1544 pop %r10; \
1545 pop %r9; \
1546 pop %r8; \
1547 pop %rdi; \
1548 pop %rsi; \
1549 pop %rdx; \
1550 pop %rcx; \
1551 pop %rax
1494#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) 1552#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1495#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) 1553#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1496#define PARA_INDIRECT(addr) *addr(%rip) 1554#define PARA_INDIRECT(addr) *addr(%rip)
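
The new pv_lock_ops table gives spinlocks the same treatment as the other pv_*_ops: every raw-spinlock operation goes through one more level of indirection, so a hypervisor-aware kernel can swap out the ticket lock wholesale (the spinlock_types.h hunk further down adds the raw_spinlock struct tag these prototypes rely on). A plain-C sketch of the dispatch pattern; the ticket_spin_* and xen_spin_lock names are illustrative, and the real kernel patches call sites through PVOP_CALL/PVOP_VCALL rather than calling through the struct at run time:

struct raw_spinlock;

struct pv_lock_ops {
	int  (*spin_is_locked)(struct raw_spinlock *lock);
	int  (*spin_is_contended)(struct raw_spinlock *lock);
	void (*spin_lock)(struct raw_spinlock *lock);
	int  (*spin_trylock)(struct raw_spinlock *lock);
	void (*spin_unlock)(struct raw_spinlock *lock);
};

/* Native build: point every op at the ticket-lock implementation. */
extern int  ticket_spin_is_locked(struct raw_spinlock *lock);
extern int  ticket_spin_is_contended(struct raw_spinlock *lock);
extern void ticket_spin_lock(struct raw_spinlock *lock);
extern int  ticket_spin_trylock(struct raw_spinlock *lock);
extern void ticket_spin_unlock(struct raw_spinlock *lock);

struct pv_lock_ops pv_lock_ops = {
	.spin_is_locked		= ticket_spin_is_locked,
	.spin_is_contended	= ticket_spin_is_contended,
	.spin_lock		= ticket_spin_lock,
	.spin_trylock		= ticket_spin_trylock,
	.spin_unlock		= ticket_spin_unlock,
};

/* A guest would swap entries before SMP bringup, e.g.:
 *	pv_lock_ops.spin_lock = xen_spin_lock;
 */
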
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 912a3a17b9db..4e91ee1e37aa 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -22,6 +22,32 @@
22 22
23DECLARE_PER_CPU(struct x8664_pda, pda); 23DECLARE_PER_CPU(struct x8664_pda, pda);
24 24
25/*
26 * These are supposed to be implemented as a single instruction which
27 * operates on the per-cpu data base segment. x86-64 doesn't have
28 * that yet, so this is a fairly inefficient workaround for the
29 * meantime. The single instruction is atomic with respect to
30 * preemption and interrupts, so we need to explicitly disable
31 * interrupts here to achieve the same effect. However, because it
32 * can be used from within interrupt-disable/enable, we can't actually
33 * disable interrupts; disabling preemption is enough.
34 */
35#define x86_read_percpu(var) \
36 ({ \
37 typeof(per_cpu_var(var)) __tmp; \
38 preempt_disable(); \
39 __tmp = __get_cpu_var(var); \
40 preempt_enable(); \
41 __tmp; \
42 })
43
44#define x86_write_percpu(var, val) \
45 do { \
46 preempt_disable(); \
47 __get_cpu_var(var) = (val); \
48 preempt_enable(); \
49 } while(0)
50
25#else /* CONFIG_X86_64 */ 51#else /* CONFIG_X86_64 */
26 52
27#ifdef __ASSEMBLY__ 53#ifdef __ASSEMBLY__
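
The comment in this hunk explains why the x86-64 versions fall back on preempt_disable() instead of a single %gs-relative instruction. A hedged sketch of a caller (the per-CPU variable here is hypothetical):

/* Illustrative only: a per-CPU counter updated from process context. */
DEFINE_PER_CPU(unsigned long, miss_count);	/* hypothetical variable */

static void note_miss(void)
{
	/*
	 * Each accessor below disables preemption internally, but the
	 * pair is not atomic as a whole; a real read-modify-write would
	 * hold preempt_disable() across both calls itself.
	 */
	unsigned long n = x86_read_percpu(miss_count);

	x86_write_percpu(miss_count, n + 1);
}
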
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index c93dbb6c2624..105057f34032 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -25,7 +25,7 @@ static inline int pud_none(pud_t pud)
25 25
26static inline int pud_bad(pud_t pud) 26static inline int pud_bad(pud_t pud)
27{ 27{
28 return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; 28 return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
29} 29}
30 30
31static inline int pud_present(pud_t pud) 31static inline int pud_present(pud_t pud)
@@ -120,9 +120,9 @@ static inline void pud_clear(pud_t *pudp)
120 write_cr3(pgd); 120 write_cr3(pgd);
121} 121}
122 122
123#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_MASK)) 123#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
124 124
125#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_MASK)) 125#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
126 126
127 127
128/* Find an entry in the second-level page table.. */ 128/* Find an entry in the second-level page table.. */
@@ -160,7 +160,7 @@ static inline int pte_none(pte_t pte)
160 160
161static inline unsigned long pte_pfn(pte_t pte) 161static inline unsigned long pte_pfn(pte_t pte)
162{ 162{
163 return (pte_val(pte) & PTE_MASK) >> PAGE_SHIFT; 163 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
164} 164}
165 165
166/* 166/*
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 49cbd76b9547..04caa2f544df 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -18,6 +18,7 @@
18#define _PAGE_BIT_UNUSED2 10 18#define _PAGE_BIT_UNUSED2 10
19#define _PAGE_BIT_UNUSED3 11 19#define _PAGE_BIT_UNUSED3 11
20#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ 20#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
21#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
21#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ 22#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
22 23
23#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) 24#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
@@ -34,6 +35,8 @@
34#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) 35#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
35#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) 36#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
36#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) 37#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
38#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
39#define __HAVE_ARCH_PTE_SPECIAL
37 40
38#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 41#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
39#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) 42#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
@@ -53,8 +56,8 @@
53 _PAGE_DIRTY) 56 _PAGE_DIRTY)
54 57
55/* Set of bits not changed in pte_modify */ 58/* Set of bits not changed in pte_modify */
56#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_PCD | _PAGE_PWT | \ 59#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
57 _PAGE_ACCESSED | _PAGE_DIRTY) 60 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
58 61
59#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) 62#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
60#define _PAGE_CACHE_WB (0) 63#define _PAGE_CACHE_WB (0)
@@ -180,7 +183,7 @@ static inline int pte_exec(pte_t pte)
180 183
181static inline int pte_special(pte_t pte) 184static inline int pte_special(pte_t pte)
182{ 185{
183 return 0; 186 return pte_val(pte) & _PAGE_SPECIAL;
184} 187}
185 188
186static inline int pmd_large(pmd_t pte) 189static inline int pmd_large(pmd_t pte)
@@ -246,7 +249,7 @@ static inline pte_t pte_clrglobal(pte_t pte)
246 249
247static inline pte_t pte_mkspecial(pte_t pte) 250static inline pte_t pte_mkspecial(pte_t pte)
248{ 251{
249 return pte; 252 return __pte(pte_val(pte) | _PAGE_SPECIAL);
250} 253}
251 254
252extern pteval_t __supported_pte_mask; 255extern pteval_t __supported_pte_mask;
@@ -286,7 +289,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
286 return __pgprot(preservebits | addbits); 289 return __pgprot(preservebits | addbits);
287} 290}
288 291
289#define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_MASK) 292#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
290 293
291#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) 294#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
292 295
@@ -302,6 +305,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
302/* Install a pte for a particular vaddr in kernel space. */ 305/* Install a pte for a particular vaddr in kernel space. */
303void set_pte_vaddr(unsigned long vaddr, pte_t pte); 306void set_pte_vaddr(unsigned long vaddr, pte_t pte);
304 307
308#ifdef CONFIG_X86_32
309extern void native_pagetable_setup_start(pgd_t *base);
310extern void native_pagetable_setup_done(pgd_t *base);
311#else
312static inline void native_pagetable_setup_start(pgd_t *base) {}
313static inline void native_pagetable_setup_done(pgd_t *base) {}
314#endif
315
305#ifdef CONFIG_PARAVIRT 316#ifdef CONFIG_PARAVIRT
306#include <asm/paravirt.h> 317#include <asm/paravirt.h>
307#else /* !CONFIG_PARAVIRT */ 318#else /* !CONFIG_PARAVIRT */
@@ -333,6 +344,16 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pte);
333 344
334#define pte_update(mm, addr, ptep) do { } while (0) 345#define pte_update(mm, addr, ptep) do { } while (0)
335#define pte_update_defer(mm, addr, ptep) do { } while (0) 346#define pte_update_defer(mm, addr, ptep) do { } while (0)
347
348static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
349{
350 native_pagetable_setup_start(base);
351}
352
353static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
354{
355 native_pagetable_setup_done(base);
356}
336#endif /* CONFIG_PARAVIRT */ 357#endif /* CONFIG_PARAVIRT */
337 358
338#endif /* __ASSEMBLY__ */ 359#endif /* __ASSEMBLY__ */
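
With _PAGE_SPECIAL now backed by a real bit (reusing _PAGE_BIT_UNUSED1, bit 9), pte_mkspecial()/pte_special() finally round-trip instead of being stubs. A self-contained check of the flag logic (simplified types; the real helpers go through __pte()/pte_val() so paravirt can intercept them):

#include <assert.h>
#include <stdint.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

#define _PAGE_BIT_SPECIAL	9	/* _PAGE_BIT_UNUSED1 */
#define _PAGE_SPECIAL		((pteval_t)1 << _PAGE_BIT_SPECIAL)

static pte_t pte_mkspecial(pte_t pte)
{
	pte.pte |= _PAGE_SPECIAL;
	return pte;
}

static int pte_special(pte_t pte)
{
	return (pte.pte & _PAGE_SPECIAL) != 0;
}

int main(void)
{
	pte_t pte = { 0x1000 | 0x63 };	/* arbitrary pfn bits + flags */

	assert(!pte_special(pte));
	pte = pte_mkspecial(pte);
	assert(pte_special(pte));
	return 0;
}
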
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index ec871c420d7e..5c3b26567a95 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -88,7 +88,7 @@ extern unsigned long pg0[];
88/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ 88/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
89#define pmd_none(x) (!(unsigned long)pmd_val((x))) 89#define pmd_none(x) (!(unsigned long)pmd_val((x)))
90#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) 90#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
91#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) 91#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
92 92
93#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) 93#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
94 94
@@ -139,7 +139,7 @@ static inline int pud_large(pud_t pud) { return 0; }
139#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) 139#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
140 140
141#define pmd_page_vaddr(pmd) \ 141#define pmd_page_vaddr(pmd) \
142 ((unsigned long)__va(pmd_val((pmd)) & PTE_MASK)) 142 ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
143 143
144#if defined(CONFIG_HIGHPTE) 144#if defined(CONFIG_HIGHPTE)
145#define pte_offset_map(dir, address) \ 145#define pte_offset_map(dir, address) \
@@ -171,21 +171,6 @@ do { \
171 */ 171 */
172#define update_mmu_cache(vma, address, pte) do { } while (0) 172#define update_mmu_cache(vma, address, pte) do { } while (0)
173 173
174extern void native_pagetable_setup_start(pgd_t *base);
175extern void native_pagetable_setup_done(pgd_t *base);
176
177#ifndef CONFIG_PARAVIRT
178static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
179{
180 native_pagetable_setup_start(base);
181}
182
183static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
184{
185 native_pagetable_setup_done(base);
186}
187#endif /* !CONFIG_PARAVIRT */
188
189#endif /* !__ASSEMBLY__ */ 174#endif /* !__ASSEMBLY__ */
190 175
191/* 176/*
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index fa7208b483ca..ac5fff4cc58a 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -16,6 +16,8 @@
16extern pud_t level3_kernel_pgt[512]; 16extern pud_t level3_kernel_pgt[512];
17extern pud_t level3_ident_pgt[512]; 17extern pud_t level3_ident_pgt[512];
18extern pmd_t level2_kernel_pgt[512]; 18extern pmd_t level2_kernel_pgt[512];
19extern pmd_t level2_fixmap_pgt[512];
20extern pmd_t level2_ident_pgt[512];
19extern pgd_t init_level4_pgt[]; 21extern pgd_t init_level4_pgt[];
20 22
21#define swapper_pg_dir init_level4_pgt 23#define swapper_pg_dir init_level4_pgt
@@ -156,17 +158,17 @@ static inline void native_pgd_clear(pgd_t *pgd)
156 158
157static inline int pgd_bad(pgd_t pgd) 159static inline int pgd_bad(pgd_t pgd)
158{ 160{
159 return (pgd_val(pgd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; 161 return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
160} 162}
161 163
162static inline int pud_bad(pud_t pud) 164static inline int pud_bad(pud_t pud)
163{ 165{
164 return (pud_val(pud) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; 166 return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
165} 167}
166 168
167static inline int pmd_bad(pmd_t pmd) 169static inline int pmd_bad(pmd_t pmd)
168{ 170{
169 return (pmd_val(pmd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; 171 return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
170} 172}
171 173
172#define pte_none(x) (!pte_val((x))) 174#define pte_none(x) (!pte_val((x)))
@@ -191,7 +193,7 @@ static inline int pmd_bad(pmd_t pmd)
191 * Level 4 access. 193 * Level 4 access.
192 */ 194 */
193#define pgd_page_vaddr(pgd) \ 195#define pgd_page_vaddr(pgd) \
194 ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK)) 196 ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
195#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) 197#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
196#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) 198#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
197static inline int pgd_large(pgd_t pgd) { return 0; } 199static inline int pgd_large(pgd_t pgd) { return 0; }
@@ -214,7 +216,7 @@ static inline int pud_large(pud_t pte)
214} 216}
215 217
216/* PMD - Level 2 access */ 218/* PMD - Level 2 access */
217#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK)) 219#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
218#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) 220#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
219 221
220#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) 222#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 092b39b3a7e6..eff2ecd7fff0 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -88,10 +88,12 @@
88#define CX86_ARR_BASE 0xc4 88#define CX86_ARR_BASE 0xc4
89#define CX86_RCR_BASE 0xdc 89#define CX86_RCR_BASE 0xdc
90 90
91#ifdef __KERNEL__
91#ifdef CONFIG_VM86 92#ifdef CONFIG_VM86
92#define X86_VM_MASK X86_EFLAGS_VM 93#define X86_VM_MASK X86_EFLAGS_VM
93#else 94#else
94#define X86_VM_MASK 0 /* No VM86 support */ 95#define X86_VM_MASK 0 /* No VM86 support */
95#endif 96#endif
97#endif
96 98
97#endif /* __ASM_I386_PROCESSOR_FLAGS_H */ 99#endif /* __ASM_I386_PROCESSOR_FLAGS_H */
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 55402d2ab938..5f58da401b43 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -134,7 +134,7 @@ extern __u32 cleared_cpu_caps[NCAPINTS];
134#ifdef CONFIG_SMP 134#ifdef CONFIG_SMP
135DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); 135DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
136#define cpu_data(cpu) per_cpu(cpu_info, cpu) 136#define cpu_data(cpu) per_cpu(cpu_info, cpu)
137#define current_cpu_data cpu_data(smp_processor_id()) 137#define current_cpu_data __get_cpu_var(cpu_info)
138#else 138#else
139#define cpu_data(cpu) boot_cpu_data 139#define cpu_data(cpu) boot_cpu_data
140#define current_cpu_data boot_cpu_data 140#define current_cpu_data boot_cpu_data
@@ -722,8 +722,6 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
722 722
723extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); 723extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
724 724
725extern int force_mwait;
726
727extern void select_idle_routine(const struct cpuinfo_x86 *c); 725extern void select_idle_routine(const struct cpuinfo_x86 *c);
728 726
729extern unsigned long boot_option_idle_override; 727extern unsigned long boot_option_idle_override;
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
index f224eb3c3157..72e7b9db29bb 100644
--- a/include/asm-x86/ptrace-abi.h
+++ b/include/asm-x86/ptrace-abi.h
@@ -73,11 +73,11 @@
73 73
74#ifdef __x86_64__ 74#ifdef __x86_64__
75# define PTRACE_ARCH_PRCTL 30 75# define PTRACE_ARCH_PRCTL 30
76#else
77# define PTRACE_SYSEMU 31
78# define PTRACE_SYSEMU_SINGLESTEP 32
79#endif 76#endif
80 77
78#define PTRACE_SYSEMU 31
79#define PTRACE_SYSEMU_SINGLESTEP 32
80
81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ 81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
82 82
83#ifndef __ASSEMBLY__ 83#ifndef __ASSEMBLY__
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h
index dfc8601c0892..646452ea9ea3 100644
--- a/include/asm-x86/segment.h
+++ b/include/asm-x86/segment.h
@@ -1,6 +1,15 @@
1#ifndef _ASM_X86_SEGMENT_H_ 1#ifndef _ASM_X86_SEGMENT_H_
2#define _ASM_X86_SEGMENT_H_ 2#define _ASM_X86_SEGMENT_H_
3 3
4/* Constructor for a conventional segment GDT (or LDT) entry */
5/* This is a macro so it can be used in initializers */
6#define GDT_ENTRY(flags, base, limit) \
7 ((((base) & 0xff000000ULL) << (56-24)) | \
8 (((flags) & 0x0000f0ffULL) << 40) | \
9 (((limit) & 0x000f0000ULL) << (48-16)) | \
10 (((base) & 0x00ffffffULL) << 16) | \
11 (((limit) & 0x0000ffffULL)))
12
4/* Simple and small GDT entries for booting only */ 13/* Simple and small GDT entries for booting only */
5 14
6#define GDT_ENTRY_BOOT_CS 2 15#define GDT_ENTRY_BOOT_CS 2
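
GDT_ENTRY packs a segment's base, limit, and attribute flags into the historically scattered fields of an x86 descriptor. A standalone check of the packing, using the conventional attribute bytes for a flat 4 GiB 32-bit code segment (0xc09a is standard x86 usage, not a constant from this header):

#include <stdint.h>
#include <stdio.h>

#define GDT_ENTRY(flags, base, limit)			\
	((((base)  & 0xff000000ULL) << (56-24)) |	\
	 (((flags) & 0x0000f0ffULL) << 40) |		\
	 (((limit) & 0x000f0000ULL) << (48-16)) |	\
	 (((base)  & 0x00ffffffULL) << 16) |		\
	 (((limit) & 0x0000ffffULL)))

int main(void)
{
	/* flags 0xc09a: G=1, D=1, P=1, DPL=0, execute/read code;
	 * limit 0xfffff in 4 KB pages => 4 GiB flat segment */
	uint64_t desc = GDT_ENTRY(0xc09a, 0, 0xfffff);

	printf("%016llx\n", (unsigned long long)desc);	/* 00cf9a000000ffff */
	return 0;
}
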
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-x86/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index 90ab2225e71b..a07c6f1c01e1 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -19,13 +19,28 @@ static inline int is_visws_box(void) { return 0; }
19/* 19/*
20 * Any setup quirks to be performed? 20 * Any setup quirks to be performed?
21 */ 21 */
22extern int (*arch_time_init_quirk)(void); 22struct mpc_config_processor;
23extern int (*arch_pre_intr_init_quirk)(void); 23struct mpc_config_bus;
24extern int (*arch_intr_init_quirk)(void); 24struct mp_config_oemtable;
25extern int (*arch_trap_init_quirk)(void); 25struct x86_quirks {
26extern char * (*arch_memory_setup_quirk)(void); 26 int (*arch_pre_time_init)(void);
27extern int (*mach_get_smp_config_quirk)(unsigned int early); 27 int (*arch_time_init)(void);
28extern int (*mach_find_smp_config_quirk)(unsigned int reserve); 28 int (*arch_pre_intr_init)(void);
29 int (*arch_intr_init)(void);
30 int (*arch_trap_init)(void);
31 char * (*arch_memory_setup)(void);
32 int (*mach_get_smp_config)(unsigned int early);
33 int (*mach_find_smp_config)(unsigned int reserve);
34
35 int *mpc_record;
36 int (*mpc_apic_id)(struct mpc_config_processor *m);
37 void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name);
38 void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
39 void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
40 unsigned short oemsize);
41};
42
43extern struct x86_quirks *x86_quirks;
29 44
30#ifndef CONFIG_PARAVIRT 45#ifndef CONFIG_PARAVIRT
31#define paravirt_post_allocator_init() do {} while (0) 46#define paravirt_post_allocator_init() do {} while (0)
@@ -76,6 +91,7 @@ extern unsigned long init_pg_tables_start;
76extern unsigned long init_pg_tables_end; 91extern unsigned long init_pg_tables_end;
77 92
78#else 93#else
94void __init x86_64_init_pda(void);
79void __init x86_64_start_kernel(char *real_mode); 95void __init x86_64_start_kernel(char *real_mode);
80void __init x86_64_start_reservations(char *real_mode_data); 96void __init x86_64_start_reservations(char *real_mode_data);
81 97
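
The seven separate quirk pointers collapse into one x86_quirks table, so a subarchitecture installs a single struct instead of assigning hooks piecemeal. A sketch of a minimal user of the new interface (the myplat_* names are hypothetical; members left unset stay NULL, and callers are expected to check them before invoking):

static int myplat_pre_time_init(void)	/* hypothetical platform hook */
{
	/* e.g. program a board-specific clock source early */
	return 0;
}

static struct x86_quirks myplat_quirks = {
	.arch_pre_time_init	= myplat_pre_time_init,
	/* all other members NULL: generic code keeps its defaults */
};

static void __init myplat_setup(void)
{
	x86_quirks = &myplat_quirks;
}
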
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
index f15186d39c69..6dac49364e95 100644
--- a/include/asm-x86/signal.h
+++ b/include/asm-x86/signal.h
@@ -181,12 +181,12 @@ typedef struct sigaltstack {
181#ifdef __KERNEL__ 181#ifdef __KERNEL__
182#include <asm/sigcontext.h> 182#include <asm/sigcontext.h>
183 183
184#ifdef __386__ 184#ifdef __i386__
185 185
186#define __HAVE_ARCH_SIG_BITOPS 186#define __HAVE_ARCH_SIG_BITOPS
187 187
188#define sigaddset(set,sig) \ 188#define sigaddset(set,sig) \
189 (__builtin_constantp(sig) \ 189 (__builtin_constant_p(sig) \
190 ? __const_sigaddset((set), (sig)) \ 190 ? __const_sigaddset((set), (sig)) \
191 : __gen_sigaddset((set), (sig))) 191 : __gen_sigaddset((set), (sig)))
192 192
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index c2784b3e0b77..3c877f74f279 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -25,6 +25,8 @@ extern cpumask_t cpu_callin_map;
25extern void (*mtrr_hook)(void); 25extern void (*mtrr_hook)(void);
26extern void zap_low_mappings(void); 26extern void zap_low_mappings(void);
27 27
28extern int __cpuinit get_local_pda(int cpu);
29
28extern int smp_num_siblings; 30extern int smp_num_siblings;
29extern unsigned int num_processors; 31extern unsigned int num_processors;
30extern cpumask_t cpu_initialized; 32extern cpumask_t cpu_initialized;
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 21e89bf92f1c..4f9a9861799a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -6,7 +6,7 @@
6#include <asm/page.h> 6#include <asm/page.h>
7#include <asm/processor.h> 7#include <asm/processor.h>
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9 9#include <asm/paravirt.h>
10/* 10/*
11 * Your basic SMP spinlocks, allowing only a single CPU anywhere 11 * Your basic SMP spinlocks, allowing only a single CPU anywhere
12 * 12 *
@@ -54,21 +54,21 @@
54 * much between them in performance though, especially as locks are out of line. 54 * much between them in performance though, especially as locks are out of line.
55 */ 55 */
56#if (NR_CPUS < 256) 56#if (NR_CPUS < 256)
57static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 57static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
58{ 58{
59 int tmp = ACCESS_ONCE(lock->slock); 59 int tmp = ACCESS_ONCE(lock->slock);
60 60
61 return (((tmp >> 8) & 0xff) != (tmp & 0xff)); 61 return (((tmp >> 8) & 0xff) != (tmp & 0xff));
62} 62}
63 63
64static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 64static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
65{ 65{
66 int tmp = ACCESS_ONCE(lock->slock); 66 int tmp = ACCESS_ONCE(lock->slock);
67 67
68 return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; 68 return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
69} 69}
70 70
71static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 71static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
72{ 72{
73 short inc = 0x0100; 73 short inc = 0x0100;
74 74
@@ -87,9 +87,7 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
87 : "memory", "cc"); 87 : "memory", "cc");
88} 88}
89 89
90#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 90static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
91
92static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
93{ 91{
94 int tmp; 92 int tmp;
95 short new; 93 short new;
@@ -110,7 +108,7 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
110 return tmp; 108 return tmp;
111} 109}
112 110
113static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 111static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
114{ 112{
115 asm volatile(UNLOCK_LOCK_PREFIX "incb %0" 113 asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
116 : "+m" (lock->slock) 114 : "+m" (lock->slock)
@@ -118,21 +116,21 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
118 : "memory", "cc"); 116 : "memory", "cc");
119} 117}
120#else 118#else
121static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 119static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
122{ 120{
123 int tmp = ACCESS_ONCE(lock->slock); 121 int tmp = ACCESS_ONCE(lock->slock);
124 122
125 return (((tmp >> 16) & 0xffff) != (tmp & 0xffff)); 123 return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
126} 124}
127 125
128static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 126static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
129{ 127{
130 int tmp = ACCESS_ONCE(lock->slock); 128 int tmp = ACCESS_ONCE(lock->slock);
131 129
132 return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; 130 return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
133} 131}
134 132
135static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 133static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
136{ 134{
137 int inc = 0x00010000; 135 int inc = 0x00010000;
138 int tmp; 136 int tmp;
@@ -153,9 +151,7 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
153 : "memory", "cc"); 151 : "memory", "cc");
154} 152}
155 153
156#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 154static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
157
158static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
159{ 155{
160 int tmp; 156 int tmp;
161 int new; 157 int new;
@@ -177,7 +173,7 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
177 return tmp; 173 return tmp;
178} 174}
179 175
180static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 176static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
181{ 177{
182 asm volatile(UNLOCK_LOCK_PREFIX "incw %0" 178 asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
183 : "+m" (lock->slock) 179 : "+m" (lock->slock)
@@ -186,6 +182,98 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
186} 182}
187#endif 183#endif
188 184
185#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
186
187#ifdef CONFIG_PARAVIRT
188/*
189 * Define virtualization-friendly old-style lock byte lock, for use in
190 * pv_lock_ops if desired.
191 *
192 * This differs from the pre-2.6.24 spinlock by always using xchgb
193 * rather than decb to take the lock; this allows it to use a
194 * zero-initialized lock structure. It also maintains a 1-byte
195 * contention counter, so that we can implement
196 * __byte_spin_is_contended.
197 */
198struct __byte_spinlock {
199 s8 lock;
200 s8 spinners;
201};
202
203static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
204{
205 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
206 return bl->lock != 0;
207}
208
209static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
210{
211 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
212 return bl->spinners != 0;
213}
214
215static inline void __byte_spin_lock(raw_spinlock_t *lock)
216{
217 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
218 s8 val = 1;
219
220 asm("1: xchgb %1, %0\n"
221 " test %1,%1\n"
222 " jz 3f\n"
223 " " LOCK_PREFIX "incb %2\n"
224 "2: rep;nop\n"
225 " cmpb $1, %0\n"
226 " je 2b\n"
227 " " LOCK_PREFIX "decb %2\n"
228 " jmp 1b\n"
229 "3:"
230 : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
231}
232
233static inline int __byte_spin_trylock(raw_spinlock_t *lock)
234{
235 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
236 u8 old = 1;
237
238 asm("xchgb %1,%0"
239 : "+m" (bl->lock), "+q" (old) : : "memory");
240
241 return old == 0;
242}
243
244static inline void __byte_spin_unlock(raw_spinlock_t *lock)
245{
246 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
247 smp_wmb();
248 bl->lock = 0;
249}
250#else /* !CONFIG_PARAVIRT */
251static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
252{
253 return __ticket_spin_is_locked(lock);
254}
255
256static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
257{
258 return __ticket_spin_is_contended(lock);
259}
260
261static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
262{
263 __ticket_spin_lock(lock);
264}
265
266static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
267{
268 return __ticket_spin_trylock(lock);
269}
270
271static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
272{
273 __ticket_spin_unlock(lock);
274}
275#endif /* CONFIG_PARAVIRT */
276
189static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 277static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
190{ 278{
191 while (__raw_spin_is_locked(lock)) 279 while (__raw_spin_is_locked(lock))
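
The renamed __ticket_spin_* helpers encode the lock as two counters in one word: the low byte (16 bits once NR_CPUS >= 256) is the ticket being served, the next byte the next ticket to hand out; equal counters mean unlocked. A portable C11 sketch of the same encoding, using atomics in place of the inline asm and assuming the sub-256-CPU layout:

#include <stdatomic.h>
#include <stdio.h>

/* low 8 bits: "now serving"; next 8 bits: next free ticket */
typedef struct { _Atomic unsigned short slock; } ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* take a ticket: atomically add 1 to the high byte */
	unsigned short t = atomic_fetch_add(&l->slock, 0x0100);
	unsigned char my = (t >> 8) & 0xff;

	while ((atomic_load(&l->slock) & 0xff) != my)
		;	/* spin until our number is served */
}

static void ticket_unlock(ticket_lock_t *l)
{
	atomic_fetch_add(&l->slock, 1);	/* serve the next ticket */
}

static int ticket_is_locked(ticket_lock_t *l)
{
	unsigned short v = atomic_load(&l->slock);

	return ((v >> 8) & 0xff) != (v & 0xff);
}

int main(void)
{
	ticket_lock_t l = { 0 };

	ticket_lock(&l);
	printf("locked=%d\n", ticket_is_locked(&l));	/* 1 */
	ticket_unlock(&l);
	printf("locked=%d\n", ticket_is_locked(&l));	/* 0 */
	return 0;
}
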
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h
index 9029cf78cf5d..06c071c9eee9 100644
--- a/include/asm-x86/spinlock_types.h
+++ b/include/asm-x86/spinlock_types.h
@@ -5,7 +5,7 @@
5# error "please don't include this file directly" 5# error "please don't include this file directly"
6#endif 6#endif
7 7
8typedef struct { 8typedef struct raw_spinlock {
9 unsigned int slock; 9 unsigned int slock;
10} raw_spinlock_t; 10} raw_spinlock_t;
11 11
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index f5d9e74b1e4a..2730b351afcf 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
35 int nents, int direction); 35 int nents, int direction);
36extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, 36extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
37 int nents, int direction); 37 int nents, int direction);
38extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); 38extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
39extern void swiotlb_free_coherent(struct device *hwdev, size_t size, 39extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
40 void *vaddr, dma_addr_t dma_handle); 40 void *vaddr, dma_addr_t dma_handle);
41extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); 41extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
@@ -45,12 +45,14 @@ extern int swiotlb_force;
45 45
46#ifdef CONFIG_SWIOTLB 46#ifdef CONFIG_SWIOTLB
47extern int swiotlb; 47extern int swiotlb;
48extern void pci_swiotlb_init(void);
48#else 49#else
49#define swiotlb 0 50#define swiotlb 0
51static inline void pci_swiotlb_init(void)
52{
53}
50#endif 54#endif
51 55
52extern void pci_swiotlb_init(void);
53
54static inline void dma_mark_clean(void *addr, size_t size) {} 56static inline void dma_mark_clean(void *addr, size_t size) {}
55 57
56#endif /* _ASM_SWIOTLB_H */ 58#endif /* _ASM_SWIOTLB_H */
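
swiotlb_dma_mapping_error growing a struct device argument tracks the tree-wide dma_mapping_error() API change that came in with this merge. For a driver the pattern becomes (fragment; mydev and buf are placeholders):

dma_addr_t handle = dma_map_single(&mydev->dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(&mydev->dev, handle)) {
	/* mapping failed: nothing was mapped, so no unmap on this path */
	return -ENOMEM;
}
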
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 895339d2bc0b..da0a675adf94 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -75,13 +75,10 @@ struct thread_info {
75#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 75#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
76#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 76#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
77#define TIF_IRET 5 /* force IRET */ 77#define TIF_IRET 5 /* force IRET */
78#ifdef CONFIG_X86_32
79#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 78#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
80#endif
81#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 79#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
82#define TIF_SECCOMP 8 /* secure computing */ 80#define TIF_SECCOMP 8 /* secure computing */
83#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ 81#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
84#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */
85#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 82#define TIF_NOTSC 16 /* TSC is not accessible in userland */
86#define TIF_IA32 17 /* 32bit process */ 83#define TIF_IA32 17 /* 32bit process */
87#define TIF_FORK 18 /* ret_from_fork */ 84#define TIF_FORK 18 /* ret_from_fork */
@@ -100,15 +97,10 @@ struct thread_info {
100#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 97#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
101#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 98#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
102#define _TIF_IRET (1 << TIF_IRET) 99#define _TIF_IRET (1 << TIF_IRET)
103#ifdef CONFIG_X86_32
104#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 100#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
105#else
106#define _TIF_SYSCALL_EMU 0
107#endif
108#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 101#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
109#define _TIF_SECCOMP (1 << TIF_SECCOMP) 102#define _TIF_SECCOMP (1 << TIF_SECCOMP)
110#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) 103#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
111#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
112#define _TIF_NOTSC (1 << TIF_NOTSC) 104#define _TIF_NOTSC (1 << TIF_NOTSC)
113#define _TIF_IA32 (1 << TIF_IA32) 105#define _TIF_IA32 (1 << TIF_IA32)
114#define _TIF_FORK (1 << TIF_FORK) 106#define _TIF_FORK (1 << TIF_FORK)
@@ -121,18 +113,27 @@ struct thread_info {
121#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) 113#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
122#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) 114#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
123 115
116/* work to do in syscall_trace_enter() */
117#define _TIF_WORK_SYSCALL_ENTRY \
118 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \
119 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP)
120
121/* work to do in syscall_trace_leave() */
122#define _TIF_WORK_SYSCALL_EXIT \
123 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)
124
124/* work to do on interrupt/exception return */ 125/* work to do on interrupt/exception return */
125#define _TIF_WORK_MASK \ 126#define _TIF_WORK_MASK \
126 (0x0000FFFF & \ 127 (0x0000FFFF & \
127 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP| \ 128 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT| \
128 _TIF_SECCOMP|_TIF_SYSCALL_EMU)) 129 _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
129 130
130/* work to do on any return to user space */ 131/* work to do on any return to user space */
131#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) 132#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
132 133
133/* Only used for 64 bit */ 134/* Only used for 64 bit */
134#define _TIF_DO_NOTIFY_MASK \ 135#define _TIF_DO_NOTIFY_MASK \
135 (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) 136 (_TIF_SIGPENDING|_TIF_MCE_NOTIFY)
136 137
137/* flags to check in __switch_to() */ 138/* flags to check in __switch_to() */
138#define _TIF_WORK_CTXSW \ 139#define _TIF_WORK_CTXSW \
@@ -151,6 +152,8 @@ struct thread_info {
151#define THREAD_FLAGS GFP_KERNEL 152#define THREAD_FLAGS GFP_KERNEL
152#endif 153#endif
153 154
155#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
156
154#define alloc_thread_info(tsk) \ 157#define alloc_thread_info(tsk) \
155 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) 158 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
156 159
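
The two new aggregate masks let the syscall entry/exit slow paths test every tracing-related reason with a single AND instead of checking flags one by one. Roughly how the entry code consumes them (illustrative fragment; the real tests live in the entry asm and ptrace.c):

if (current_thread_info()->flags & _TIF_WORK_SYSCALL_ENTRY)
	syscall_trace_enter(regs);	/* trace/audit/seccomp/emulation */

/* ... dispatch the system call ... */

if (current_thread_info()->flags & _TIF_WORK_SYSCALL_EXIT)
	syscall_trace_leave(regs);
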
diff --git a/include/asm-x86/traps.h b/include/asm-x86/traps.h
new file mode 100644
index 000000000000..a4b65a71bd66
--- /dev/null
+++ b/include/asm-x86/traps.h
@@ -0,0 +1,66 @@
1#ifndef _ASM_X86_TRAPS_H
2#define _ASM_X86_TRAPS_H
3
4/* Common in X86_32 and X86_64 */
5asmlinkage void divide_error(void);
6asmlinkage void debug(void);
7asmlinkage void nmi(void);
8asmlinkage void int3(void);
9asmlinkage void overflow(void);
10asmlinkage void bounds(void);
11asmlinkage void invalid_op(void);
12asmlinkage void device_not_available(void);
13asmlinkage void coprocessor_segment_overrun(void);
14asmlinkage void invalid_TSS(void);
15asmlinkage void segment_not_present(void);
16asmlinkage void stack_segment(void);
17asmlinkage void general_protection(void);
18asmlinkage void page_fault(void);
19asmlinkage void coprocessor_error(void);
20asmlinkage void simd_coprocessor_error(void);
21asmlinkage void alignment_check(void);
22asmlinkage void spurious_interrupt_bug(void);
23#ifdef CONFIG_X86_MCE
24asmlinkage void machine_check(void);
25#endif /* CONFIG_X86_MCE */
26
27void do_divide_error(struct pt_regs *, long);
28void do_overflow(struct pt_regs *, long);
29void do_bounds(struct pt_regs *, long);
30void do_coprocessor_segment_overrun(struct pt_regs *, long);
31void do_invalid_TSS(struct pt_regs *, long);
32void do_segment_not_present(struct pt_regs *, long);
33void do_stack_segment(struct pt_regs *, long);
34void do_alignment_check(struct pt_regs *, long);
35void do_invalid_op(struct pt_regs *, long);
36void do_general_protection(struct pt_regs *, long);
37void do_nmi(struct pt_regs *, long);
38
39extern int panic_on_unrecovered_nmi;
40extern int kstack_depth_to_print;
41
42#ifdef CONFIG_X86_32
43
44void do_iret_error(struct pt_regs *, long);
45void do_int3(struct pt_regs *, long);
46void do_debug(struct pt_regs *, long);
47void math_error(void __user *);
48void do_coprocessor_error(struct pt_regs *, long);
49void do_simd_coprocessor_error(struct pt_regs *, long);
50void do_spurious_interrupt_bug(struct pt_regs *, long);
51unsigned long patch_espfix_desc(unsigned long, unsigned long);
52asmlinkage void math_emulate(long);
53
54#else /* CONFIG_X86_32 */
55
56asmlinkage void double_fault(void);
57
58asmlinkage void do_int3(struct pt_regs *, long);
59asmlinkage void do_stack_segment(struct pt_regs *, long);
60asmlinkage void do_debug(struct pt_regs *, unsigned long);
61asmlinkage void do_coprocessor_error(struct pt_regs *);
62asmlinkage void do_simd_coprocessor_error(struct pt_regs *);
63asmlinkage void do_spurious_interrupt_bug(struct pt_regs *);
64
65#endif /* CONFIG_X86_32 */
66#endif /* _ASM_X86_TRAPS_H */
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index f6fa4d841bbc..5f702d1d5218 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -451,3 +451,4 @@ extern struct movsl_mask {
451#endif 451#endif
452 452
453#endif 453#endif
454
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index 8317d94771d3..d7394673b772 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -332,6 +332,12 @@
332#define __NR_fallocate 324 332#define __NR_fallocate 324
333#define __NR_timerfd_settime 325 333#define __NR_timerfd_settime 325
334#define __NR_timerfd_gettime 326 334#define __NR_timerfd_gettime 326
335#define __NR_signalfd4 327
336#define __NR_eventfd2 328
337#define __NR_epoll_create1 329
338#define __NR_dup3 330
339#define __NR_pipe2 331
340#define __NR_inotify_init1 332
335 341
336#ifdef __KERNEL__ 342#ifdef __KERNEL__
337 343
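
The six new numbers are the flags-taking variants (signalfd4, eventfd2, epoll_create1, dup3, pipe2, inotify_init1) that let descriptors be created O_CLOEXEC or O_NONBLOCK atomically. Before libc grew wrappers they could be reached by number; a userspace sketch, assuming headers new enough to define SYS_pipe2:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	/* pipe2(fds, O_CLOEXEC): both ends close-on-exec from birth */
	if (syscall(SYS_pipe2, fds, O_CLOEXEC) < 0) {
		perror("pipe2");
		return 1;
	}
	printf("read fd %d, write fd %d\n", fds[0], fds[1]);
	return 0;
}
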
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index 9c1a4a3470d9..3a341d791792 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -639,6 +639,20 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
640#define __NR_timerfd_gettime 287 640#define __NR_timerfd_gettime 287
641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
642#define __NR_paccept 288
643__SYSCALL(__NR_paccept, sys_paccept)
644#define __NR_signalfd4 289
645__SYSCALL(__NR_signalfd4, sys_signalfd4)
646#define __NR_eventfd2 290
647__SYSCALL(__NR_eventfd2, sys_eventfd2)
648#define __NR_epoll_create1 291
649__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
650#define __NR_dup3 292
651__SYSCALL(__NR_dup3, sys_dup3)
652#define __NR_pipe2 293
653__SYSCALL(__NR_pipe2, sys_pipe2)
654#define __NR_inotify_init1 294
655__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
642 656
643 657
644#ifndef __NO_STUBS 658#ifndef __NO_STUBS
diff --git a/include/asm-x86/uv/bios.h b/include/asm-x86/uv/bios.h
new file mode 100644
index 000000000000..aa73362ff5df
--- /dev/null
+++ b/include/asm-x86/uv/bios.h
@@ -0,0 +1,68 @@
1#ifndef _ASM_X86_BIOS_H
2#define _ASM_X86_BIOS_H
3
4/*
5 * BIOS layer definitions.
6 *
7 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/rtc.h>
25
26#define BIOS_FREQ_BASE 0x01000001
27
28enum {
29 BIOS_FREQ_BASE_PLATFORM = 0,
30 BIOS_FREQ_BASE_INTERVAL_TIMER = 1,
31 BIOS_FREQ_BASE_REALTIME_CLOCK = 2
32};
33
34# define BIOS_CALL(result, a0, a1, a2, a3, a4, a5, a6, a7) \
35 do { \
36 /* XXX - the real call goes here */ \
37 result.status = BIOS_STATUS_UNIMPLEMENTED; \
38 isrv.v0 = 0; \
39 isrv.v1 = 0; \
40 } while (0)
41
42enum {
43 BIOS_STATUS_SUCCESS = 0,
44 BIOS_STATUS_UNIMPLEMENTED = -1,
45 BIOS_STATUS_EINVAL = -2,
46 BIOS_STATUS_ERROR = -3
47};
48
49struct uv_bios_retval {
50 /*
 51 * A zero status value indicates the call completed without error.
 52 * A negative status value indicates the reason the call failed.
53 * A positive status value indicates success but an
54 * informational value should be printed (e.g., "reboot for
55 * change to take effect").
56 */
57 s64 status;
58 u64 v0;
59 u64 v1;
60 u64 v2;
61};
62
63extern long
64x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
65 unsigned long *drift_info);
66extern const char *x86_bios_strerror(long status);
67
68#endif /* _ASM_X86_BIOS_H */
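
This new SGI UV layer funnels firmware queries through uv_bios_retval; the frequency-base query is the only call declared so far, and BIOS_CALL above is still a stub that returns BIOS_STATUS_UNIMPLEMENTED. Expected usage looks roughly like this fragment:

unsigned long ticks_per_sec, drift;
long status;

status = x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
			    &ticks_per_sec, &drift);
if (status != BIOS_STATUS_SUCCESS)
	printk(KERN_ERR "uv: freq_base failed: %s\n",
	       x86_bios_strerror(status));
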
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h
index 86e085e003d2..8e18fb80f5e6 100644
--- a/include/asm-x86/vdso.h
+++ b/include/asm-x86/vdso.h
@@ -36,4 +36,12 @@ extern const char VDSO32_PRELINK[];
36extern void __user __kernel_sigreturn; 36extern void __user __kernel_sigreturn;
37extern void __user __kernel_rt_sigreturn; 37extern void __user __kernel_rt_sigreturn;
38 38
39/*
40 * These symbols are defined by vdso32.S to mark the bounds
41 * of the ELF DSO images included therein.
42 */
43extern const char vdso32_int80_start, vdso32_int80_end;
44extern const char vdso32_syscall_start, vdso32_syscall_end;
45extern const char vdso32_sysenter_start, vdso32_sysenter_end;
46
39#endif /* asm-x86/vdso.h */ 47#endif /* asm-x86/vdso.h */
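
The vdso32_*_start/_end pairs bound the three alternative 32-bit vDSO images (int80, syscall, sysenter) linked into the kernel, so setup code can pick one and copy it by size. A fragment of the intended use (vdso_page is a placeholder destination):

size_t len = &vdso32_int80_end - &vdso32_int80_start;

memcpy(vdso_page, &vdso32_int80_start, len);	/* install the int80 image */
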
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index f8d57ea1f05f..8ded74720024 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -5,6 +5,7 @@ enum ipi_vector {
5 XEN_RESCHEDULE_VECTOR, 5 XEN_RESCHEDULE_VECTOR,
6 XEN_CALL_FUNCTION_VECTOR, 6 XEN_CALL_FUNCTION_VECTOR,
7 XEN_CALL_FUNCTION_SINGLE_VECTOR, 7 XEN_CALL_FUNCTION_SINGLE_VECTOR,
8 XEN_SPIN_UNLOCK_VECTOR,
8 9
9 XEN_NR_IPIS, 10 XEN_NR_IPIS,
10}; 11};
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index 2a4f9b41d684..91cb7fd5c123 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -40,83 +40,157 @@
40#include <xen/interface/sched.h> 40#include <xen/interface/sched.h>
41#include <xen/interface/physdev.h> 41#include <xen/interface/physdev.h>
42 42
43/*
44 * The hypercall asms have to meet several constraints:
45 * - Work on 32- and 64-bit.
46 * The two architectures put their arguments in different sets of
47 * registers.
48 *
49 * - Work around asm syntax quirks
50 * It isn't possible to specify one of the rNN registers in a
51 * constraint, so we use explicit register variables to get the
52 * args into the right place.
53 *
54 * - Mark all registers as potentially clobbered
55 * Even unused parameters can be clobbered by the hypervisor, so we
56 * need to make sure gcc knows it.
57 *
58 * - Avoid compiler bugs.
59 * This is the tricky part. Because x86_32 has such a constrained
60 * register set, gcc versions below 4.3 have trouble generating
61 * code when all the arg registers and memory are trashed by the
62 * asm. There are syntactically simpler ways of achieving the
63 * semantics below, but they cause the compiler to crash.
64 *
65 * The only combination I found which works is:
66 * - assign the __argX variables first
67 * - list all actually used parameters as "+r" (__argX)
68 * - clobber the rest
69 *
70 * The result certainly isn't pretty, and it really shows up cpp's
 71 * weakness as a macro language. Sorry. (But let's just give thanks
72 * there aren't more than 5 arguments...)
73 */
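
Concretely, under the scheme this comment lays out, a two-argument hypercall boils down to two register moves plus a direct call into the fixed hypercall page. A rough picture of what _hypercall2(int, sched_op, cmd, arg) becomes on x86-64 (a hand-written illustration of the macros defined below, not literal preprocessor output):

register unsigned long __res  asm("rax");
register unsigned long __arg1 asm("rdi") = (unsigned long)cmd;
register unsigned long __arg2 asm("rsi") = (unsigned long)arg;

asm volatile("call hypercall_page+%c[offset]"
	     : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
	     : [offset] "i" (__HYPERVISOR_sched_op * 32)	/* 32-byte entries */
	     : "memory", "r8", "r10", "rdx");	/* unused arg regs clobbered */

(int)__res;	/* the statement expression's value */
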
74
43extern struct { char _entry[32]; } hypercall_page[]; 75extern struct { char _entry[32]; } hypercall_page[];
44 76
77#define __HYPERCALL "call hypercall_page+%c[offset]"
78#define __HYPERCALL_ENTRY(x) \
79 [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
80
81#ifdef CONFIG_X86_32
82#define __HYPERCALL_RETREG "eax"
83#define __HYPERCALL_ARG1REG "ebx"
84#define __HYPERCALL_ARG2REG "ecx"
85#define __HYPERCALL_ARG3REG "edx"
86#define __HYPERCALL_ARG4REG "esi"
87#define __HYPERCALL_ARG5REG "edi"
88#else
89#define __HYPERCALL_RETREG "rax"
90#define __HYPERCALL_ARG1REG "rdi"
91#define __HYPERCALL_ARG2REG "rsi"
92#define __HYPERCALL_ARG3REG "rdx"
93#define __HYPERCALL_ARG4REG "r10"
94#define __HYPERCALL_ARG5REG "r8"
95#endif
96
97#define __HYPERCALL_DECLS \
98 register unsigned long __res asm(__HYPERCALL_RETREG); \
99 register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
100 register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
101 register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
102 register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
103 register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
104
105#define __HYPERCALL_0PARAM "=r" (__res)
106#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
107#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
108#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
109#define __HYPERCALL_4PARAM __HYPERCALL_3PARAM, "+r" (__arg4)
110#define __HYPERCALL_5PARAM __HYPERCALL_4PARAM, "+r" (__arg5)
111
112#define __HYPERCALL_0ARG()
113#define __HYPERCALL_1ARG(a1) \
114 __HYPERCALL_0ARG() __arg1 = (unsigned long)(a1);
115#define __HYPERCALL_2ARG(a1,a2) \
116 __HYPERCALL_1ARG(a1) __arg2 = (unsigned long)(a2);
117#define __HYPERCALL_3ARG(a1,a2,a3) \
118 __HYPERCALL_2ARG(a1,a2) __arg3 = (unsigned long)(a3);
119#define __HYPERCALL_4ARG(a1,a2,a3,a4) \
120 __HYPERCALL_3ARG(a1,a2,a3) __arg4 = (unsigned long)(a4);
121#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
122 __HYPERCALL_4ARG(a1,a2,a3,a4) __arg5 = (unsigned long)(a5);
123
124#define __HYPERCALL_CLOBBER5 "memory"
125#define __HYPERCALL_CLOBBER4 __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
126#define __HYPERCALL_CLOBBER3 __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
127#define __HYPERCALL_CLOBBER2 __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
128#define __HYPERCALL_CLOBBER1 __HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
129#define __HYPERCALL_CLOBBER0 __HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
130
45#define _hypercall0(type, name) \ 131#define _hypercall0(type, name) \
46({ \ 132({ \
47 long __res; \ 133 __HYPERCALL_DECLS; \
48 asm volatile ( \ 134 __HYPERCALL_0ARG(); \
49 "call %[call]" \ 135 asm volatile (__HYPERCALL \
50 : "=a" (__res) \ 136 : __HYPERCALL_0PARAM \
51 : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ 137 : __HYPERCALL_ENTRY(name) \
52 : "memory" ); \ 138 : __HYPERCALL_CLOBBER0); \
53 (type)__res; \ 139 (type)__res; \
54}) 140})
55 141
56#define _hypercall1(type, name, a1) \ 142#define _hypercall1(type, name, a1) \
57({ \ 143({ \
58 long __res, __ign1; \ 144 __HYPERCALL_DECLS; \
59 asm volatile ( \ 145 __HYPERCALL_1ARG(a1); \
60 "call %[call]" \ 146 asm volatile (__HYPERCALL \
61 : "=a" (__res), "=b" (__ign1) \ 147 : __HYPERCALL_1PARAM \
62 : "1" ((long)(a1)), \ 148 : __HYPERCALL_ENTRY(name) \
63 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ 149 : __HYPERCALL_CLOBBER1); \
64 : "memory" ); \
65 (type)__res; \ 150 (type)__res; \
66}) 151})
67 152
68#define _hypercall2(type, name, a1, a2) \ 153#define _hypercall2(type, name, a1, a2) \
69({ \ 154({ \
70 long __res, __ign1, __ign2; \ 155 __HYPERCALL_DECLS; \
71 asm volatile ( \ 156 __HYPERCALL_2ARG(a1, a2); \
72 "call %[call]" \ 157 asm volatile (__HYPERCALL \
73 : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ 158 : __HYPERCALL_2PARAM \
74 : "1" ((long)(a1)), "2" ((long)(a2)), \ 159 : __HYPERCALL_ENTRY(name) \
75 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ 160 : __HYPERCALL_CLOBBER2); \
76 : "memory" ); \
77 (type)__res; \ 161 (type)__res; \
78}) 162})
79 163
80#define _hypercall3(type, name, a1, a2, a3) \ 164#define _hypercall3(type, name, a1, a2, a3) \
81({ \ 165({ \
82 long __res, __ign1, __ign2, __ign3; \ 166 __HYPERCALL_DECLS; \
83 asm volatile ( \ 167 __HYPERCALL_3ARG(a1, a2, a3); \
84 "call %[call]" \ 168 asm volatile (__HYPERCALL \
85 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ 169 : __HYPERCALL_3PARAM \
86 "=d" (__ign3) \ 170 : __HYPERCALL_ENTRY(name) \
87 : "1" ((long)(a1)), "2" ((long)(a2)), \ 171 : __HYPERCALL_CLOBBER3); \
88 "3" ((long)(a3)), \
89 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
90 : "memory" ); \
91 (type)__res; \ 172 (type)__res; \
92}) 173})
93 174
94#define _hypercall4(type, name, a1, a2, a3, a4) \ 175#define _hypercall4(type, name, a1, a2, a3, a4) \
95({ \ 176({ \
96 long __res, __ign1, __ign2, __ign3, __ign4; \ 177 __HYPERCALL_DECLS; \
97 asm volatile ( \ 178 __HYPERCALL_4ARG(a1, a2, a3, a4); \
98 "call %[call]" \ 179 asm volatile (__HYPERCALL \
99 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ 180 : __HYPERCALL_4PARAM \
100 "=d" (__ign3), "=S" (__ign4) \ 181 : __HYPERCALL_ENTRY(name) \
101 : "1" ((long)(a1)), "2" ((long)(a2)), \ 182 : __HYPERCALL_CLOBBER4); \
102 "3" ((long)(a3)), "4" ((long)(a4)), \
103 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
104 : "memory" ); \
105 (type)__res; \ 183 (type)__res; \
106}) 184})
107 185
108#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ 186#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
109({ \ 187({ \
110 long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \ 188 __HYPERCALL_DECLS; \
111 asm volatile ( \ 189 __HYPERCALL_5ARG(a1, a2, a3, a4, a5); \
112 "call %[call]" \ 190 asm volatile (__HYPERCALL \
113 : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ 191 : __HYPERCALL_5PARAM \
114 "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ 192 : __HYPERCALL_ENTRY(name) \
115 : "1" ((long)(a1)), "2" ((long)(a2)), \ 193 : __HYPERCALL_CLOBBER5); \
116 "3" ((long)(a3)), "4" ((long)(a4)), \
117 "5" ((long)(a5)), \
118 [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
119 : "memory" ); \
120 (type)__res; \ 194 (type)__res; \
121}) 195})
122 196
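To make the macro layering above concrete, here is a hand expansion of _hypercall2(int, sched_op, cmd, arg) for x86-64. This is editorial illustration only (the function name is hypothetical; the real header generates the body through the macros shown above):

	static inline int illustrative_sched_op(int cmd, void *arg)
	{
		register unsigned long __res  asm("rax");
		register unsigned long __arg1 asm("rdi") = (unsigned long)cmd;
		register unsigned long __arg2 asm("rsi") = (unsigned long)arg;

		asm volatile ("call hypercall_page+%c[offset]"
			      : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
			      : [offset] "i" (__HYPERVISOR_sched_op *
					      sizeof(hypercall_page[0]))
			      /* unused arg registers may still be trashed: */
			      : "memory", "rdx", "r10", "r8");
		return (int)__res;
	}

The "+r" outputs keep gcc from caching anything in the argument registers across the call, and the immediate [offset] operand lets the call target be resolved at assembly time instead of going through the memory operand the old macros used.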
@@ -152,6 +226,7 @@ HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
152 return _hypercall2(int, stack_switch, ss, esp); 226 return _hypercall2(int, stack_switch, ss, esp);
153} 227}
154 228
229#ifdef CONFIG_X86_32
155static inline int 230static inline int
156HYPERVISOR_set_callbacks(unsigned long event_selector, 231HYPERVISOR_set_callbacks(unsigned long event_selector,
157 unsigned long event_address, 232 unsigned long event_address,
@@ -162,6 +237,17 @@ HYPERVISOR_set_callbacks(unsigned long event_selector,
162 event_selector, event_address, 237 event_selector, event_address,
163 failsafe_selector, failsafe_address); 238 failsafe_selector, failsafe_address);
164} 239}
240#else /* CONFIG_X86_64 */
241static inline int
242HYPERVISOR_set_callbacks(unsigned long event_address,
243 unsigned long failsafe_address,
244 unsigned long syscall_address)
245{
246 return _hypercall3(int, set_callbacks,
247 event_address, failsafe_address,
248 syscall_address);
249}
250#endif /* CONFIG_X86_{32,64} */
165 251
166static inline int 252static inline int
167HYPERVISOR_callback_op(int cmd, void *arg) 253HYPERVISOR_callback_op(int cmd, void *arg)
@@ -223,12 +309,12 @@ static inline int
223HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, 309HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
224 unsigned long flags) 310 unsigned long flags)
225{ 311{
226 unsigned long pte_hi = 0; 312 if (sizeof(new_val) == sizeof(long))
227#ifdef CONFIG_X86_PAE 313 return _hypercall3(int, update_va_mapping, va,
228 pte_hi = new_val.pte_high; 314 new_val.pte, flags);
229#endif 315 else
230 return _hypercall4(int, update_va_mapping, va, 316 return _hypercall4(int, update_va_mapping, va,
231 new_val.pte_low, pte_hi, flags); 317 new_val.pte, new_val.pte >> 32, flags);
232} 318}
233 319
234static inline int 320static inline int
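The sizeof(new_val) == sizeof(long) test replaces the old CONFIG_X86_PAE #ifdef: a pte is one machine word on x86-64 and on non-PAE 32-bit, but 64 bits wide under 32-bit PAE. Since the comparison is a compile-time constant, the untaken branch is discarded entirely. A minimal self-contained sketch of the idiom, with illustrative type names (not the kernel's):

	#include <stdint.h>

	typedef uint64_t pteval_t;              /* 64-bit under PAE and on x86-64 */
	typedef struct { pteval_t pte; } pte_t;

	static unsigned long pte_low_word(pte_t v)
	{
		if (sizeof(v) == sizeof(long))  /* x86-64 (or !PAE): whole word */
			return (unsigned long)v.pte;
		else                            /* 32-bit PAE: low half only */
			return (unsigned long)(v.pte & 0xffffffffu);
	}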
@@ -281,12 +367,13 @@ static inline int
281HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val, 367HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
282 unsigned long flags, domid_t domid) 368 unsigned long flags, domid_t domid)
283{ 369{
284 unsigned long pte_hi = 0; 370 if (sizeof(new_val) == sizeof(long))
285#ifdef CONFIG_X86_PAE 371 return _hypercall4(int, update_va_mapping_otherdomain, va,
286 pte_hi = new_val.pte_high; 372 new_val.pte, flags, domid);
287#endif 373 else
288 return _hypercall5(int, update_va_mapping_otherdomain, va, 374 return _hypercall5(int, update_va_mapping_otherdomain, va,
289 new_val.pte_low, pte_hi, flags, domid); 375 new_val.pte, new_val.pte >> 32,
376 flags, domid);
290} 377}
291 378
292static inline int 379static inline int
@@ -301,6 +388,14 @@ HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
301 return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); 388 return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
302} 389}
303 390
391#ifdef CONFIG_X86_64
392static inline int
393HYPERVISOR_set_segment_base(int reg, unsigned long value)
394{
395 return _hypercall2(int, set_segment_base, reg, value);
396}
397#endif
398
304static inline int 399static inline int
305HYPERVISOR_suspend(unsigned long srec) 400HYPERVISOR_suspend(unsigned long srec)
306{ 401{
@@ -327,14 +422,14 @@ MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
327{ 422{
328 mcl->op = __HYPERVISOR_update_va_mapping; 423 mcl->op = __HYPERVISOR_update_va_mapping;
329 mcl->args[0] = va; 424 mcl->args[0] = va;
330#ifdef CONFIG_X86_PAE 425 if (sizeof(new_val) == sizeof(long)) {
331 mcl->args[1] = new_val.pte_low; 426 mcl->args[1] = new_val.pte;
332 mcl->args[2] = new_val.pte_high; 427 mcl->args[2] = flags;
333#else 428 } else {
334 mcl->args[1] = new_val.pte_low; 429 mcl->args[1] = new_val.pte;
335 mcl->args[2] = 0; 430 mcl->args[2] = new_val.pte >> 32;
336#endif 431 mcl->args[3] = flags;
337 mcl->args[3] = flags; 432 }
338} 433}
339 434
340static inline void 435static inline void
@@ -354,15 +449,16 @@ MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long v
354{ 449{
355 mcl->op = __HYPERVISOR_update_va_mapping_otherdomain; 450 mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
356 mcl->args[0] = va; 451 mcl->args[0] = va;
357#ifdef CONFIG_X86_PAE 452 if (sizeof(new_val) == sizeof(long)) {
358 mcl->args[1] = new_val.pte_low; 453 mcl->args[1] = new_val.pte;
359 mcl->args[2] = new_val.pte_high; 454 mcl->args[2] = flags;
360#else 455 mcl->args[3] = domid;
361 mcl->args[1] = new_val.pte_low; 456 } else {
362 mcl->args[2] = 0; 457 mcl->args[1] = new_val.pte;
363#endif 458 mcl->args[2] = new_val.pte >> 32;
364 mcl->args[3] = flags; 459 mcl->args[3] = flags;
365 mcl->args[4] = domid; 460 mcl->args[4] = domid;
461 }
366} 462}
367 463
368static inline void 464static inline void
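For context on the MULTI_* helpers being modified here: each one only fills in a struct multicall_entry, and the caller batches several entries into a single HYPERVISOR_multicall (declared earlier in this header). A hedged usage sketch; va1, va2, pte1 and pte2 are hypothetical:

	struct multicall_entry mcl[2];

	MULTI_update_va_mapping(&mcl[0], va1, pte1, 0);
	MULTI_update_va_mapping(&mcl[1], va2, pte2, UVMF_INVLPG);

	if (HYPERVISOR_multicall(mcl, ARRAY_SIZE(mcl)) != 0)
		BUG();

Batching matters because every hypercall is a ring transition; the multicall amortizes that cost over the whole array.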
@@ -370,10 +466,15 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
370 struct desc_struct desc) 466 struct desc_struct desc)
371{ 467{
372 mcl->op = __HYPERVISOR_update_descriptor; 468 mcl->op = __HYPERVISOR_update_descriptor;
373 mcl->args[0] = maddr; 469 if (sizeof(maddr) == sizeof(long)) {
374 mcl->args[1] = maddr >> 32; 470 mcl->args[0] = maddr;
375 mcl->args[2] = desc.a; 471 mcl->args[1] = *(unsigned long *)&desc;
376 mcl->args[3] = desc.b; 472 } else {
473 mcl->args[0] = maddr;
474 mcl->args[1] = maddr >> 32;
475 mcl->args[2] = desc.a;
476 mcl->args[3] = desc.b;
477 }
377} 478}
378 479
379static inline void 480static inline void
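One subtlety in the MULTI_update_descriptor change: on 64-bit, struct desc_struct occupies exactly one unsigned long, so the whole descriptor is passed as a single argument via the *(unsigned long *)&desc pun. On little-endian x86 that word decomposes into the two 32-bit halves used by the 32-bit branch (a sketch of the equivalence, not code from the patch):

	/* x86-64: sizeof(struct desc_struct) == sizeof(unsigned long) == 8,
	 * with desc.a in the low half, so:
	 *
	 *   *(unsigned long *)&desc == ((unsigned long)desc.b << 32) | desc.a
	 */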
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h
index 6227000a1e84..9d810f2538a2 100644
--- a/include/asm-x86/xen/interface.h
+++ b/include/asm-x86/xen/interface.h
@@ -1,13 +1,13 @@
1/****************************************************************************** 1/******************************************************************************
2 * arch-x86_32.h 2 * arch-x86_32.h
3 * 3 *
4 * Guest OS interface to x86 32-bit Xen. 4 * Guest OS interface to x86 Xen.
5 * 5 *
6 * Copyright (c) 2004, K A Fraser 6 * Copyright (c) 2004, K A Fraser
7 */ 7 */
8 8
9#ifndef __XEN_PUBLIC_ARCH_X86_32_H__ 9#ifndef __ASM_X86_XEN_INTERFACE_H
10#define __XEN_PUBLIC_ARCH_X86_32_H__ 10#define __ASM_X86_XEN_INTERFACE_H
11 11
12#ifdef __XEN__ 12#ifdef __XEN__
13#define __DEFINE_GUEST_HANDLE(name, type) \ 13#define __DEFINE_GUEST_HANDLE(name, type) \
@@ -57,6 +57,17 @@ DEFINE_GUEST_HANDLE(long);
57DEFINE_GUEST_HANDLE(void); 57DEFINE_GUEST_HANDLE(void);
58#endif 58#endif
59 59
60#ifndef HYPERVISOR_VIRT_START
61#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
62#endif
63
64#ifndef machine_to_phys_mapping
65#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
66#endif
67
68/* Maximum number of virtual CPUs in multi-processor guests. */
69#define MAX_VIRT_CPUS 32
70
60/* 71/*
61 * SEGMENT DESCRIPTOR TABLES 72 * SEGMENT DESCRIPTOR TABLES
62 */ 73 */
@@ -71,58 +82,21 @@ DEFINE_GUEST_HANDLE(void);
71#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) 82#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
72 83
73/* 84/*
74 * These flat segments are in the Xen-private section of every GDT. Since these
75 * are also present in the initial GDT, many OSes will be able to avoid
76 * installing their own GDT.
77 */
78#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
79#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
80#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
81#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
82#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
83#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
84
85#define FLAT_KERNEL_CS FLAT_RING1_CS
86#define FLAT_KERNEL_DS FLAT_RING1_DS
87#define FLAT_KERNEL_SS FLAT_RING1_SS
88#define FLAT_USER_CS FLAT_RING3_CS
89#define FLAT_USER_DS FLAT_RING3_DS
90#define FLAT_USER_SS FLAT_RING3_SS
91
92/* And the trap vector is... */
93#define TRAP_INSTR "int $0x82"
94
95/*
96 * Virtual addresses beyond this are not modifiable by guest OSes. The
97 * machine->physical mapping table starts at this address, read-only.
98 */
99#ifdef CONFIG_X86_PAE
100#define __HYPERVISOR_VIRT_START 0xF5800000
101#else
102#define __HYPERVISOR_VIRT_START 0xFC000000
103#endif
104
105#ifndef HYPERVISOR_VIRT_START
106#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
107#endif
108
109#ifndef machine_to_phys_mapping
110#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
111#endif
112
113/* Maximum number of virtual CPUs in multi-processor guests. */
114#define MAX_VIRT_CPUS 32
115
116#ifndef __ASSEMBLY__
117
118/*
119 * Send an array of these to HYPERVISOR_set_trap_table() 85 * Send an array of these to HYPERVISOR_set_trap_table()
86 * The privilege level specifies which modes may enter a trap via a software
87 * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
88 * privilege levels as follows:
 89 * Level == 0: No one may enter
90 * Level == 1: Kernel may enter
91 * Level == 2: Kernel may enter
92 * Level == 3: Everyone may enter
120 */ 93 */
121#define TI_GET_DPL(_ti) ((_ti)->flags & 3) 94#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
122#define TI_GET_IF(_ti) ((_ti)->flags & 4) 95#define TI_GET_IF(_ti) ((_ti)->flags & 4)
123#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl)) 96#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
124#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2)) 97#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
125 98
99#ifndef __ASSEMBLY__
126struct trap_info { 100struct trap_info {
127 uint8_t vector; /* exception vector */ 101 uint8_t vector; /* exception vector */
128 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ 102 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
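To see the TI_* accessors in use: guests build an array of entries like the following and pass it to HYPERVISOR_set_trap_table(). The vector and handler below are made-up illustration, not values from the patch:

	struct trap_info ti = {
		.vector  = 14,                          /* #PF */
		.cs      = FLAT_KERNEL_CS,
		.address = (unsigned long)my_page_fault_entry, /* hypothetical */
	};

	TI_SET_DPL(&ti, 1);     /* kernel (and nothing less privileged) may enter */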
@@ -131,32 +105,21 @@ struct trap_info {
131}; 105};
132DEFINE_GUEST_HANDLE_STRUCT(trap_info); 106DEFINE_GUEST_HANDLE_STRUCT(trap_info);
133 107
134struct cpu_user_regs { 108struct arch_shared_info {
135 uint32_t ebx; 109 unsigned long max_pfn; /* max pfn that appears in table */
136 uint32_t ecx; 110 /* Frame containing list of mfns containing list of mfns containing p2m. */
137 uint32_t edx; 111 unsigned long pfn_to_mfn_frame_list_list;
138 uint32_t esi; 112 unsigned long nmi_reason;
139 uint32_t edi;
140 uint32_t ebp;
141 uint32_t eax;
142 uint16_t error_code; /* private */
143 uint16_t entry_vector; /* private */
144 uint32_t eip;
145 uint16_t cs;
146 uint8_t saved_upcall_mask;
147 uint8_t _pad0;
148 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
149 uint32_t esp;
150 uint16_t ss, _pad1;
151 uint16_t es, _pad2;
152 uint16_t ds, _pad3;
153 uint16_t fs, _pad4;
154 uint16_t gs, _pad5;
155}; 113};
156DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs); 114#endif /* !__ASSEMBLY__ */
157 115
158typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ 116#ifdef CONFIG_X86_32
117#include "interface_32.h"
118#else
119#include "interface_64.h"
120#endif
159 121
122#ifndef __ASSEMBLY__
160/* 123/*
161 * The following is all CPU context. Note that the fpu_ctxt block is filled 124 * The following is all CPU context. Note that the fpu_ctxt block is filled
162 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. 125 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
@@ -173,33 +136,29 @@ struct vcpu_guest_context {
173 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ 136 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
174 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ 137 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
175 unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ 138 unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
139 /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
176 unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ 140 unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
177 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ 141 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
142#ifdef __i386__
178 unsigned long event_callback_cs; /* CS:EIP of event callback */ 143 unsigned long event_callback_cs; /* CS:EIP of event callback */
179 unsigned long event_callback_eip; 144 unsigned long event_callback_eip;
180 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ 145 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
181 unsigned long failsafe_callback_eip; 146 unsigned long failsafe_callback_eip;
147#else
148 unsigned long event_callback_eip;
149 unsigned long failsafe_callback_eip;
150 unsigned long syscall_callback_eip;
151#endif
182 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ 152 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
153#ifdef __x86_64__
154 /* Segment base addresses. */
155 uint64_t fs_base;
156 uint64_t gs_base_kernel;
157 uint64_t gs_base_user;
158#endif
183}; 159};
184DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); 160DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
185 161#endif /* !__ASSEMBLY__ */
186struct arch_shared_info {
187 unsigned long max_pfn; /* max pfn that appears in table */
188 /* Frame containing list of mfns containing list of mfns containing p2m. */
189 unsigned long pfn_to_mfn_frame_list_list;
190 unsigned long nmi_reason;
191};
192
193struct arch_vcpu_info {
194 unsigned long cr2;
195 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
196};
197
198struct xen_callback {
199 unsigned long cs;
200 unsigned long eip;
201};
202#endif /* !__ASSEMBLY__ */
203 162
204/* 163/*
205 * Prefix forces emulation of some non-trapping instructions. 164 * Prefix forces emulation of some non-trapping instructions.
@@ -213,4 +172,4 @@ struct xen_callback {
213#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" 172#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
214#endif 173#endif
215 174
216#endif 175#endif /* __ASM_X86_XEN_INTERFACE_H */
diff --git a/include/asm-x86/xen/interface_32.h b/include/asm-x86/xen/interface_32.h
new file mode 100644
index 000000000000..d8ac41d5db86
--- /dev/null
+++ b/include/asm-x86/xen/interface_32.h
@@ -0,0 +1,97 @@
1/******************************************************************************
2 * arch-x86_32.h
3 *
4 * Guest OS interface to x86 32-bit Xen.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
8
9#ifndef __ASM_X86_XEN_INTERFACE_32_H
10#define __ASM_X86_XEN_INTERFACE_32_H
11
12
13/*
14 * These flat segments are in the Xen-private section of every GDT. Since these
15 * are also present in the initial GDT, many OSes will be able to avoid
16 * installing their own GDT.
17 */
18#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
19#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
20#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
21#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
22#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
23#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
24
25#define FLAT_KERNEL_CS FLAT_RING1_CS
26#define FLAT_KERNEL_DS FLAT_RING1_DS
27#define FLAT_KERNEL_SS FLAT_RING1_SS
28#define FLAT_USER_CS FLAT_RING3_CS
29#define FLAT_USER_DS FLAT_RING3_DS
30#define FLAT_USER_SS FLAT_RING3_SS
31
32/* And the trap vector is... */
33#define TRAP_INSTR "int $0x82"
34
35/*
36 * Virtual addresses beyond this are not modifiable by guest OSes. The
37 * machine->physical mapping table starts at this address, read-only.
38 */
39#define __HYPERVISOR_VIRT_START 0xF5800000
40
41#ifndef __ASSEMBLY__
42
43struct cpu_user_regs {
44 uint32_t ebx;
45 uint32_t ecx;
46 uint32_t edx;
47 uint32_t esi;
48 uint32_t edi;
49 uint32_t ebp;
50 uint32_t eax;
51 uint16_t error_code; /* private */
52 uint16_t entry_vector; /* private */
53 uint32_t eip;
54 uint16_t cs;
55 uint8_t saved_upcall_mask;
56 uint8_t _pad0;
57 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
58 uint32_t esp;
59 uint16_t ss, _pad1;
60 uint16_t es, _pad2;
61 uint16_t ds, _pad3;
62 uint16_t fs, _pad4;
63 uint16_t gs, _pad5;
64};
65DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
66
67typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
68
69struct arch_vcpu_info {
70 unsigned long cr2;
71 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
72};
73
74struct xen_callback {
75 unsigned long cs;
76 unsigned long eip;
77};
78typedef struct xen_callback xen_callback_t;
79
80#define XEN_CALLBACK(__cs, __eip) \
81 ((struct xen_callback){ .cs = (__cs), .eip = (unsigned long)(__eip) })
82#endif /* !__ASSEMBLY__ */
83
84
85/*
86 * Page-directory addresses above 4GB do not fit into architectural %cr3.
87 * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
88 * must use the following accessor macros to pack/unpack valid MFNs.
89 *
 90 * Note that Xen relies on the pagetable base always being
 91 * page-aligned, and stores the 12 MSB of the address in the 12 LSB
 92 * of cr3.
93 */
94#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
95#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
96
97#endif /* __ASM_X86_XEN_INTERFACE_32_H */
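A quick worked example of the rotation, using a hypothetical frame number: a page directory in machine frame 0x123456 (machine address 0x123456000, which is above 4GB and so would not fit a plain 32-bit %cr3) packs and unpacks as:

	xen_pfn_to_cr3(0x123456)   == (0x123456   << 12) | (0x123456   >> 20)
	                           ==  0x23456000       |  0x00000001
	                           ==  0x23456001
	xen_cr3_to_pfn(0x23456001) == (0x23456001 >> 12) | (0x23456001 << 20)
	                           ==  0x00023456       |  0x00100000
	                           ==  0x00123456

all in 32-bit unsigned arithmetic, so the shifts wrap exactly as the macros intend.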
diff --git a/include/asm-x86/xen/interface_64.h b/include/asm-x86/xen/interface_64.h
new file mode 100644
index 000000000000..842266ce96e6
--- /dev/null
+++ b/include/asm-x86/xen/interface_64.h
@@ -0,0 +1,159 @@
1#ifndef __ASM_X86_XEN_INTERFACE_64_H
2#define __ASM_X86_XEN_INTERFACE_64_H
3
4/*
5 * 64-bit segment selectors
6 * These flat segments are in the Xen-private section of every GDT. Since these
7 * are also present in the initial GDT, many OSes will be able to avoid
8 * installing their own GDT.
9 */
10
11#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
12#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
13#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
14#define FLAT_RING3_DS64 0x0000 /* NULL selector */
15#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
16#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
17
18#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
19#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
20#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
21#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
22#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
23#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
24#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
25#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
26#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
27
28#define FLAT_USER_DS64 FLAT_RING3_DS64
29#define FLAT_USER_DS32 FLAT_RING3_DS32
30#define FLAT_USER_DS FLAT_USER_DS64
31#define FLAT_USER_CS64 FLAT_RING3_CS64
32#define FLAT_USER_CS32 FLAT_RING3_CS32
33#define FLAT_USER_CS FLAT_USER_CS64
34#define FLAT_USER_SS64 FLAT_RING3_SS64
35#define FLAT_USER_SS32 FLAT_RING3_SS32
36#define FLAT_USER_SS FLAT_USER_SS64
37
38#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
39#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
40#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
41#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
42
43#ifndef HYPERVISOR_VIRT_START
44#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
45#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
46#endif
47
48#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
49#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
50#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
51#ifndef machine_to_phys_mapping
52#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
53#endif
54
55/*
56 * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
57 * @which == SEGBASE_* ; @base == 64-bit base address
58 * Returns 0 on success.
59 */
60#define SEGBASE_FS 0
61#define SEGBASE_GS_USER 1
62#define SEGBASE_GS_KERNEL 2
63#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
64
65/*
66 * int HYPERVISOR_iret(void)
67 * All arguments are on the kernel stack, in the following format.
68 * Never returns if successful. Current kernel context is lost.
69 * The saved CS is mapped as follows:
70 * RING0 -> RING3 kernel mode.
71 * RING1 -> RING3 kernel mode.
72 * RING2 -> RING3 kernel mode.
73 * RING3 -> RING3 user mode.
 74 * However, RING0 indicates that the guest kernel should return to itself
75 * directly with
76 * orb $3,1*8(%rsp)
77 * iretq
78 * If flags contains VGCF_in_syscall:
79 * Restore RAX, RIP, RFLAGS, RSP.
80 * Discard R11, RCX, CS, SS.
81 * Otherwise:
82 * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
83 * All other registers are saved on hypercall entry and restored to user.
84 */
85/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
86#define _VGCF_in_syscall 8
87#define VGCF_in_syscall (1<<_VGCF_in_syscall)
88#define VGCF_IN_SYSCALL VGCF_in_syscall
89
90#ifndef __ASSEMBLY__
91
92struct iret_context {
93 /* Top of stack (%rsp at point of hypercall). */
94 uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
95 /* Bottom of iret stack frame. */
96};
97
98#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
99/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
100#define __DECL_REG(name) union { \
101 uint64_t r ## name, e ## name; \
102 uint32_t _e ## name; \
103}
104#else
105/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
106#define __DECL_REG(name) uint64_t r ## name
107#endif
108
109struct cpu_user_regs {
110 uint64_t r15;
111 uint64_t r14;
112 uint64_t r13;
113 uint64_t r12;
114 __DECL_REG(bp);
115 __DECL_REG(bx);
116 uint64_t r11;
117 uint64_t r10;
118 uint64_t r9;
119 uint64_t r8;
120 __DECL_REG(ax);
121 __DECL_REG(cx);
122 __DECL_REG(dx);
123 __DECL_REG(si);
124 __DECL_REG(di);
125 uint32_t error_code; /* private */
126 uint32_t entry_vector; /* private */
127 __DECL_REG(ip);
128 uint16_t cs, _pad0[1];
129 uint8_t saved_upcall_mask;
130 uint8_t _pad1[3];
131 __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
132 __DECL_REG(sp);
133 uint16_t ss, _pad2[3];
134 uint16_t es, _pad3[3];
135 uint16_t ds, _pad4[3];
136 uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
 137 uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
138};
139DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
140
141#undef __DECL_REG
142
143#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
144#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
145
146struct arch_vcpu_info {
147 unsigned long cr2;
148 unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
149};
150
151typedef unsigned long xen_callback_t;
152
153#define XEN_CALLBACK(__cs, __rip) \
154 ((unsigned long)(__rip))
155
156#endif /* !__ASSEMBLY__ */
157
158
159#endif /* __ASM_X86_XEN_INTERFACE_64_H */
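These SEGBASE_* selectors pair with the HYPERVISOR_set_segment_base() wrapper added to hypercall.h earlier in this diff. A hedged usage sketch; the per-cpu pointer is hypothetical:

	/* e.g. point the kernel's gs base at a per-cpu area: */
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
					(unsigned long)percpu_area) != 0)
		BUG();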
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index 377c04591c15..7b3835d3b77d 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -124,7 +124,7 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
124 124
125static inline unsigned long pte_mfn(pte_t pte) 125static inline unsigned long pte_mfn(pte_t pte)
126{ 126{
127 return (pte.pte & PTE_MASK) >> PAGE_SHIFT; 127 return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
128} 128}
129 129
130static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot) 130static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
@@ -148,13 +148,17 @@ static inline pte_t __pte_ma(pteval_t x)
148} 148}
149 149
150#define pmd_val_ma(v) ((v).pmd) 150#define pmd_val_ma(v) ((v).pmd)
151#ifdef __PAGETABLE_PUD_FOLDED
151#define pud_val_ma(v) ((v).pgd.pgd) 152#define pud_val_ma(v) ((v).pgd.pgd)
153#else
154#define pud_val_ma(v) ((v).pud)
155#endif
152#define __pmd_ma(x) ((pmd_t) { (x) } ) 156#define __pmd_ma(x) ((pmd_t) { (x) } )
153 157
154#define pgd_val_ma(x) ((x).pgd) 158#define pgd_val_ma(x) ((x).pgd)
155 159
156 160
157xmaddr_t arbitrary_virt_to_machine(unsigned long address); 161xmaddr_t arbitrary_virt_to_machine(void *address);
158void make_lowmem_page_readonly(void *vaddr); 162void make_lowmem_page_readonly(void *vaddr);
159void make_lowmem_page_readwrite(void *vaddr); 163void make_lowmem_page_readwrite(void *vaddr);
160 164