Diffstat (limited to 'include')
45 files changed, 919 insertions, 279 deletions
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h index 7bfcb47cc45..22aa58ca199 100644 --- a/include/asm-x86/amd_iommu_types.h +++ b/include/asm-x86/amd_iommu_types.h | |||
@@ -27,13 +27,12 @@ | |||
27 | /* | 27 | /* |
28 | * some size calculation constants | 28 | * some size calculation constants |
29 | */ | 29 | */ |
30 | #define DEV_TABLE_ENTRY_SIZE 256 | 30 | #define DEV_TABLE_ENTRY_SIZE 32 |
31 | #define ALIAS_TABLE_ENTRY_SIZE 2 | 31 | #define ALIAS_TABLE_ENTRY_SIZE 2 |
32 | #define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) | 32 | #define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) |
33 | 33 | ||
34 | /* helper macros */ | 34 | /* helper macros */ |
35 | #define LOW_U32(x) ((x) & ((1ULL << 32)-1)) | 35 | #define LOW_U32(x) ((x) & ((1ULL << 32)-1)) |
36 | #define HIGH_U32(x) (LOW_U32((x) >> 32)) | ||
37 | 36 | ||
38 | /* Length of the MMIO region for the AMD IOMMU */ | 37 | /* Length of the MMIO region for the AMD IOMMU */ |
39 | #define MMIO_REGION_LENGTH 0x4000 | 38 | #define MMIO_REGION_LENGTH 0x4000 |
@@ -158,78 +157,170 @@ | |||
158 | 157 | ||
159 | #define MAX_DOMAIN_ID 65536 | 158 | #define MAX_DOMAIN_ID 65536 |
160 | 159 | ||
160 | /* | ||
161 | * This structure contains generic data for IOMMU protection domains | ||
162 | * independent of their use. | ||
163 | */ | ||
161 | struct protection_domain { | 164 | struct protection_domain { |
162 | spinlock_t lock; | 165 | spinlock_t lock; /* mostly used to lock the page table*/ |
163 | u16 id; | 166 | u16 id; /* the domain id written to the device table */ |
164 | int mode; | 167 | int mode; /* paging mode (0-6 levels) */ |
165 | u64 *pt_root; | 168 | u64 *pt_root; /* page table root pointer */ |
166 | void *priv; | 169 | void *priv; /* private data */ |
167 | }; | 170 | }; |
168 | 171 | ||
172 | /* | ||
173 | * Data container for a dma_ops specific protection domain | ||
174 | */ | ||
169 | struct dma_ops_domain { | 175 | struct dma_ops_domain { |
170 | struct list_head list; | 176 | struct list_head list; |
177 | |||
178 | /* generic protection domain information */ | ||
171 | struct protection_domain domain; | 179 | struct protection_domain domain; |
180 | |||
181 | /* size of the aperture for the mappings */ | ||
172 | unsigned long aperture_size; | 182 | unsigned long aperture_size; |
183 | |||
184 | /* address we start to search for free addresses */ | ||
173 | unsigned long next_bit; | 185 | unsigned long next_bit; |
186 | |||
187 | /* address allocation bitmap */ | ||
174 | unsigned long *bitmap; | 188 | unsigned long *bitmap; |
189 | |||
190 | /* | ||
191 | * Array of PTE pages for the aperture. In this array we save all the | ||
192 | * leaf pages of the domain page table used for the aperture. This way | ||
193 | * we don't need to walk the page table to find a specific PTE. We can | ||
194 | * just calculate its address in constant time. | ||
195 | */ | ||
175 | u64 **pte_pages; | 196 | u64 **pte_pages; |
176 | }; | 197 | }; |
177 | 198 | ||
199 | /* | ||
200 | * Structure where we save information about one hardware AMD IOMMU in the | ||
201 | * system. | ||
202 | */ | ||
178 | struct amd_iommu { | 203 | struct amd_iommu { |
179 | struct list_head list; | 204 | struct list_head list; |
205 | |||
206 | /* locks the accesses to the hardware */ | ||
180 | spinlock_t lock; | 207 | spinlock_t lock; |
181 | 208 | ||
209 | /* device id of this IOMMU */ | ||
182 | u16 devid; | 210 | u16 devid; |
211 | /* | ||
212 | * Capability pointer. There could be more than one IOMMU per PCI | ||
213 | * device function if there is more than one AMD IOMMU capability | ||
214 | * pointer. | ||
215 | */ | ||
183 | u16 cap_ptr; | 216 | u16 cap_ptr; |
184 | 217 | ||
218 | /* physical address of MMIO space */ | ||
185 | u64 mmio_phys; | 219 | u64 mmio_phys; |
220 | /* virtual address of MMIO space */ | ||
186 | u8 *mmio_base; | 221 | u8 *mmio_base; |
222 | |||
223 | /* capabilities of that IOMMU read from ACPI */ | ||
187 | u32 cap; | 224 | u32 cap; |
225 | |||
226 | /* first device this IOMMU handles. read from PCI */ | ||
188 | u16 first_device; | 227 | u16 first_device; |
228 | /* last device this IOMMU handles. read from PCI */ | ||
189 | u16 last_device; | 229 | u16 last_device; |
230 | |||
231 | /* start of exclusion range of that IOMMU */ | ||
190 | u64 exclusion_start; | 232 | u64 exclusion_start; |
233 | /* length of exclusion range of that IOMMU */ | ||
191 | u64 exclusion_length; | 234 | u64 exclusion_length; |
192 | 235 | ||
236 | /* command buffer virtual address */ | ||
193 | u8 *cmd_buf; | 237 | u8 *cmd_buf; |
238 | /* size of command buffer */ | ||
194 | u32 cmd_buf_size; | 239 | u32 cmd_buf_size; |
195 | 240 | ||
241 | /* if one, we need to send a completion wait command */ | ||
196 | int need_sync; | 242 | int need_sync; |
197 | 243 | ||
244 | /* default dma_ops domain for that IOMMU */ | ||
198 | struct dma_ops_domain *default_dom; | 245 | struct dma_ops_domain *default_dom; |
199 | }; | 246 | }; |
200 | 247 | ||
248 | /* | ||
249 | * List with all IOMMUs in the system. This list is not locked because it is | ||
250 | * only written and read at driver initialization or suspend time | ||
251 | */ | ||
201 | extern struct list_head amd_iommu_list; | 252 | extern struct list_head amd_iommu_list; |
202 | 253 | ||
254 | /* | ||
255 | * Structure defining one entry in the device table | ||
256 | */ | ||
203 | struct dev_table_entry { | 257 | struct dev_table_entry { |
204 | u32 data[8]; | 258 | u32 data[8]; |
205 | }; | 259 | }; |
206 | 260 | ||
261 | /* | ||
262 | * One entry for unity mappings parsed out of the ACPI table. | ||
263 | */ | ||
207 | struct unity_map_entry { | 264 | struct unity_map_entry { |
208 | struct list_head list; | 265 | struct list_head list; |
266 | |||
267 | /* starting device id this entry is used for (including) */ | ||
209 | u16 devid_start; | 268 | u16 devid_start; |
269 | /* end device id this entry is used for (including) */ | ||
210 | u16 devid_end; | 270 | u16 devid_end; |
271 | |||
272 | /* start address to unity map (including) */ | ||
211 | u64 address_start; | 273 | u64 address_start; |
274 | /* end address to unity map (including) */ | ||
212 | u64 address_end; | 275 | u64 address_end; |
276 | |||
277 | /* required protection */ | ||
213 | int prot; | 278 | int prot; |
214 | }; | 279 | }; |
215 | 280 | ||
281 | /* | ||
282 | * List of all unity mappings. It is not locked because at runtime it is only | ||
283 | * read. It is created at ACPI table parsing time. | ||
284 | */ | ||
216 | extern struct list_head amd_iommu_unity_map; | 285 | extern struct list_head amd_iommu_unity_map; |
217 | 286 | ||
218 | /* data structures for device handling */ | 287 | /* |
288 | * Data structures for device handling | ||
289 | */ | ||
290 | |||
291 | /* | ||
292 | * Device table used by hardware. Read and write accesses by software are | ||
293 | * locked with the amd_iommu_pd_table lock. | ||
294 | */ | ||
219 | extern struct dev_table_entry *amd_iommu_dev_table; | 295 | extern struct dev_table_entry *amd_iommu_dev_table; |
296 | |||
297 | /* | ||
298 | * Alias table to find requestor ids for device ids. Not locked because it is | ||
299 | * only read at runtime. | ||
300 | */ | ||
220 | extern u16 *amd_iommu_alias_table; | 301 | extern u16 *amd_iommu_alias_table; |
302 | |||
303 | /* | ||
304 | * Reverse lookup table to find the IOMMU which translates a specific device. | ||
305 | */ | ||
221 | extern struct amd_iommu **amd_iommu_rlookup_table; | 306 | extern struct amd_iommu **amd_iommu_rlookup_table; |
222 | 307 | ||
308 | /* size of the dma_ops aperture as power of 2 */ | ||
223 | extern unsigned amd_iommu_aperture_order; | 309 | extern unsigned amd_iommu_aperture_order; |
224 | 310 | ||
311 | /* largest PCI device id we expect translation requests for */ | ||
225 | extern u16 amd_iommu_last_bdf; | 312 | extern u16 amd_iommu_last_bdf; |
226 | 313 | ||
227 | /* data structures for protection domain handling */ | 314 | /* data structures for protection domain handling */ |
228 | extern struct protection_domain **amd_iommu_pd_table; | 315 | extern struct protection_domain **amd_iommu_pd_table; |
316 | |||
317 | /* allocation bitmap for domain ids */ | ||
229 | extern unsigned long *amd_iommu_pd_alloc_bitmap; | 318 | extern unsigned long *amd_iommu_pd_alloc_bitmap; |
230 | 319 | ||
320 | /* will be 1 if device isolation is enabled */ | ||
231 | extern int amd_iommu_isolate; | 321 | extern int amd_iommu_isolate; |
232 | 322 | ||
323 | /* takes a PCI device id and prints it out in a readable form */ | ||
233 | static inline void print_devid(u16 devid, int nl) | 324 | static inline void print_devid(u16 devid, int nl) |
234 | { | 325 | { |
235 | int bus = devid >> 8; | 326 | int bus = devid >> 8; |
@@ -241,4 +332,11 @@ static inline void print_devid(u16 devid, int nl) | |||
241 | printk("\n"); | 332 | printk("\n"); |
242 | } | 333 | } |
243 | 334 | ||
335 | /* takes bus and device/function and returns the device id | ||
336 | * FIXME: should that be in generic PCI code? */ | ||
337 | static inline u16 calc_devid(u8 bus, u8 devfn) | ||
338 | { | ||
339 | return (((u16)bus) << 8) | devfn; | ||
340 | } | ||
341 | |||
244 | #endif | 342 | #endif |
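
How the pieces above fit together, as a minimal sketch (not part of the patch): calc_devid() packs bus/devfn into the 16-bit device id that indexes the exported tables, the alias table then yields the requestor id the IOMMU actually sees, and the reverse lookup table maps that id to the handling IOMMU. The function name below is invented for illustration, and locking against amd_iommu_pd_table is omitted.

static struct amd_iommu *iommu_for_device(u8 bus, u8 devfn)
{
	u16 devid = calc_devid(bus, devfn);	/* (bus << 8) | devfn */

	if (devid > amd_iommu_last_bdf)		/* no translation expected */
		return NULL;

	/* resolve the requestor id this device appears as */
	devid = amd_iommu_alias_table[devid];

	/* which hardware IOMMU translates requests from this id? */
	return amd_iommu_rlookup_table[devid];
}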
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index 4e2c1e517f0..b96460a7190 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h | |||
@@ -3,6 +3,8 @@ | |||
3 | 3 | ||
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
6 | |||
7 | #include <asm/alternative.h> | ||
6 | #include <asm/fixmap.h> | 8 | #include <asm/fixmap.h> |
7 | #include <asm/apicdef.h> | 9 | #include <asm/apicdef.h> |
8 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
@@ -10,7 +12,7 @@ | |||
10 | 12 | ||
11 | #define ARCH_APICTIMER_STOPS_ON_C3 1 | 13 | #define ARCH_APICTIMER_STOPS_ON_C3 1 |
12 | 14 | ||
13 | #define Dprintk(x...) | 15 | #define Dprintk printk |
14 | 16 | ||
15 | /* | 17 | /* |
16 | * Debugging macros | 18 | * Debugging macros |
@@ -35,7 +37,7 @@ extern void generic_apic_probe(void); | |||
35 | 37 | ||
36 | #ifdef CONFIG_X86_LOCAL_APIC | 38 | #ifdef CONFIG_X86_LOCAL_APIC |
37 | 39 | ||
38 | extern int apic_verbosity; | 40 | extern unsigned int apic_verbosity; |
39 | extern int local_apic_timer_c2_ok; | 41 | extern int local_apic_timer_c2_ok; |
40 | 42 | ||
41 | extern int ioapic_force; | 43 | extern int ioapic_force; |
@@ -48,7 +50,6 @@ extern int disable_apic; | |||
48 | #include <asm/paravirt.h> | 50 | #include <asm/paravirt.h> |
49 | #else | 51 | #else |
50 | #define apic_write native_apic_write | 52 | #define apic_write native_apic_write |
51 | #define apic_write_atomic native_apic_write_atomic | ||
52 | #define apic_read native_apic_read | 53 | #define apic_read native_apic_read |
53 | #define setup_boot_clock setup_boot_APIC_clock | 54 | #define setup_boot_clock setup_boot_APIC_clock |
54 | #define setup_secondary_clock setup_secondary_APIC_clock | 55 | #define setup_secondary_clock setup_secondary_APIC_clock |
@@ -58,12 +59,11 @@ extern int is_vsmp_box(void); | |||
58 | 59 | ||
59 | static inline void native_apic_write(unsigned long reg, u32 v) | 60 | static inline void native_apic_write(unsigned long reg, u32 v) |
60 | { | 61 | { |
61 | *((volatile u32 *)(APIC_BASE + reg)) = v; | 62 | volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); |
62 | } | ||
63 | 63 | ||
64 | static inline void native_apic_write_atomic(unsigned long reg, u32 v) | 64 | alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP, |
65 | { | 65 | ASM_OUTPUT2("=r" (v), "=m" (*addr)), |
66 | (void)xchg((u32 *)(APIC_BASE + reg), v); | 66 | ASM_OUTPUT2("0" (v), "m" (*addr))); |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline u32 native_apic_read(unsigned long reg) | 69 | static inline u32 native_apic_read(unsigned long reg) |
@@ -75,16 +75,6 @@ extern void apic_wait_icr_idle(void); | |||
75 | extern u32 safe_apic_wait_icr_idle(void); | 75 | extern u32 safe_apic_wait_icr_idle(void); |
76 | extern int get_physical_broadcast(void); | 76 | extern int get_physical_broadcast(void); |
77 | 77 | ||
78 | #ifdef CONFIG_X86_GOOD_APIC | ||
79 | # define FORCE_READ_AROUND_WRITE 0 | ||
80 | # define apic_read_around(x) | ||
81 | # define apic_write_around(x, y) apic_write((x), (y)) | ||
82 | #else | ||
83 | # define FORCE_READ_AROUND_WRITE 1 | ||
84 | # define apic_read_around(x) apic_read(x) | ||
85 | # define apic_write_around(x, y) apic_write_atomic((x), (y)) | ||
86 | #endif | ||
87 | |||
88 | static inline void ack_APIC_irq(void) | 78 | static inline void ack_APIC_irq(void) |
89 | { | 79 | { |
90 | /* | 80 | /* |
@@ -95,7 +85,7 @@ static inline void ack_APIC_irq(void) | |||
95 | */ | 85 | */ |
96 | 86 | ||
97 | /* Docs say use 0 for future compatibility */ | 87 | /* Docs say use 0 for future compatibility */ |
98 | apic_write_around(APIC_EOI, 0); | 88 | apic_write(APIC_EOI, 0); |
99 | } | 89 | } |
100 | 90 | ||
101 | extern int lapic_get_maxlvt(void); | 91 | extern int lapic_get_maxlvt(void); |
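
With apic_write_atomic() and the *_around() wrappers gone, apic_write() itself now picks between a plain movl and an xchgl via alternative_io(), keyed on the new X86_FEATURE_11AP flag, so callers no longer encode the 11AP erratum themselves. A before/after sketch of a typical call site (the register and value are chosen only for illustration):

/* before: the wrapper decided whether the write had to be "atomic" */
apic_write_around(APIC_LVTT, lvtt_value);

/* after: apic_write() patches in xchgl on 11AP-afflicted CPUs */
apic_write(APIC_LVTT, lvtt_value);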
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h index 768aee8a04e..8411750ceb6 100644 --- a/include/asm-x86/arch_hooks.h +++ b/include/asm-x86/arch_hooks.h | |||
@@ -21,6 +21,7 @@ extern void intr_init_hook(void); | |||
21 | extern void pre_intr_init_hook(void); | 21 | extern void pre_intr_init_hook(void); |
22 | extern void pre_setup_arch_hook(void); | 22 | extern void pre_setup_arch_hook(void); |
23 | extern void trap_init_hook(void); | 23 | extern void trap_init_hook(void); |
24 | extern void pre_time_init_hook(void); | ||
24 | extern void time_init_hook(void); | 25 | extern void time_init_hook(void); |
25 | extern void mca_nmi_hook(void); | 26 | extern void mca_nmi_hook(void); |
26 | 27 | ||
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 96b1829cea1..cfb2b64f76e 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h | |||
@@ -356,7 +356,7 @@ static inline unsigned long ffz(unsigned long word) | |||
356 | * __fls: find last set bit in word | 356 | * __fls: find last set bit in word |
357 | * @word: The word to search | 357 | * @word: The word to search |
358 | * | 358 | * |
359 | * Undefined if no zero exists, so code should check against ~0UL first. | 359 | * Undefined if no set bit exists, so code should check against 0 first. |
360 | */ | 360 | */ |
361 | static inline unsigned long __fls(unsigned long word) | 361 | static inline unsigned long __fls(unsigned long word) |
362 | { | 362 | { |
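
The corrected kerneldoc is worth taking literally: __fls() is undefined for an all-zero word, so callers are expected to test against 0 first. A small illustrative wrapper (the function name is made up):

/* index of the highest set bit, or -1 if the word is empty */
static inline int fls_or_minus_one(unsigned long word)
{
	if (!word)			/* __fls(0) is undefined */
		return -1;
	return (int)__fls(word);
}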
diff --git a/include/asm-x86/calling.h b/include/asm-x86/calling.h index f13e62e2cb3..2bc162e0ec6 100644 --- a/include/asm-x86/calling.h +++ b/include/asm-x86/calling.h | |||
@@ -104,7 +104,7 @@ | |||
104 | .endif | 104 | .endif |
105 | .endm | 105 | .endm |
106 | 106 | ||
107 | .macro LOAD_ARGS offset | 107 | .macro LOAD_ARGS offset, skiprax=0 |
108 | movq \offset(%rsp), %r11 | 108 | movq \offset(%rsp), %r11 |
109 | movq \offset+8(%rsp), %r10 | 109 | movq \offset+8(%rsp), %r10 |
110 | movq \offset+16(%rsp), %r9 | 110 | movq \offset+16(%rsp), %r9 |
@@ -113,7 +113,10 @@ | |||
113 | movq \offset+48(%rsp), %rdx | 113 | movq \offset+48(%rsp), %rdx |
114 | movq \offset+56(%rsp), %rsi | 114 | movq \offset+56(%rsp), %rsi |
115 | movq \offset+64(%rsp), %rdi | 115 | movq \offset+64(%rsp), %rdi |
116 | .if \skiprax | ||
117 | .else | ||
116 | movq \offset+72(%rsp), %rax | 118 | movq \offset+72(%rsp), %rax |
119 | .endif | ||
117 | .endm | 120 | .endm |
118 | 121 | ||
119 | #define REST_SKIP 6*8 | 122 | #define REST_SKIP 6*8 |
@@ -165,4 +168,3 @@ | |||
165 | .macro icebp | 168 | .macro icebp |
166 | .byte 0xf1 | 169 | .byte 0xf1 |
167 | .endm | 170 | .endm |
168 | |||
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 75ef959db32..2f5a792b0ac 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h | |||
@@ -79,6 +79,7 @@ | |||
79 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ | 79 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ |
80 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ | 80 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ |
81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ | 81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ |
82 | #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ | ||
82 | 83 | ||
83 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 84 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
84 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 85 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h index a1a4dc7fe6e..c2ddd3d1b88 100644 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h | |||
@@ -14,7 +14,6 @@ extern dma_addr_t bad_dma_address; | |||
14 | extern int iommu_merge; | 14 | extern int iommu_merge; |
15 | extern struct device fallback_dev; | 15 | extern struct device fallback_dev; |
16 | extern int panic_on_overflow; | 16 | extern int panic_on_overflow; |
17 | extern int forbid_dac; | ||
18 | extern int force_iommu; | 17 | extern int force_iommu; |
19 | 18 | ||
20 | struct dma_mapping_ops { | 19 | struct dma_mapping_ops { |
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h index 06633b01dd5..16a31e2c7c5 100644 --- a/include/asm-x86/e820.h +++ b/include/asm-x86/e820.h | |||
@@ -90,6 +90,14 @@ static inline void e820_mark_nosave_regions(unsigned long limit_pfn) | |||
90 | } | 90 | } |
91 | #endif | 91 | #endif |
92 | 92 | ||
93 | #ifdef CONFIG_MEMTEST | ||
94 | extern void early_memtest(unsigned long start, unsigned long end); | ||
95 | #else | ||
96 | static inline void early_memtest(unsigned long start, unsigned long end) | ||
97 | { | ||
98 | } | ||
99 | #endif | ||
100 | |||
93 | extern unsigned long end_user_pfn; | 101 | extern unsigned long end_user_pfn; |
94 | 102 | ||
95 | extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); | 103 | extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); |
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h index aae2f0501a4..f1ac2b2167d 100644 --- a/include/asm-x86/fixmap_32.h +++ b/include/asm-x86/fixmap_32.h | |||
@@ -90,13 +90,13 @@ enum fixed_addresses { | |||
90 | * 256 temporary boot-time mappings, used by early_ioremap(), | 90 | * 256 temporary boot-time mappings, used by early_ioremap(), |
91 | * before ioremap() is functional. | 91 | * before ioremap() is functional. |
92 | * | 92 | * |
93 | * We round it up to the next 512 pages boundary so that we | 93 | * We round it up to the next 256 pages boundary so that we |
94 | * can have a single pgd entry and a single pte table: | 94 | * can have a single pgd entry and a single pte table: |
95 | */ | 95 | */ |
96 | #define NR_FIX_BTMAPS 64 | 96 | #define NR_FIX_BTMAPS 64 |
97 | #define FIX_BTMAPS_NESTING 4 | 97 | #define FIX_BTMAPS_NESTING 4 |
98 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 - | 98 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - |
99 | (__end_of_permanent_fixed_addresses & 511), | 99 | (__end_of_permanent_fixed_addresses & 255), |
100 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, | 100 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, |
101 | FIX_WP_TEST, | 101 | FIX_WP_TEST, |
102 | #ifdef CONFIG_ACPI | 102 | #ifdef CONFIG_ACPI |
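
The 512 -> 256 correction follows from the constants right below the comment: NR_FIX_BTMAPS * FIX_BTMAPS_NESTING = 64 * 4 = 256 pages, and rounding FIX_BTMAP_END up to a 256-page boundary keeps that whole early-ioremap window behind a single pgd entry and pte table. A hypothetical compile-time check (not part of the patch) stating the same invariant:

static inline void check_fix_btmaps(void)
{
	/* 64 slots x 4 nesting levels must fit the 256-page window */
	BUILD_BUG_ON(NR_FIX_BTMAPS * FIX_BTMAPS_NESTING > 256);
}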
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h index 33b9aeeb35a..3f62a83887f 100644 --- a/include/asm-x86/gart.h +++ b/include/asm-x86/gart.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define _ASM_X8664_GART_H 1 | 2 | #define _ASM_X8664_GART_H 1 |
3 | 3 | ||
4 | #include <asm/e820.h> | 4 | #include <asm/e820.h> |
5 | #include <asm/iommu.h> | ||
6 | 5 | ||
7 | extern void set_up_gart_resume(u32, u32); | 6 | extern void set_up_gart_resume(u32, u32); |
8 | 7 | ||
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h index 068c9a40aa5..d63166fb3ab 100644 --- a/include/asm-x86/iommu.h +++ b/include/asm-x86/iommu.h | |||
@@ -25,10 +25,18 @@ extern void gart_iommu_hole_init(void); | |||
25 | static inline void early_gart_iommu_check(void) | 25 | static inline void early_gart_iommu_check(void) |
26 | { | 26 | { |
27 | } | 27 | } |
28 | 28 | static inline void gart_iommu_init(void) | |
29 | { | ||
30 | } | ||
29 | static inline void gart_iommu_shutdown(void) | 31 | static inline void gart_iommu_shutdown(void) |
30 | { | 32 | { |
31 | } | 33 | } |
34 | static inline void gart_parse_options(char *options) | ||
35 | { | ||
36 | } | ||
37 | static inline void gart_iommu_hole_init(void) | ||
38 | { | ||
39 | } | ||
32 | #endif | 40 | #endif |
33 | 41 | ||
34 | #endif | 42 | #endif |
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index 017c8c19ad8..c3b9dc6970c 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h | |||
@@ -63,9 +63,9 @@ static inline void init_apic_ldr(void) | |||
63 | unsigned long val; | 63 | unsigned long val; |
64 | int cpu = smp_processor_id(); | 64 | int cpu = smp_processor_id(); |
65 | 65 | ||
66 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | 66 | apic_write(APIC_DFR, APIC_DFR_VALUE); |
67 | val = calculate_ldr(cpu); | 67 | val = calculate_ldr(cpu); |
68 | apic_write_around(APIC_LDR, val); | 68 | apic_write(APIC_LDR, val); |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline void setup_apic_routing(void) | 71 | static inline void setup_apic_routing(void) |
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index 0b2cde5e1b7..f3226b9a6b8 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h | |||
@@ -46,10 +46,10 @@ static inline void init_apic_ldr(void) | |||
46 | { | 46 | { |
47 | unsigned long val; | 47 | unsigned long val; |
48 | 48 | ||
49 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | 49 | apic_write(APIC_DFR, APIC_DFR_VALUE); |
50 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | 50 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; |
51 | val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); | 51 | val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); |
52 | apic_write_around(APIC_LDR, val); | 52 | apic_write(APIC_LDR, val); |
53 | } | 53 | } |
54 | 54 | ||
55 | static inline int apic_id_registered(void) | 55 | static inline int apic_id_registered(void) |
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index fbc8ad256f5..0a3fdf93067 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h | |||
@@ -66,9 +66,9 @@ static inline void init_apic_ldr(void) | |||
66 | unsigned long val; | 66 | unsigned long val; |
67 | int cpu = smp_processor_id(); | 67 | int cpu = smp_processor_id(); |
68 | 68 | ||
69 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | 69 | apic_write(APIC_DFR, APIC_DFR_VALUE); |
70 | val = calculate_ldr(cpu); | 70 | val = calculate_ldr(cpu); |
71 | apic_write_around(APIC_LDR, val); | 71 | apic_write(APIC_LDR, val); |
72 | } | 72 | } |
73 | 73 | ||
74 | #ifndef CONFIG_X86_GENERICARCH | 74 | #ifndef CONFIG_X86_GENERICARCH |
diff --git a/include/asm-x86/mach-generic/mach_mpspec.h b/include/asm-x86/mach-generic/mach_mpspec.h index 9ef0b941bb2..c83c120be53 100644 --- a/include/asm-x86/mach-generic/mach_mpspec.h +++ b/include/asm-x86/mach-generic/mach_mpspec.h | |||
@@ -7,4 +7,6 @@ | |||
7 | /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ | 7 | /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ |
8 | #define MAX_MP_BUSSES 260 | 8 | #define MAX_MP_BUSSES 260 |
9 | 9 | ||
10 | extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, | ||
11 | char *productid); | ||
10 | #endif /* __ASM_MACH_MPSPEC_H */ | 12 | #endif /* __ASM_MACH_MPSPEC_H */ |
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 1f76c2e7023..75d2c95005d 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h | |||
@@ -63,10 +63,10 @@ static inline void init_apic_ldr(void) | |||
63 | * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ | 63 | * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ |
64 | BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); | 64 | BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); |
65 | id = my_cluster | (1UL << count); | 65 | id = my_cluster | (1UL << count); |
66 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | 66 | apic_write(APIC_DFR, APIC_DFR_VALUE); |
67 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | 67 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; |
68 | val |= SET_APIC_LOGICAL_ID(id); | 68 | val |= SET_APIC_LOGICAL_ID(id); |
69 | apic_write_around(APIC_LDR, val); | 69 | apic_write(APIC_LDR, val); |
70 | } | 70 | } |
71 | 71 | ||
72 | static inline int multi_timer_check(int apic, int irq) | 72 | static inline int multi_timer_check(int apic, int irq) |
diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h deleted file mode 100644 index 86be554342d..00000000000 --- a/include/asm-x86/mach-visws/entry_arch.h +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | /* | ||
2 | * VISWS uses the standard Linux entry points: | ||
3 | */ | ||
4 | |||
5 | #include "../mach-default/entry_arch.h" | ||
diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h deleted file mode 100644 index 6943e7a1d0e..00000000000 --- a/include/asm-x86/mach-visws/mach_apic.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include "../mach-default/mach_apic.h" | ||
diff --git a/include/asm-x86/mach-visws/mach_apicdef.h b/include/asm-x86/mach-visws/mach_apicdef.h deleted file mode 100644 index 42711d152a9..00000000000 --- a/include/asm-x86/mach-visws/mach_apicdef.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include "../mach-default/mach_apicdef.h" | ||
diff --git a/include/asm-x86/mach-visws/setup_arch.h b/include/asm-x86/mach-visws/setup_arch.h deleted file mode 100644 index fa4766ca2d1..00000000000 --- a/include/asm-x86/mach-visws/setup_arch.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include "../mach-default/setup_arch.h" | ||
diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h deleted file mode 100644 index e4433ca8871..00000000000 --- a/include/asm-x86/mach-visws/smpboot_hooks.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include "../mach-default/smpboot_hooks.h" | ||
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index ef5e8ec6a6a..695ce9383f5 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -205,7 +205,6 @@ struct pv_apic_ops { | |||
205 | * these shouldn't be in this interface. | 205 | * these shouldn't be in this interface. |
206 | */ | 206 | */ |
207 | void (*apic_write)(unsigned long reg, u32 v); | 207 | void (*apic_write)(unsigned long reg, u32 v); |
208 | void (*apic_write_atomic)(unsigned long reg, u32 v); | ||
209 | u32 (*apic_read)(unsigned long reg); | 208 | u32 (*apic_read)(unsigned long reg); |
210 | void (*setup_boot_clock)(void); | 209 | void (*setup_boot_clock)(void); |
211 | void (*setup_secondary_clock)(void); | 210 | void (*setup_secondary_clock)(void); |
@@ -896,11 +895,6 @@ static inline void apic_write(unsigned long reg, u32 v) | |||
896 | PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); | 895 | PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); |
897 | } | 896 | } |
898 | 897 | ||
899 | static inline void apic_write_atomic(unsigned long reg, u32 v) | ||
900 | { | ||
901 | PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v); | ||
902 | } | ||
903 | |||
904 | static inline u32 apic_read(unsigned long reg) | 898 | static inline u32 apic_read(unsigned long reg) |
905 | { | 899 | { |
906 | return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); | 900 | return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); |
@@ -1396,8 +1390,8 @@ extern struct paravirt_patch_site __parainstructions[], | |||
1396 | * caller saved registers but the argument parameter */ | 1390 | * caller saved registers but the argument parameter */ |
1397 | #define PV_SAVE_REGS "pushq %%rdi;" | 1391 | #define PV_SAVE_REGS "pushq %%rdi;" |
1398 | #define PV_RESTORE_REGS "popq %%rdi;" | 1392 | #define PV_RESTORE_REGS "popq %%rdi;" |
1399 | #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx" | 1393 | #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi" |
1400 | #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx" | 1394 | #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi" |
1401 | #define PV_FLAGS_ARG "D" | 1395 | #define PV_FLAGS_ARG "D" |
1402 | #endif | 1396 | #endif |
1403 | 1397 | ||
@@ -1489,8 +1483,26 @@ static inline unsigned long __raw_local_irq_save(void) | |||
1489 | 1483 | ||
1490 | 1484 | ||
1491 | #ifdef CONFIG_X86_64 | 1485 | #ifdef CONFIG_X86_64 |
1492 | #define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx | 1486 | #define PV_SAVE_REGS \ |
1493 | #define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax | 1487 | push %rax; \ |
1488 | push %rcx; \ | ||
1489 | push %rdx; \ | ||
1490 | push %rsi; \ | ||
1491 | push %rdi; \ | ||
1492 | push %r8; \ | ||
1493 | push %r9; \ | ||
1494 | push %r10; \ | ||
1495 | push %r11 | ||
1496 | #define PV_RESTORE_REGS \ | ||
1497 | pop %r11; \ | ||
1498 | pop %r10; \ | ||
1499 | pop %r9; \ | ||
1500 | pop %r8; \ | ||
1501 | pop %rdi; \ | ||
1502 | pop %rsi; \ | ||
1503 | pop %rdx; \ | ||
1504 | pop %rcx; \ | ||
1505 | pop %rax | ||
1494 | #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) | 1506 | #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) |
1495 | #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) | 1507 | #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) |
1496 | #define PARA_INDIRECT(addr) *addr(%rip) | 1508 | #define PARA_INDIRECT(addr) *addr(%rip) |
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h index 912a3a17b9d..4e91ee1e37a 100644 --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h | |||
@@ -22,6 +22,32 @@ | |||
22 | 22 | ||
23 | DECLARE_PER_CPU(struct x8664_pda, pda); | 23 | DECLARE_PER_CPU(struct x8664_pda, pda); |
24 | 24 | ||
25 | /* | ||
26 | * These are supposed to be implemented as a single instruction which | ||
27 | * operates on the per-cpu data base segment. x86-64 doesn't have | ||
28 | * that yet, so this is a fairly inefficient workaround for the | ||
29 | * meantime. The single instruction is atomic with respect to | ||
30 | * preemption and interrupts, so we need to explicitly disable | ||
31 | * interrupts here to achieve the same effect. However, because it | ||
32 | * can be used from within interrupt-disable/enable, we can't actually | ||
33 | * disable interrupts; disabling preemption is enough. | ||
34 | */ | ||
35 | #define x86_read_percpu(var) \ | ||
36 | ({ \ | ||
37 | typeof(per_cpu_var(var)) __tmp; \ | ||
38 | preempt_disable(); \ | ||
39 | __tmp = __get_cpu_var(var); \ | ||
40 | preempt_enable(); \ | ||
41 | __tmp; \ | ||
42 | }) | ||
43 | |||
44 | #define x86_write_percpu(var, val) \ | ||
45 | do { \ | ||
46 | preempt_disable(); \ | ||
47 | __get_cpu_var(var) = (val); \ | ||
48 | preempt_enable(); \ | ||
49 | } while(0) | ||
50 | |||
25 | #else /* CONFIG_X86_64 */ | 51 | #else /* CONFIG_X86_64 */ |
26 | 52 | ||
27 | #ifdef __ASSEMBLY__ | 53 | #ifdef __ASSEMBLY__ |
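
A minimal usage sketch for the new 64-bit accessors (the per-cpu variable is invented for illustration). As the comment above explains, each individual access is protected only by disabling preemption, so a read-modify-write sequence like the one below is still not atomic as a whole:

DEFINE_PER_CPU(int, example_counter);

static void bump_example_counter(void)
{
	int v = x86_read_percpu(example_counter);

	x86_write_percpu(example_counter, v + 1);
}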
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 49cbd76b954..96aa76e691d 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h | |||
@@ -302,6 +302,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | |||
302 | /* Install a pte for a particular vaddr in kernel space. */ | 302 | /* Install a pte for a particular vaddr in kernel space. */ |
303 | void set_pte_vaddr(unsigned long vaddr, pte_t pte); | 303 | void set_pte_vaddr(unsigned long vaddr, pte_t pte); |
304 | 304 | ||
305 | #ifdef CONFIG_X86_32 | ||
306 | extern void native_pagetable_setup_start(pgd_t *base); | ||
307 | extern void native_pagetable_setup_done(pgd_t *base); | ||
308 | #else | ||
309 | static inline void native_pagetable_setup_start(pgd_t *base) {} | ||
310 | static inline void native_pagetable_setup_done(pgd_t *base) {} | ||
311 | #endif | ||
312 | |||
305 | #ifdef CONFIG_PARAVIRT | 313 | #ifdef CONFIG_PARAVIRT |
306 | #include <asm/paravirt.h> | 314 | #include <asm/paravirt.h> |
307 | #else /* !CONFIG_PARAVIRT */ | 315 | #else /* !CONFIG_PARAVIRT */ |
@@ -333,6 +341,16 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pte); | |||
333 | 341 | ||
334 | #define pte_update(mm, addr, ptep) do { } while (0) | 342 | #define pte_update(mm, addr, ptep) do { } while (0) |
335 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | 343 | #define pte_update_defer(mm, addr, ptep) do { } while (0) |
344 | |||
345 | static inline void __init paravirt_pagetable_setup_start(pgd_t *base) | ||
346 | { | ||
347 | native_pagetable_setup_start(base); | ||
348 | } | ||
349 | |||
350 | static inline void __init paravirt_pagetable_setup_done(pgd_t *base) | ||
351 | { | ||
352 | native_pagetable_setup_done(base); | ||
353 | } | ||
336 | #endif /* CONFIG_PARAVIRT */ | 354 | #endif /* CONFIG_PARAVIRT */ |
337 | 355 | ||
338 | #endif /* __ASSEMBLY__ */ | 356 | #endif /* __ASSEMBLY__ */ |
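
Hoisting these hooks into pgtable.h lets the 32-bit setup code call the same pair whether or not CONFIG_PARAVIRT is set (the 64-bit stubs are empty). A sketch of the expected call pattern; the surrounding function is illustrative, not part of this header:

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	paravirt_pagetable_setup_start(pgd_base);
	/* ... build the kernel mappings ... */
	paravirt_pagetable_setup_done(pgd_base);
}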
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index ec871c420d7..0611abf96a5 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h | |||
@@ -171,21 +171,6 @@ do { \ | |||
171 | */ | 171 | */ |
172 | #define update_mmu_cache(vma, address, pte) do { } while (0) | 172 | #define update_mmu_cache(vma, address, pte) do { } while (0) |
173 | 173 | ||
174 | extern void native_pagetable_setup_start(pgd_t *base); | ||
175 | extern void native_pagetable_setup_done(pgd_t *base); | ||
176 | |||
177 | #ifndef CONFIG_PARAVIRT | ||
178 | static inline void __init paravirt_pagetable_setup_start(pgd_t *base) | ||
179 | { | ||
180 | native_pagetable_setup_start(base); | ||
181 | } | ||
182 | |||
183 | static inline void __init paravirt_pagetable_setup_done(pgd_t *base) | ||
184 | { | ||
185 | native_pagetable_setup_done(base); | ||
186 | } | ||
187 | #endif /* !CONFIG_PARAVIRT */ | ||
188 | |||
189 | #endif /* !__ASSEMBLY__ */ | 174 | #endif /* !__ASSEMBLY__ */ |
190 | 175 | ||
191 | /* | 176 | /* |
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index fa7208b483c..805d3128bfc 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h | |||
@@ -16,6 +16,8 @@ | |||
16 | extern pud_t level3_kernel_pgt[512]; | 16 | extern pud_t level3_kernel_pgt[512]; |
17 | extern pud_t level3_ident_pgt[512]; | 17 | extern pud_t level3_ident_pgt[512]; |
18 | extern pmd_t level2_kernel_pgt[512]; | 18 | extern pmd_t level2_kernel_pgt[512]; |
19 | extern pmd_t level2_fixmap_pgt[512]; | ||
20 | extern pmd_t level2_ident_pgt[512]; | ||
19 | extern pgd_t init_level4_pgt[]; | 21 | extern pgd_t init_level4_pgt[]; |
20 | 22 | ||
21 | #define swapper_pg_dir init_level4_pgt | 23 | #define swapper_pg_dir init_level4_pgt |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 55402d2ab93..15cb82a44e8 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -722,8 +722,6 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | |||
722 | 722 | ||
723 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | 723 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); |
724 | 724 | ||
725 | extern int force_mwait; | ||
726 | |||
727 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 725 | extern void select_idle_routine(const struct cpuinfo_x86 *c); |
728 | 726 | ||
729 | extern unsigned long boot_option_idle_override; | 727 | extern unsigned long boot_option_idle_override; |
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h index f224eb3c315..72e7b9db29b 100644 --- a/include/asm-x86/ptrace-abi.h +++ b/include/asm-x86/ptrace-abi.h | |||
@@ -73,11 +73,11 @@ | |||
73 | 73 | ||
74 | #ifdef __x86_64__ | 74 | #ifdef __x86_64__ |
75 | # define PTRACE_ARCH_PRCTL 30 | 75 | # define PTRACE_ARCH_PRCTL 30 |
76 | #else | ||
77 | # define PTRACE_SYSEMU 31 | ||
78 | # define PTRACE_SYSEMU_SINGLESTEP 32 | ||
79 | #endif | 76 | #endif |
80 | 77 | ||
78 | #define PTRACE_SYSEMU 31 | ||
79 | #define PTRACE_SYSEMU_SINGLESTEP 32 | ||
80 | |||
81 | #define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ | 81 | #define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ |
82 | 82 | ||
83 | #ifndef __ASSEMBLY__ | 83 | #ifndef __ASSEMBLY__ |
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h index dfc8601c089..646452ea9ea 100644 --- a/include/asm-x86/segment.h +++ b/include/asm-x86/segment.h | |||
@@ -1,6 +1,15 @@ | |||
1 | #ifndef _ASM_X86_SEGMENT_H_ | 1 | #ifndef _ASM_X86_SEGMENT_H_ |
2 | #define _ASM_X86_SEGMENT_H_ | 2 | #define _ASM_X86_SEGMENT_H_ |
3 | 3 | ||
4 | /* Constructor for a conventional segment GDT (or LDT) entry */ | ||
5 | /* This is a macro so it can be used in initializers */ | ||
6 | #define GDT_ENTRY(flags, base, limit) \ | ||
7 | ((((base) & 0xff000000ULL) << (56-24)) | \ | ||
8 | (((flags) & 0x0000f0ffULL) << 40) | \ | ||
9 | (((limit) & 0x000f0000ULL) << (48-16)) | \ | ||
10 | (((base) & 0x00ffffffULL) << 16) | \ | ||
11 | (((limit) & 0x0000ffffULL))) | ||
12 | |||
4 | /* Simple and small GDT entries for booting only */ | 13 | /* Simple and small GDT entries for booting only */ |
5 | 14 | ||
6 | #define GDT_ENTRY_BOOT_CS 2 | 15 | #define GDT_ENTRY_BOOT_CS 2 |
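
A worked example of the new constructor (the values are chosen here, not taken from the patch): limit bits 0-15 land in descriptor bits 0-15, base bits 0-23 in bits 16-39, the access byte and high flags nibble in bits 40-47 and 52-55, limit bits 16-19 in bits 48-51, and base bits 24-31 in bits 56-63. The classic flat 4 GiB 32-bit code segment therefore comes out as:

/* flags 0xc09b = G + D/B, present, DPL 0, execute/read code */
#define FLAT_CODE_DESC	GDT_ENTRY(0xc09b, 0, 0xfffff)
/* expands to 0x00cf9b000000ffffULL */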
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index 90ab2225e71..a07c6f1c01e 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h | |||
@@ -19,13 +19,28 @@ static inline int is_visws_box(void) { return 0; } | |||
19 | /* | 19 | /* |
20 | * Any setup quirks to be performed? | 20 | * Any setup quirks to be performed? |
21 | */ | 21 | */ |
22 | extern int (*arch_time_init_quirk)(void); | 22 | struct mpc_config_processor; |
23 | extern int (*arch_pre_intr_init_quirk)(void); | 23 | struct mpc_config_bus; |
24 | extern int (*arch_intr_init_quirk)(void); | 24 | struct mp_config_oemtable; |
25 | extern int (*arch_trap_init_quirk)(void); | 25 | struct x86_quirks { |
26 | extern char * (*arch_memory_setup_quirk)(void); | 26 | int (*arch_pre_time_init)(void); |
27 | extern int (*mach_get_smp_config_quirk)(unsigned int early); | 27 | int (*arch_time_init)(void); |
28 | extern int (*mach_find_smp_config_quirk)(unsigned int reserve); | 28 | int (*arch_pre_intr_init)(void); |
29 | int (*arch_intr_init)(void); | ||
30 | int (*arch_trap_init)(void); | ||
31 | char * (*arch_memory_setup)(void); | ||
32 | int (*mach_get_smp_config)(unsigned int early); | ||
33 | int (*mach_find_smp_config)(unsigned int reserve); | ||
34 | |||
35 | int *mpc_record; | ||
36 | int (*mpc_apic_id)(struct mpc_config_processor *m); | ||
37 | void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name); | ||
38 | void (*mpc_oem_pci_bus)(struct mpc_config_bus *m); | ||
39 | void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, | ||
40 | unsigned short oemsize); | ||
41 | }; | ||
42 | |||
43 | extern struct x86_quirks *x86_quirks; | ||
29 | 44 | ||
30 | #ifndef CONFIG_PARAVIRT | 45 | #ifndef CONFIG_PARAVIRT |
31 | #define paravirt_post_allocator_init() do {} while (0) | 46 | #define paravirt_post_allocator_init() do {} while (0) |
@@ -76,6 +91,7 @@ extern unsigned long init_pg_tables_start; | |||
76 | extern unsigned long init_pg_tables_end; | 91 | extern unsigned long init_pg_tables_end; |
77 | 92 | ||
78 | #else | 93 | #else |
94 | void __init x86_64_init_pda(void); | ||
79 | void __init x86_64_start_kernel(char *real_mode); | 95 | void __init x86_64_start_kernel(char *real_mode); |
80 | void __init x86_64_start_reservations(char *real_mode_data); | 96 | void __init x86_64_start_reservations(char *real_mode_data); |
81 | 97 | ||
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index c2784b3e0b7..3c877f74f27 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h | |||
@@ -25,6 +25,8 @@ extern cpumask_t cpu_callin_map; | |||
25 | extern void (*mtrr_hook)(void); | 25 | extern void (*mtrr_hook)(void); |
26 | extern void zap_low_mappings(void); | 26 | extern void zap_low_mappings(void); |
27 | 27 | ||
28 | extern int __cpuinit get_local_pda(int cpu); | ||
29 | |||
28 | extern int smp_num_siblings; | 30 | extern int smp_num_siblings; |
29 | extern unsigned int num_processors; | 31 | extern unsigned int num_processors; |
30 | extern cpumask_t cpu_initialized; | 32 | extern cpumask_t cpu_initialized; |
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index f5d9e74b1e4..c706a744263 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h | |||
@@ -45,12 +45,14 @@ extern int swiotlb_force; | |||
45 | 45 | ||
46 | #ifdef CONFIG_SWIOTLB | 46 | #ifdef CONFIG_SWIOTLB |
47 | extern int swiotlb; | 47 | extern int swiotlb; |
48 | extern void pci_swiotlb_init(void); | ||
48 | #else | 49 | #else |
49 | #define swiotlb 0 | 50 | #define swiotlb 0 |
51 | static inline void pci_swiotlb_init(void) | ||
52 | { | ||
53 | } | ||
50 | #endif | 54 | #endif |
51 | 55 | ||
52 | extern void pci_swiotlb_init(void); | ||
53 | |||
54 | static inline void dma_mark_clean(void *addr, size_t size) {} | 56 | static inline void dma_mark_clean(void *addr, size_t size) {} |
55 | 57 | ||
56 | #endif /* _ASM_SWIOTLB_H */ | 58 | #endif /* _ASM_SWIOTLB_H */ |
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h index 895339d2bc0..0a8f27d31d0 100644 --- a/include/asm-x86/thread_info.h +++ b/include/asm-x86/thread_info.h | |||
@@ -75,9 +75,7 @@ struct thread_info { | |||
75 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | 75 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
76 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ | 76 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ |
77 | #define TIF_IRET 5 /* force IRET */ | 77 | #define TIF_IRET 5 /* force IRET */ |
78 | #ifdef CONFIG_X86_32 | ||
79 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ | 78 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
80 | #endif | ||
81 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 79 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
82 | #define TIF_SECCOMP 8 /* secure computing */ | 80 | #define TIF_SECCOMP 8 /* secure computing */ |
83 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 81 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
@@ -100,11 +98,7 @@ struct thread_info { | |||
100 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | 98 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
101 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 99 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
102 | #define _TIF_IRET (1 << TIF_IRET) | 100 | #define _TIF_IRET (1 << TIF_IRET) |
103 | #ifdef CONFIG_X86_32 | ||
104 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) | 101 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
105 | #else | ||
106 | #define _TIF_SYSCALL_EMU 0 | ||
107 | #endif | ||
108 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 102 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
109 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 103 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
110 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) | 104 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) |
@@ -121,18 +115,27 @@ struct thread_info { | |||
121 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | 115 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
122 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) | 116 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) |
123 | 117 | ||
118 | /* work to do in syscall_trace_enter() */ | ||
119 | #define _TIF_WORK_SYSCALL_ENTRY \ | ||
120 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \ | ||
121 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) | ||
122 | |||
123 | /* work to do in syscall_trace_leave() */ | ||
124 | #define _TIF_WORK_SYSCALL_EXIT \ | ||
125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP) | ||
126 | |||
124 | /* work to do on interrupt/exception return */ | 127 | /* work to do on interrupt/exception return */ |
125 | #define _TIF_WORK_MASK \ | 128 | #define _TIF_WORK_MASK \ |
126 | (0x0000FFFF & \ | 129 | (0x0000FFFF & \ |
127 | ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP| \ | 130 | ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT| \ |
128 | _TIF_SECCOMP|_TIF_SYSCALL_EMU)) | 131 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) |
129 | 132 | ||
130 | /* work to do on any return to user space */ | 133 | /* work to do on any return to user space */ |
131 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 134 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) |
132 | 135 | ||
133 | /* Only used for 64 bit */ | 136 | /* Only used for 64 bit */ |
134 | #define _TIF_DO_NOTIFY_MASK \ | 137 | #define _TIF_DO_NOTIFY_MASK \ |
135 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) | 138 | (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) |
136 | 139 | ||
137 | /* flags to check in __switch_to() */ | 140 | /* flags to check in __switch_to() */ |
138 | #define _TIF_WORK_CTXSW \ | 141 | #define _TIF_WORK_CTXSW \ |
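
The two new aggregate masks let the syscall entry and exit paths test one word instead of checking five TIF bits separately. A rough sketch of the intended check, written in C for clarity (the real test is done in the assembly entry code):

static inline int syscall_trace_enter_needed(void)
{
	return task_thread_info(current)->flags & _TIF_WORK_SYSCALL_ENTRY;
}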
diff --git a/include/asm-x86/traps.h b/include/asm-x86/traps.h new file mode 100644 index 00000000000..a4b65a71bd6 --- /dev/null +++ b/include/asm-x86/traps.h | |||
@@ -0,0 +1,66 @@ | |||
1 | #ifndef _ASM_X86_TRAPS_H | ||
2 | #define _ASM_X86_TRAPS_H | ||
3 | |||
4 | /* Common in X86_32 and X86_64 */ | ||
5 | asmlinkage void divide_error(void); | ||
6 | asmlinkage void debug(void); | ||
7 | asmlinkage void nmi(void); | ||
8 | asmlinkage void int3(void); | ||
9 | asmlinkage void overflow(void); | ||
10 | asmlinkage void bounds(void); | ||
11 | asmlinkage void invalid_op(void); | ||
12 | asmlinkage void device_not_available(void); | ||
13 | asmlinkage void coprocessor_segment_overrun(void); | ||
14 | asmlinkage void invalid_TSS(void); | ||
15 | asmlinkage void segment_not_present(void); | ||
16 | asmlinkage void stack_segment(void); | ||
17 | asmlinkage void general_protection(void); | ||
18 | asmlinkage void page_fault(void); | ||
19 | asmlinkage void coprocessor_error(void); | ||
20 | asmlinkage void simd_coprocessor_error(void); | ||
21 | asmlinkage void alignment_check(void); | ||
22 | asmlinkage void spurious_interrupt_bug(void); | ||
23 | #ifdef CONFIG_X86_MCE | ||
24 | asmlinkage void machine_check(void); | ||
25 | #endif /* CONFIG_X86_MCE */ | ||
26 | |||
27 | void do_divide_error(struct pt_regs *, long); | ||
28 | void do_overflow(struct pt_regs *, long); | ||
29 | void do_bounds(struct pt_regs *, long); | ||
30 | void do_coprocessor_segment_overrun(struct pt_regs *, long); | ||
31 | void do_invalid_TSS(struct pt_regs *, long); | ||
32 | void do_segment_not_present(struct pt_regs *, long); | ||
33 | void do_stack_segment(struct pt_regs *, long); | ||
34 | void do_alignment_check(struct pt_regs *, long); | ||
35 | void do_invalid_op(struct pt_regs *, long); | ||
36 | void do_general_protection(struct pt_regs *, long); | ||
37 | void do_nmi(struct pt_regs *, long); | ||
38 | |||
39 | extern int panic_on_unrecovered_nmi; | ||
40 | extern int kstack_depth_to_print; | ||
41 | |||
42 | #ifdef CONFIG_X86_32 | ||
43 | |||
44 | void do_iret_error(struct pt_regs *, long); | ||
45 | void do_int3(struct pt_regs *, long); | ||
46 | void do_debug(struct pt_regs *, long); | ||
47 | void math_error(void __user *); | ||
48 | void do_coprocessor_error(struct pt_regs *, long); | ||
49 | void do_simd_coprocessor_error(struct pt_regs *, long); | ||
50 | void do_spurious_interrupt_bug(struct pt_regs *, long); | ||
51 | unsigned long patch_espfix_desc(unsigned long, unsigned long); | ||
52 | asmlinkage void math_emulate(long); | ||
53 | |||
54 | #else /* CONFIG_X86_32 */ | ||
55 | |||
56 | asmlinkage void double_fault(void); | ||
57 | |||
58 | asmlinkage void do_int3(struct pt_regs *, long); | ||
59 | asmlinkage void do_stack_segment(struct pt_regs *, long); | ||
60 | asmlinkage void do_debug(struct pt_regs *, unsigned long); | ||
61 | asmlinkage void do_coprocessor_error(struct pt_regs *); | ||
62 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *); | ||
63 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs *); | ||
64 | |||
65 | #endif /* CONFIG_X86_32 */ | ||
66 | #endif /* _ASM_X86_TRAPS_H */ | ||
diff --git a/include/asm-x86/uv/bios.h b/include/asm-x86/uv/bios.h new file mode 100644 index 00000000000..aa73362ff5d --- /dev/null +++ b/include/asm-x86/uv/bios.h | |||
@@ -0,0 +1,68 @@ | |||
1 | #ifndef _ASM_X86_BIOS_H | ||
2 | #define _ASM_X86_BIOS_H | ||
3 | |||
4 | /* | ||
5 | * BIOS layer definitions. | ||
6 | * | ||
7 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | #include <linux/rtc.h> | ||
25 | |||
26 | #define BIOS_FREQ_BASE 0x01000001 | ||
27 | |||
28 | enum { | ||
29 | BIOS_FREQ_BASE_PLATFORM = 0, | ||
30 | BIOS_FREQ_BASE_INTERVAL_TIMER = 1, | ||
31 | BIOS_FREQ_BASE_REALTIME_CLOCK = 2 | ||
32 | }; | ||
33 | |||
34 | # define BIOS_CALL(result, a0, a1, a2, a3, a4, a5, a6, a7) \ | ||
35 | do { \ | ||
36 | /* XXX - the real call goes here */ \ | ||
37 | result.status = BIOS_STATUS_UNIMPLEMENTED; \ | ||
38 | result.v0 = 0; \ | ||
39 | result.v1 = 0; \ | ||
40 | } while (0) | ||
41 | |||
42 | enum { | ||
43 | BIOS_STATUS_SUCCESS = 0, | ||
44 | BIOS_STATUS_UNIMPLEMENTED = -1, | ||
45 | BIOS_STATUS_EINVAL = -2, | ||
46 | BIOS_STATUS_ERROR = -3 | ||
47 | }; | ||
48 | |||
49 | struct uv_bios_retval { | ||
50 | /* | ||
51 | * A zero status value indicates call completed without error. | ||
52 | * A negative status value indicates reason of call failure. | ||
53 | * A positive status value indicates success but an | ||
54 | * informational value should be printed (e.g., "reboot for | ||
55 | * change to take effect"). | ||
56 | */ | ||
57 | s64 status; | ||
58 | u64 v0; | ||
59 | u64 v1; | ||
60 | u64 v2; | ||
61 | }; | ||
62 | |||
63 | extern long | ||
64 | x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second, | ||
65 | unsigned long *drift_info); | ||
66 | extern const char *x86_bios_strerror(long status); | ||
67 | |||
68 | #endif /* _ASM_X86_BIOS_H */ | ||
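
A short usage sketch built only from the prototypes above; the calling function and the printed messages are invented:

static void __init report_uv_rtc_freq(void)
{
	unsigned long ticks_per_sec, drift;
	long status;

	status = x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
				    &ticks_per_sec, &drift);
	if (status != BIOS_STATUS_SUCCESS) {
		printk(KERN_WARNING "uv bios: freq_base failed: %s\n",
		       x86_bios_strerror(status));
		return;
	}

	printk(KERN_INFO "uv bios: RTC base is %lu ticks/s\n", ticks_per_sec);
}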
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h index 86e085e003d..8e18fb80f5e 100644 --- a/include/asm-x86/vdso.h +++ b/include/asm-x86/vdso.h | |||
@@ -36,4 +36,12 @@ extern const char VDSO32_PRELINK[]; | |||
36 | extern void __user __kernel_sigreturn; | 36 | extern void __user __kernel_sigreturn; |
37 | extern void __user __kernel_rt_sigreturn; | 37 | extern void __user __kernel_rt_sigreturn; |
38 | 38 | ||
39 | /* | ||
40 | * These symbols are defined by vdso32.S to mark the bounds | ||
41 | * of the ELF DSO images included therein. | ||
42 | */ | ||
43 | extern const char vdso32_int80_start, vdso32_int80_end; | ||
44 | extern const char vdso32_syscall_start, vdso32_syscall_end; | ||
45 | extern const char vdso32_sysenter_start, vdso32_sysenter_end; | ||
46 | |||
39 | #endif /* asm-x86/vdso.h */ | 47 | #endif /* asm-x86/vdso.h */ |
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h index 2a4f9b41d68..91cb7fd5c12 100644 --- a/include/asm-x86/xen/hypercall.h +++ b/include/asm-x86/xen/hypercall.h | |||
@@ -40,83 +40,157 @@ | |||
40 | #include <xen/interface/sched.h> | 40 | #include <xen/interface/sched.h> |
41 | #include <xen/interface/physdev.h> | 41 | #include <xen/interface/physdev.h> |
42 | 42 | ||
43 | /* | ||
44 | * The hypercall asms have to meet several constraints: | ||
45 | * - Work on 32- and 64-bit. | ||
46 | * The two architectures put their arguments in different sets of | ||
47 | * registers. | ||
48 | * | ||
49 | * - Work around asm syntax quirks | ||
50 | * It isn't possible to specify one of the rNN registers in a | ||
51 | * constraint, so we use explicit register variables to get the | ||
52 | * args into the right place. | ||
53 | * | ||
54 | * - Mark all registers as potentially clobbered | ||
55 | * Even unused parameters can be clobbered by the hypervisor, so we | ||
56 | * need to make sure gcc knows it. | ||
57 | * | ||
58 | * - Avoid compiler bugs. | ||
59 | * This is the tricky part. Because x86_32 has such a constrained | ||
60 | * register set, gcc versions below 4.3 have trouble generating | ||
61 | * code when all the arg registers and memory are trashed by the | ||
62 | * asm. There are syntactically simpler ways of achieving the | ||
63 | * semantics below, but they cause the compiler to crash. | ||
64 | * | ||
65 | * The only combination I found which works is: | ||
66 | * - assign the __argX variables first | ||
67 | * - list all actually used parameters as "+r" (__argX) | ||
68 | * - clobber the rest | ||
69 | * | ||
70 | * The result certainly isn't pretty, and it really shows up cpp's | ||
71 | * weakness as a macro language. Sorry. (But let's just give thanks | ||
72 | * there aren't more than 5 arguments...) | ||
73 | */ | ||
74 | |||
43 | extern struct { char _entry[32]; } hypercall_page[]; | 75 | extern struct { char _entry[32]; } hypercall_page[]; |
44 | 76 | ||
77 | #define __HYPERCALL "call hypercall_page+%c[offset]" | ||
78 | #define __HYPERCALL_ENTRY(x) \ | ||
79 | [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0])) | ||
80 | |||
81 | #ifdef CONFIG_X86_32 | ||
82 | #define __HYPERCALL_RETREG "eax" | ||
83 | #define __HYPERCALL_ARG1REG "ebx" | ||
84 | #define __HYPERCALL_ARG2REG "ecx" | ||
85 | #define __HYPERCALL_ARG3REG "edx" | ||
86 | #define __HYPERCALL_ARG4REG "esi" | ||
87 | #define __HYPERCALL_ARG5REG "edi" | ||
88 | #else | ||
89 | #define __HYPERCALL_RETREG "rax" | ||
90 | #define __HYPERCALL_ARG1REG "rdi" | ||
91 | #define __HYPERCALL_ARG2REG "rsi" | ||
92 | #define __HYPERCALL_ARG3REG "rdx" | ||
93 | #define __HYPERCALL_ARG4REG "r10" | ||
94 | #define __HYPERCALL_ARG5REG "r8" | ||
95 | #endif | ||
96 | |||
97 | #define __HYPERCALL_DECLS \ | ||
98 | register unsigned long __res asm(__HYPERCALL_RETREG); \ | ||
99 | register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \ | ||
100 | register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \ | ||
101 | register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \ | ||
102 | register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \ | ||
103 | register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; | ||
104 | |||
105 | #define __HYPERCALL_0PARAM "=r" (__res) | ||
106 | #define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1) | ||
107 | #define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2) | ||
108 | #define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3) | ||
109 | #define __HYPERCALL_4PARAM __HYPERCALL_3PARAM, "+r" (__arg4) | ||
110 | #define __HYPERCALL_5PARAM __HYPERCALL_4PARAM, "+r" (__arg5) | ||
111 | |||
112 | #define __HYPERCALL_0ARG() | ||
113 | #define __HYPERCALL_1ARG(a1) \ | ||
114 | __HYPERCALL_0ARG() __arg1 = (unsigned long)(a1); | ||
115 | #define __HYPERCALL_2ARG(a1,a2) \ | ||
116 | __HYPERCALL_1ARG(a1) __arg2 = (unsigned long)(a2); | ||
117 | #define __HYPERCALL_3ARG(a1,a2,a3) \ | ||
118 | __HYPERCALL_2ARG(a1,a2) __arg3 = (unsigned long)(a3); | ||
119 | #define __HYPERCALL_4ARG(a1,a2,a3,a4) \ | ||
120 | __HYPERCALL_3ARG(a1,a2,a3) __arg4 = (unsigned long)(a4); | ||
121 | #define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \ | ||
122 | __HYPERCALL_4ARG(a1,a2,a3,a4) __arg5 = (unsigned long)(a5); | ||
123 | |||
124 | #define __HYPERCALL_CLOBBER5 "memory" | ||
125 | #define __HYPERCALL_CLOBBER4 __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG | ||
126 | #define __HYPERCALL_CLOBBER3 __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG | ||
127 | #define __HYPERCALL_CLOBBER2 __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG | ||
128 | #define __HYPERCALL_CLOBBER1 __HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG | ||
129 | #define __HYPERCALL_CLOBBER0 __HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG | ||
130 | |||
45 | #define _hypercall0(type, name) \ | 131 | #define _hypercall0(type, name) \ |
46 | ({ \ | 132 | ({ \ |
47 | long __res; \ | 133 | __HYPERCALL_DECLS; \ |
48 | asm volatile ( \ | 134 | __HYPERCALL_0ARG(); \ |
49 | "call %[call]" \ | 135 | asm volatile (__HYPERCALL \ |
50 | : "=a" (__res) \ | 136 | : __HYPERCALL_0PARAM \ |
51 | : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ | 137 | : __HYPERCALL_ENTRY(name) \ |
52 | : "memory" ); \ | 138 | : __HYPERCALL_CLOBBER0); \ |
53 | (type)__res; \ | 139 | (type)__res; \ |
54 | }) | 140 | }) |
55 | 141 | ||
56 | #define _hypercall1(type, name, a1) \ | 142 | #define _hypercall1(type, name, a1) \ |
57 | ({ \ | 143 | ({ \ |
58 | long __res, __ign1; \ | 144 | __HYPERCALL_DECLS; \ |
59 | asm volatile ( \ | 145 | __HYPERCALL_1ARG(a1); \ |
60 | "call %[call]" \ | 146 | asm volatile (__HYPERCALL \ |
61 | : "=a" (__res), "=b" (__ign1) \ | 147 | : __HYPERCALL_1PARAM \ |
62 | : "1" ((long)(a1)), \ | 148 | : __HYPERCALL_ENTRY(name) \ |
63 | [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ | 149 | : __HYPERCALL_CLOBBER1); \ |
64 | : "memory" ); \ | ||
65 | (type)__res; \ | 150 | (type)__res; \ |
66 | }) | 151 | }) |
67 | 152 | ||
68 | #define _hypercall2(type, name, a1, a2) \ | 153 | #define _hypercall2(type, name, a1, a2) \ |
69 | ({ \ | 154 | ({ \ |
70 | long __res, __ign1, __ign2; \ | 155 | __HYPERCALL_DECLS; \ |
71 | asm volatile ( \ | 156 | __HYPERCALL_2ARG(a1, a2); \ |
72 | "call %[call]" \ | 157 | asm volatile (__HYPERCALL \ |
73 | : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ | 158 | : __HYPERCALL_2PARAM \ |
74 | : "1" ((long)(a1)), "2" ((long)(a2)), \ | 159 | : __HYPERCALL_ENTRY(name) \ |
75 | [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ | 160 | : __HYPERCALL_CLOBBER2); \ |
76 | : "memory" ); \ | ||
77 | (type)__res; \ | 161 | (type)__res; \ |
78 | }) | 162 | }) |
79 | 163 | ||
80 | #define _hypercall3(type, name, a1, a2, a3) \ | 164 | #define _hypercall3(type, name, a1, a2, a3) \ |
81 | ({ \ | 165 | ({ \ |
82 | long __res, __ign1, __ign2, __ign3; \ | 166 | __HYPERCALL_DECLS; \ |
83 | asm volatile ( \ | 167 | __HYPERCALL_3ARG(a1, a2, a3); \ |
84 | "call %[call]" \ | 168 | asm volatile (__HYPERCALL \ |
85 | : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ | 169 | : __HYPERCALL_3PARAM \ |
86 | "=d" (__ign3) \ | 170 | : __HYPERCALL_ENTRY(name) \ |
87 | : "1" ((long)(a1)), "2" ((long)(a2)), \ | 171 | : __HYPERCALL_CLOBBER3); \ |
88 | "3" ((long)(a3)), \ | ||
89 | [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ | ||
90 | : "memory" ); \ | ||
91 | (type)__res; \ | 172 | (type)__res; \ |
92 | }) | 173 | }) |
93 | 174 | ||
94 | #define _hypercall4(type, name, a1, a2, a3, a4) \ | 175 | #define _hypercall4(type, name, a1, a2, a3, a4) \ |
95 | ({ \ | 176 | ({ \ |
96 | long __res, __ign1, __ign2, __ign3, __ign4; \ | 177 | __HYPERCALL_DECLS; \ |
97 | asm volatile ( \ | 178 | __HYPERCALL_4ARG(a1, a2, a3, a4); \ |
98 | "call %[call]" \ | 179 | asm volatile (__HYPERCALL \ |
99 | : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ | 180 | : __HYPERCALL_4PARAM \ |
100 | "=d" (__ign3), "=S" (__ign4) \ | 181 | : __HYPERCALL_ENTRY(name) \ |
101 | : "1" ((long)(a1)), "2" ((long)(a2)), \ | 182 | : __HYPERCALL_CLOBBER4); \ |
102 | "3" ((long)(a3)), "4" ((long)(a4)), \ | ||
103 | [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ | ||
104 | : "memory" ); \ | ||
105 | (type)__res; \ | 183 | (type)__res; \ |
106 | }) | 184 | }) |
107 | 185 | ||
108 | #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ | 186 | #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ |
109 | ({ \ | 187 | ({ \ |
110 | long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \ | 188 | __HYPERCALL_DECLS; \ |
111 | asm volatile ( \ | 189 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); \ |
112 | "call %[call]" \ | 190 | asm volatile (__HYPERCALL \ |
113 | : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ | 191 | : __HYPERCALL_5PARAM \ |
114 | "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ | 192 | : __HYPERCALL_ENTRY(name) \ |
115 | : "1" ((long)(a1)), "2" ((long)(a2)), \ | 193 | : __HYPERCALL_CLOBBER5); \ |
116 | "3" ((long)(a3)), "4" ((long)(a4)), \ | ||
117 | "5" ((long)(a5)), \ | ||
118 | [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ | ||
119 | : "memory" ); \ | ||
120 | (type)__res; \ | 194 | (type)__res; \ |
121 | }) | 195 | }) |
122 | 196 | ||
@@ -152,6 +226,7 @@ HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp) | |||
152 | return _hypercall2(int, stack_switch, ss, esp); | 226 | return _hypercall2(int, stack_switch, ss, esp); |
153 | } | 227 | } |
154 | 228 | ||
229 | #ifdef CONFIG_X86_32 | ||
155 | static inline int | 230 | static inline int |
156 | HYPERVISOR_set_callbacks(unsigned long event_selector, | 231 | HYPERVISOR_set_callbacks(unsigned long event_selector, |
157 | unsigned long event_address, | 232 | unsigned long event_address, |
@@ -162,6 +237,17 @@ HYPERVISOR_set_callbacks(unsigned long event_selector, | |||
162 | event_selector, event_address, | 237 | event_selector, event_address, |
163 | failsafe_selector, failsafe_address); | 238 | failsafe_selector, failsafe_address); |
164 | } | 239 | } |
240 | #else /* CONFIG_X86_64 */ | ||
241 | static inline int | ||
242 | HYPERVISOR_set_callbacks(unsigned long event_address, | ||
243 | unsigned long failsafe_address, | ||
244 | unsigned long syscall_address) | ||
245 | { | ||
246 | return _hypercall3(int, set_callbacks, | ||
247 | event_address, failsafe_address, | ||
248 | syscall_address); | ||
249 | } | ||
250 | #endif /* CONFIG_X86_{32,64} */ | ||
165 | 251 | ||
166 | static inline int | 252 | static inline int |
167 | HYPERVISOR_callback_op(int cmd, void *arg) | 253 | HYPERVISOR_callback_op(int cmd, void *arg) |
@@ -223,12 +309,12 @@ static inline int | |||
223 | HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, | 309 | HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, |
224 | unsigned long flags) | 310 | unsigned long flags) |
225 | { | 311 | { |
226 | unsigned long pte_hi = 0; | 312 | if (sizeof(new_val) == sizeof(long)) |
227 | #ifdef CONFIG_X86_PAE | 313 | return _hypercall3(int, update_va_mapping, va, |
228 | pte_hi = new_val.pte_high; | 314 | new_val.pte, flags); |
229 | #endif | 315 | else |
230 | return _hypercall4(int, update_va_mapping, va, | 316 | return _hypercall4(int, update_va_mapping, va, |
231 | new_val.pte_low, pte_hi, flags); | 317 | new_val.pte, new_val.pte >> 32, flags); |
232 | } | 318 | } |
233 | 319 | ||
234 | static inline int | 320 | static inline int |
@@ -281,12 +367,13 @@ static inline int | |||
281 | HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val, | 367 | HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val, |
282 | unsigned long flags, domid_t domid) | 368 | unsigned long flags, domid_t domid) |
283 | { | 369 | { |
284 | unsigned long pte_hi = 0; | 370 | if (sizeof(new_val) == sizeof(long)) |
285 | #ifdef CONFIG_X86_PAE | 371 | return _hypercall4(int, update_va_mapping_otherdomain, va, |
286 | pte_hi = new_val.pte_high; | 372 | new_val.pte, flags, domid); |
287 | #endif | 373 | else |
288 | return _hypercall5(int, update_va_mapping_otherdomain, va, | 374 | return _hypercall5(int, update_va_mapping_otherdomain, va, |
289 | new_val.pte_low, pte_hi, flags, domid); | 375 | new_val.pte, new_val.pte >> 32, |
376 | flags, domid); | ||
290 | } | 377 | } |
291 | 378 | ||
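The sizeof(new_val) == sizeof(long) test in the two update_va_mapping helpers above replaces the old CONFIG_X86_PAE #ifdefs: the condition is a compile-time constant, so only one branch survives, passing the pte whole on 64-bit and split into low/high halves on 32-bit PAE. A standalone sketch of the same dispatch, assuming a 64-bit pte value and using hypothetical names:

#include <stdint.h>
#include <stdio.h>

static void demo_emit_args(uint64_t pte, unsigned long flags)
{
	if (sizeof(unsigned long) == sizeof(pte)) {
		/* 64-bit build: the pte fits in one argument. */
		printf("3-arg form: pte=%#llx flags=%#lx\n",
		       (unsigned long long)pte, flags);
	} else {
		/* 32-bit PAE build: pass the low and high halves separately. */
		printf("4-arg form: lo=%#lx hi=%#lx flags=%#lx\n",
		       (unsigned long)pte, (unsigned long)(pte >> 32), flags);
	}
}

int main(void)
{
	demo_emit_args(0x8000000012345067ULL, 0x1UL);
	return 0;
}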
292 | static inline int | 379 | static inline int |
@@ -301,6 +388,14 @@ HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args) | |||
301 | return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); | 388 | return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); |
302 | } | 389 | } |
303 | 390 | ||
391 | #ifdef CONFIG_X86_64 | ||
392 | static inline int | ||
393 | HYPERVISOR_set_segment_base(int reg, unsigned long value) | ||
394 | { | ||
395 | return _hypercall2(int, set_segment_base, reg, value); | ||
396 | } | ||
397 | #endif | ||
398 | |||
304 | static inline int | 399 | static inline int |
305 | HYPERVISOR_suspend(unsigned long srec) | 400 | HYPERVISOR_suspend(unsigned long srec) |
306 | { | 401 | { |
@@ -327,14 +422,14 @@ MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, | |||
327 | { | 422 | { |
328 | mcl->op = __HYPERVISOR_update_va_mapping; | 423 | mcl->op = __HYPERVISOR_update_va_mapping; |
329 | mcl->args[0] = va; | 424 | mcl->args[0] = va; |
330 | #ifdef CONFIG_X86_PAE | 425 | if (sizeof(new_val) == sizeof(long)) { |
331 | mcl->args[1] = new_val.pte_low; | 426 | mcl->args[1] = new_val.pte; |
332 | mcl->args[2] = new_val.pte_high; | 427 | mcl->args[2] = flags; |
333 | #else | 428 | } else { |
334 | mcl->args[1] = new_val.pte_low; | 429 | mcl->args[1] = new_val.pte; |
335 | mcl->args[2] = 0; | 430 | mcl->args[2] = new_val.pte >> 32; |
336 | #endif | 431 | mcl->args[3] = flags; |
337 | mcl->args[3] = flags; | 432 | } |
338 | } | 433 | } |
339 | 434 | ||
340 | static inline void | 435 | static inline void |
@@ -354,15 +449,16 @@ MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long v | |||
354 | { | 449 | { |
355 | mcl->op = __HYPERVISOR_update_va_mapping_otherdomain; | 450 | mcl->op = __HYPERVISOR_update_va_mapping_otherdomain; |
356 | mcl->args[0] = va; | 451 | mcl->args[0] = va; |
357 | #ifdef CONFIG_X86_PAE | 452 | if (sizeof(new_val) == sizeof(long)) { |
358 | mcl->args[1] = new_val.pte_low; | 453 | mcl->args[1] = new_val.pte; |
359 | mcl->args[2] = new_val.pte_high; | 454 | mcl->args[2] = flags; |
360 | #else | 455 | mcl->args[3] = domid; |
361 | mcl->args[1] = new_val.pte_low; | 456 | } else { |
362 | mcl->args[2] = 0; | 457 | mcl->args[1] = new_val.pte; |
363 | #endif | 458 | mcl->args[2] = new_val.pte >> 32; |
364 | mcl->args[3] = flags; | 459 | mcl->args[3] = flags; |
365 | mcl->args[4] = domid; | 460 | mcl->args[4] = domid; |
461 | } | ||
366 | } | 462 | } |
367 | 463 | ||
368 | static inline void | 464 | static inline void |
@@ -370,10 +466,15 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, | |||
370 | struct desc_struct desc) | 466 | struct desc_struct desc) |
371 | { | 467 | { |
372 | mcl->op = __HYPERVISOR_update_descriptor; | 468 | mcl->op = __HYPERVISOR_update_descriptor; |
373 | mcl->args[0] = maddr; | 469 | if (sizeof(maddr) == sizeof(long)) { |
374 | mcl->args[1] = maddr >> 32; | 470 | mcl->args[0] = maddr; |
375 | mcl->args[2] = desc.a; | 471 | mcl->args[1] = *(unsigned long *)&desc; |
376 | mcl->args[3] = desc.b; | 472 | } else { |
473 | mcl->args[0] = maddr; | ||
474 | mcl->args[1] = maddr >> 32; | ||
475 | mcl->args[2] = desc.a; | ||
476 | mcl->args[3] = desc.b; | ||
477 | } | ||
377 | } | 478 | } |
378 | 479 | ||
379 | static inline void | 480 | static inline void |
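MULTI_update_descriptor applies the same sizeof() dispatch to an 8-byte GDT entry: one argument on 64-bit, the two 32-bit halves desc.a/desc.b on 32-bit. A standalone sketch, with a hypothetical demo_desc standing in for desc_struct and memcpy in place of the pointer cast:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_desc { uint32_t a, b; };	/* stand-in for desc_struct's a/b halves */

int main(void)
{
	struct demo_desc desc = { .a = 0x0000ffff, .b = 0x00cf9a00 };

	if (sizeof(unsigned long) == sizeof(desc)) {
		unsigned long packed;

		/* Same effect as *(unsigned long *)&desc on a 64-bit build. */
		memcpy(&packed, &desc, sizeof(packed));
		printf("one arg:  %#lx\n", packed);
	} else {
		printf("two args: a=%#x b=%#x\n", desc.a, desc.b);
	}
	return 0;
}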
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h index 6227000a1e8..9d810f2538a 100644 --- a/include/asm-x86/xen/interface.h +++ b/include/asm-x86/xen/interface.h | |||
@@ -1,13 +1,13 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | * arch-x86_32.h | 2 | * arch-x86_32.h |
3 | * | 3 | * |
4 | * Guest OS interface to x86 32-bit Xen. | 4 | * Guest OS interface to x86 Xen. |
5 | * | 5 | * |
6 | * Copyright (c) 2004, K A Fraser | 6 | * Copyright (c) 2004, K A Fraser |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef __XEN_PUBLIC_ARCH_X86_32_H__ | 9 | #ifndef __ASM_X86_XEN_INTERFACE_H |
10 | #define __XEN_PUBLIC_ARCH_X86_32_H__ | 10 | #define __ASM_X86_XEN_INTERFACE_H |
11 | 11 | ||
12 | #ifdef __XEN__ | 12 | #ifdef __XEN__ |
13 | #define __DEFINE_GUEST_HANDLE(name, type) \ | 13 | #define __DEFINE_GUEST_HANDLE(name, type) \ |
@@ -57,6 +57,17 @@ DEFINE_GUEST_HANDLE(long); | |||
57 | DEFINE_GUEST_HANDLE(void); | 57 | DEFINE_GUEST_HANDLE(void); |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | #ifndef HYPERVISOR_VIRT_START | ||
61 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) | ||
62 | #endif | ||
63 | |||
64 | #ifndef machine_to_phys_mapping | ||
65 | #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) | ||
66 | #endif | ||
67 | |||
68 | /* Maximum number of virtual CPUs in multi-processor guests. */ | ||
69 | #define MAX_VIRT_CPUS 32 | ||
70 | |||
60 | /* | 71 | /* |
61 | * SEGMENT DESCRIPTOR TABLES | 72 | * SEGMENT DESCRIPTOR TABLES |
62 | */ | 73 | */ |
@@ -71,58 +82,21 @@ DEFINE_GUEST_HANDLE(void); | |||
71 | #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) | 82 | #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) |
72 | 83 | ||
73 | /* | 84 | /* |
74 | * These flat segments are in the Xen-private section of every GDT. Since these | ||
75 | * are also present in the initial GDT, many OSes will be able to avoid | ||
76 | * installing their own GDT. | ||
77 | */ | ||
78 | #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ | ||
79 | #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ | ||
80 | #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ | ||
81 | #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ | ||
82 | #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ | ||
83 | #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ | ||
84 | |||
85 | #define FLAT_KERNEL_CS FLAT_RING1_CS | ||
86 | #define FLAT_KERNEL_DS FLAT_RING1_DS | ||
87 | #define FLAT_KERNEL_SS FLAT_RING1_SS | ||
88 | #define FLAT_USER_CS FLAT_RING3_CS | ||
89 | #define FLAT_USER_DS FLAT_RING3_DS | ||
90 | #define FLAT_USER_SS FLAT_RING3_SS | ||
91 | |||
92 | /* And the trap vector is... */ | ||
93 | #define TRAP_INSTR "int $0x82" | ||
94 | |||
95 | /* | ||
96 | * Virtual addresses beyond this are not modifiable by guest OSes. The | ||
97 | * machine->physical mapping table starts at this address, read-only. | ||
98 | */ | ||
99 | #ifdef CONFIG_X86_PAE | ||
100 | #define __HYPERVISOR_VIRT_START 0xF5800000 | ||
101 | #else | ||
102 | #define __HYPERVISOR_VIRT_START 0xFC000000 | ||
103 | #endif | ||
104 | |||
105 | #ifndef HYPERVISOR_VIRT_START | ||
106 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) | ||
107 | #endif | ||
108 | |||
109 | #ifndef machine_to_phys_mapping | ||
110 | #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) | ||
111 | #endif | ||
112 | |||
113 | /* Maximum number of virtual CPUs in multi-processor guests. */ | ||
114 | #define MAX_VIRT_CPUS 32 | ||
115 | |||
116 | #ifndef __ASSEMBLY__ | ||
117 | |||
118 | /* | ||
119 | * Send an array of these to HYPERVISOR_set_trap_table() | 85 | * Send an array of these to HYPERVISOR_set_trap_table() |
86 | * The privilege level specifies which modes may enter a trap via a software | ||
87 | * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate | ||
88 | * privilege levels as follows: | ||
89 | * Level == 0: No one may enter | ||
90 | * Level == 1: Kernel may enter | ||
91 | * Level == 2: Kernel may enter | ||
92 | * Level == 3: Everyone may enter | ||
120 | */ | 93 | */ |
121 | #define TI_GET_DPL(_ti) ((_ti)->flags & 3) | 94 | #define TI_GET_DPL(_ti) ((_ti)->flags & 3) |
122 | #define TI_GET_IF(_ti) ((_ti)->flags & 4) | 95 | #define TI_GET_IF(_ti) ((_ti)->flags & 4) |
123 | #define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl)) | 96 | #define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl)) |
124 | #define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2)) | 97 | #define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2)) |
125 | 98 | ||
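The TI_* helpers above pack the trap privilege level into bits 0-1 of trap_info.flags and the "clear event enable" flag into bit 2. A standalone sketch of the same bit packing, using a hypothetical stand-in struct so it builds outside the kernel:

#include <stdint.h>
#include <stdio.h>

struct demo_trap_info {			/* stand-in for struct trap_info */
	uint8_t vector;
	uint8_t flags;			/* bits 0-1: DPL, bit 2: clear event enable */
};

#define TI_GET_DPL(_ti)       ((_ti)->flags & 3)
#define TI_GET_IF(_ti)        ((_ti)->flags & 4)
#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti, _if)   ((_ti)->flags |= ((!!(_if)) << 2))

int main(void)
{
	struct demo_trap_info ti = { .vector = 0x80, .flags = 0 };

	TI_SET_DPL(&ti, 3);	/* level 3: everyone, including user space, may enter */
	TI_SET_IF(&ti, 1);	/* ask for event delivery to be disabled on entry */
	printf("dpl=%d clear-events=%d\n",
	       TI_GET_DPL(&ti), TI_GET_IF(&ti) ? 1 : 0);
	return 0;
}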
99 | #ifndef __ASSEMBLY__ | ||
126 | struct trap_info { | 100 | struct trap_info { |
127 | uint8_t vector; /* exception vector */ | 101 | uint8_t vector; /* exception vector */ |
128 | uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ | 102 | uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ |
@@ -131,32 +105,21 @@ struct trap_info { | |||
131 | }; | 105 | }; |
132 | DEFINE_GUEST_HANDLE_STRUCT(trap_info); | 106 | DEFINE_GUEST_HANDLE_STRUCT(trap_info); |
133 | 107 | ||
134 | struct cpu_user_regs { | 108 | struct arch_shared_info { |
135 | uint32_t ebx; | 109 | unsigned long max_pfn; /* max pfn that appears in table */ |
136 | uint32_t ecx; | 110 | /* Frame containing list of mfns containing list of mfns containing p2m. */ |
137 | uint32_t edx; | 111 | unsigned long pfn_to_mfn_frame_list_list; |
138 | uint32_t esi; | 112 | unsigned long nmi_reason; |
139 | uint32_t edi; | ||
140 | uint32_t ebp; | ||
141 | uint32_t eax; | ||
142 | uint16_t error_code; /* private */ | ||
143 | uint16_t entry_vector; /* private */ | ||
144 | uint32_t eip; | ||
145 | uint16_t cs; | ||
146 | uint8_t saved_upcall_mask; | ||
147 | uint8_t _pad0; | ||
148 | uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ | ||
149 | uint32_t esp; | ||
150 | uint16_t ss, _pad1; | ||
151 | uint16_t es, _pad2; | ||
152 | uint16_t ds, _pad3; | ||
153 | uint16_t fs, _pad4; | ||
154 | uint16_t gs, _pad5; | ||
155 | }; | 113 | }; |
156 | DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs); | 114 | #endif /* !__ASSEMBLY__ */ |
157 | 115 | ||
158 | typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ | 116 | #ifdef CONFIG_X86_32 |
117 | #include "interface_32.h" | ||
118 | #else | ||
119 | #include "interface_64.h" | ||
120 | #endif | ||
159 | 121 | ||
122 | #ifndef __ASSEMBLY__ | ||
160 | /* | 123 | /* |
161 | * The following is all CPU context. Note that the fpu_ctxt block is filled | 124 | * The following is all CPU context. Note that the fpu_ctxt block is filled |
162 | * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. | 125 | * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. |
@@ -173,33 +136,29 @@ struct vcpu_guest_context { | |||
173 | unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ | 136 | unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ |
174 | unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ | 137 | unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ |
175 | unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ | 138 | unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ |
139 | /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ | ||
176 | unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ | 140 | unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ |
177 | unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ | 141 | unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ |
142 | #ifdef __i386__ | ||
178 | unsigned long event_callback_cs; /* CS:EIP of event callback */ | 143 | unsigned long event_callback_cs; /* CS:EIP of event callback */ |
179 | unsigned long event_callback_eip; | 144 | unsigned long event_callback_eip; |
180 | unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ | 145 | unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ |
181 | unsigned long failsafe_callback_eip; | 146 | unsigned long failsafe_callback_eip; |
147 | #else | ||
148 | unsigned long event_callback_eip; | ||
149 | unsigned long failsafe_callback_eip; | ||
150 | unsigned long syscall_callback_eip; | ||
151 | #endif | ||
182 | unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ | 152 | unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ |
153 | #ifdef __x86_64__ | ||
154 | /* Segment base addresses. */ | ||
155 | uint64_t fs_base; | ||
156 | uint64_t gs_base_kernel; | ||
157 | uint64_t gs_base_user; | ||
158 | #endif | ||
183 | }; | 159 | }; |
184 | DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); | 160 | DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); |
185 | 161 | #endif /* !__ASSEMBLY__ */ | |
186 | struct arch_shared_info { | ||
187 | unsigned long max_pfn; /* max pfn that appears in table */ | ||
188 | /* Frame containing list of mfns containing list of mfns containing p2m. */ | ||
189 | unsigned long pfn_to_mfn_frame_list_list; | ||
190 | unsigned long nmi_reason; | ||
191 | }; | ||
192 | |||
193 | struct arch_vcpu_info { | ||
194 | unsigned long cr2; | ||
195 | unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */ | ||
196 | }; | ||
197 | |||
198 | struct xen_callback { | ||
199 | unsigned long cs; | ||
200 | unsigned long eip; | ||
201 | }; | ||
202 | #endif /* !__ASSEMBLY__ */ | ||
203 | 162 | ||
204 | /* | 163 | /* |
205 | * Prefix forces emulation of some non-trapping instructions. | 164 | * Prefix forces emulation of some non-trapping instructions. |
@@ -213,4 +172,4 @@ struct xen_callback { | |||
213 | #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" | 172 | #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" |
214 | #endif | 173 | #endif |
215 | 174 | ||
216 | #endif | 175 | #endif /* __ASM_X86_XEN_INTERFACE_H */ |
diff --git a/include/asm-x86/xen/interface_32.h b/include/asm-x86/xen/interface_32.h new file mode 100644 index 00000000000..d8ac41d5db8 --- /dev/null +++ b/include/asm-x86/xen/interface_32.h | |||
@@ -0,0 +1,97 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch-x86_32.h | ||
3 | * | ||
4 | * Guest OS interface to x86 32-bit Xen. | ||
5 | * | ||
6 | * Copyright (c) 2004, K A Fraser | ||
7 | */ | ||
8 | |||
9 | #ifndef __ASM_X86_XEN_INTERFACE_32_H | ||
10 | #define __ASM_X86_XEN_INTERFACE_32_H | ||
11 | |||
12 | |||
13 | /* | ||
14 | * These flat segments are in the Xen-private section of every GDT. Since these | ||
15 | * are also present in the initial GDT, many OSes will be able to avoid | ||
16 | * installing their own GDT. | ||
17 | */ | ||
18 | #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ | ||
19 | #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ | ||
20 | #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ | ||
21 | #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ | ||
22 | #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ | ||
23 | #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ | ||
24 | |||
25 | #define FLAT_KERNEL_CS FLAT_RING1_CS | ||
26 | #define FLAT_KERNEL_DS FLAT_RING1_DS | ||
27 | #define FLAT_KERNEL_SS FLAT_RING1_SS | ||
28 | #define FLAT_USER_CS FLAT_RING3_CS | ||
29 | #define FLAT_USER_DS FLAT_RING3_DS | ||
30 | #define FLAT_USER_SS FLAT_RING3_SS | ||
31 | |||
32 | /* And the trap vector is... */ | ||
33 | #define TRAP_INSTR "int $0x82" | ||
34 | |||
35 | /* | ||
36 | * Virtual addresses beyond this are not modifiable by guest OSes. The | ||
37 | * machine->physical mapping table starts at this address, read-only. | ||
38 | */ | ||
39 | #define __HYPERVISOR_VIRT_START 0xF5800000 | ||
40 | |||
41 | #ifndef __ASSEMBLY__ | ||
42 | |||
43 | struct cpu_user_regs { | ||
44 | uint32_t ebx; | ||
45 | uint32_t ecx; | ||
46 | uint32_t edx; | ||
47 | uint32_t esi; | ||
48 | uint32_t edi; | ||
49 | uint32_t ebp; | ||
50 | uint32_t eax; | ||
51 | uint16_t error_code; /* private */ | ||
52 | uint16_t entry_vector; /* private */ | ||
53 | uint32_t eip; | ||
54 | uint16_t cs; | ||
55 | uint8_t saved_upcall_mask; | ||
56 | uint8_t _pad0; | ||
57 | uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ | ||
58 | uint32_t esp; | ||
59 | uint16_t ss, _pad1; | ||
60 | uint16_t es, _pad2; | ||
61 | uint16_t ds, _pad3; | ||
62 | uint16_t fs, _pad4; | ||
63 | uint16_t gs, _pad5; | ||
64 | }; | ||
65 | DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs); | ||
66 | |||
67 | typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ | ||
68 | |||
69 | struct arch_vcpu_info { | ||
70 | unsigned long cr2; | ||
71 | unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */ | ||
72 | }; | ||
73 | |||
74 | struct xen_callback { | ||
75 | unsigned long cs; | ||
76 | unsigned long eip; | ||
77 | }; | ||
78 | typedef struct xen_callback xen_callback_t; | ||
79 | |||
80 | #define XEN_CALLBACK(__cs, __eip) \ | ||
81 | ((struct xen_callback){ .cs = (__cs), .eip = (unsigned long)(__eip) }) | ||
82 | #endif /* !__ASSEMBLY__ */ | ||
83 | |||
84 | |||
85 | /* | ||
86 | * Page-directory addresses above 4GB do not fit into architectural %cr3. | ||
87 | * When accessing %cr3, or equivalent field in vcpu_guest_context, guests | ||
88 | * must use the following accessor macros to pack/unpack valid MFNs. | ||
89 | * | ||
90 | * Note that Xen is using the fact that the pagetable base is always | ||
91 | * page-aligned, and putting the 12 MSB of the address into the 12 LSB | ||
92 | * of cr3. | ||
93 | */ | ||
94 | #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) | ||
95 | #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) | ||
96 | |||
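A quick round-trip check of the 32-bit cr3 packing above: the top 12 bits of the shifted page-table address rotate into cr3's low 12 bits and back out again. The mfn value is arbitrary, and unsigned is 32 bits here either way:

#include <stdio.h>

#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))

int main(void)
{
	unsigned mfn = 0x00123456;		/* arbitrary page-table MFN */
	unsigned cr3 = xen_pfn_to_cr3(mfn);

	printf("mfn=%#x cr3=%#x back=%#x\n", mfn, cr3, xen_cr3_to_pfn(cr3));
	return 0;
}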
97 | #endif /* __ASM_X86_XEN_INTERFACE_32_H */ | ||
diff --git a/include/asm-x86/xen/interface_64.h b/include/asm-x86/xen/interface_64.h new file mode 100644 index 00000000000..842266ce96e --- /dev/null +++ b/include/asm-x86/xen/interface_64.h | |||
@@ -0,0 +1,159 @@ | |||
1 | #ifndef __ASM_X86_XEN_INTERFACE_64_H | ||
2 | #define __ASM_X86_XEN_INTERFACE_64_H | ||
3 | |||
4 | /* | ||
5 | * 64-bit segment selectors | ||
6 | * These flat segments are in the Xen-private section of every GDT. Since these | ||
7 | * are also present in the initial GDT, many OSes will be able to avoid | ||
8 | * installing their own GDT. | ||
9 | */ | ||
10 | |||
11 | #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ | ||
12 | #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ | ||
13 | #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ | ||
14 | #define FLAT_RING3_DS64 0x0000 /* NULL selector */ | ||
15 | #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ | ||
16 | #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ | ||
17 | |||
18 | #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 | ||
19 | #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 | ||
20 | #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 | ||
21 | #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 | ||
22 | #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 | ||
23 | #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 | ||
24 | #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 | ||
25 | #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 | ||
26 | #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 | ||
27 | |||
28 | #define FLAT_USER_DS64 FLAT_RING3_DS64 | ||
29 | #define FLAT_USER_DS32 FLAT_RING3_DS32 | ||
30 | #define FLAT_USER_DS FLAT_USER_DS64 | ||
31 | #define FLAT_USER_CS64 FLAT_RING3_CS64 | ||
32 | #define FLAT_USER_CS32 FLAT_RING3_CS32 | ||
33 | #define FLAT_USER_CS FLAT_USER_CS64 | ||
34 | #define FLAT_USER_SS64 FLAT_RING3_SS64 | ||
35 | #define FLAT_USER_SS32 FLAT_RING3_SS32 | ||
36 | #define FLAT_USER_SS FLAT_USER_SS64 | ||
37 | |||
38 | #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 | ||
39 | #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 | ||
40 | #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 | ||
41 | #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 | ||
42 | |||
43 | #ifndef HYPERVISOR_VIRT_START | ||
44 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) | ||
45 | #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) | ||
46 | #endif | ||
47 | |||
48 | #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) | ||
49 | #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) | ||
50 | #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) | ||
51 | #ifndef machine_to_phys_mapping | ||
52 | #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) | ||
53 | #endif | ||
54 | |||
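MACH2PHYS_NR_ENTRIES divides the mapping window by 8 because each machine_to_phys_mapping entry is an unsigned long. A small sanity check of those numbers; the DEMO_ constants copy the values above:

#include <stdint.h>
#include <stdio.h>

#define DEMO_M2P_VIRT_START	0xFFFF800000000000ULL	/* __MACH2PHYS_VIRT_START */
#define DEMO_M2P_VIRT_END	0xFFFF804000000000ULL	/* __MACH2PHYS_VIRT_END */

int main(void)
{
	/* Each entry is an 8-byte unsigned long, hence the >> 3 above. */
	uint64_t entries = (DEMO_M2P_VIRT_END - DEMO_M2P_VIRT_START) >> 3;

	printf("m2p table: %llu entries, covering %llu TiB of machine frames\n",
	       (unsigned long long)entries,
	       (unsigned long long)(entries * 4096 >> 40));
	return 0;
}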
55 | /* | ||
56 | * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) | ||
57 | * @which == SEGBASE_* ; @base == 64-bit base address | ||
58 | * Returns 0 on success. | ||
59 | */ | ||
60 | #define SEGBASE_FS 0 | ||
61 | #define SEGBASE_GS_USER 1 | ||
62 | #define SEGBASE_GS_KERNEL 2 | ||
63 | #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ | ||
64 | |||
65 | /* | ||
66 | * int HYPERVISOR_iret(void) | ||
67 | * All arguments are on the kernel stack, in the following format. | ||
68 | * Never returns if successful. Current kernel context is lost. | ||
69 | * The saved CS is mapped as follows: | ||
70 | * RING0 -> RING3 kernel mode. | ||
71 | * RING1 -> RING3 kernel mode. | ||
72 | * RING2 -> RING3 kernel mode. | ||
73 | * RING3 -> RING3 user mode. | ||
74 | * However RING0 indicates that the guest kernel should return to itself | ||
75 | * directly with | ||
76 | * orb $3,1*8(%rsp) | ||
77 | * iretq | ||
78 | * If flags contains VGCF_in_syscall: | ||
79 | * Restore RAX, RIP, RFLAGS, RSP. | ||
80 | * Discard R11, RCX, CS, SS. | ||
81 | * Otherwise: | ||
82 | * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. | ||
83 | * All other registers are saved on hypercall entry and restored to user. | ||
84 | */ | ||
85 | /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ | ||
86 | #define _VGCF_in_syscall 8 | ||
87 | #define VGCF_in_syscall (1<<_VGCF_in_syscall) | ||
88 | #define VGCF_IN_SYSCALL VGCF_in_syscall | ||
89 | |||
90 | #ifndef __ASSEMBLY__ | ||
91 | |||
92 | struct iret_context { | ||
93 | /* Top of stack (%rsp at point of hypercall). */ | ||
94 | uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; | ||
95 | /* Bottom of iret stack frame. */ | ||
96 | }; | ||
97 | |||
98 | #if defined(__GNUC__) && !defined(__STRICT_ANSI__) | ||
99 | /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ | ||
100 | #define __DECL_REG(name) union { \ | ||
101 | uint64_t r ## name, e ## name; \ | ||
102 | uint32_t _e ## name; \ | ||
103 | } | ||
104 | #else | ||
105 | /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */ | ||
106 | #define __DECL_REG(name) uint64_t r ## name | ||
107 | #endif | ||
108 | |||
109 | struct cpu_user_regs { | ||
110 | uint64_t r15; | ||
111 | uint64_t r14; | ||
112 | uint64_t r13; | ||
113 | uint64_t r12; | ||
114 | __DECL_REG(bp); | ||
115 | __DECL_REG(bx); | ||
116 | uint64_t r11; | ||
117 | uint64_t r10; | ||
118 | uint64_t r9; | ||
119 | uint64_t r8; | ||
120 | __DECL_REG(ax); | ||
121 | __DECL_REG(cx); | ||
122 | __DECL_REG(dx); | ||
123 | __DECL_REG(si); | ||
124 | __DECL_REG(di); | ||
125 | uint32_t error_code; /* private */ | ||
126 | uint32_t entry_vector; /* private */ | ||
127 | __DECL_REG(ip); | ||
128 | uint16_t cs, _pad0[1]; | ||
129 | uint8_t saved_upcall_mask; | ||
130 | uint8_t _pad1[3]; | ||
131 | __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ | ||
132 | __DECL_REG(sp); | ||
133 | uint16_t ss, _pad2[3]; | ||
134 | uint16_t es, _pad3[3]; | ||
135 | uint16_t ds, _pad4[3]; | ||
136 | uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ | ||
137 | uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */ | ||
138 | }; | ||
139 | DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs); | ||
140 | |||
141 | #undef __DECL_REG | ||
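The __DECL_REG union above lets gcc code refer to the same field by either its 64-bit or 32-bit register name. A standalone check of that aliasing, with hypothetical DEMO_ names; it assumes gcc-style anonymous unions (matching the __GNUC__ guard above) and a little-endian x86 build for the low-half view:

#include <stdint.h>
#include <stdio.h>

#define DEMO_DECL_REG(name) union { \
		uint64_t r ## name, e ## name; \
		uint32_t _e ## name; \
	}

struct demo_regs {
	DEMO_DECL_REG(ax);
	DEMO_DECL_REG(ip);
};

int main(void)
{
	struct demo_regs regs;

	regs.rax = 0x1122334455667788ULL;
	/* rax and eax name the same 64-bit field; _eax is its low half here. */
	printf("rax=%#llx eax=%#llx _eax=%#x\n",
	       (unsigned long long)regs.rax,
	       (unsigned long long)regs.eax,
	       regs._eax);
	return 0;
}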
142 | |||
143 | #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) | ||
144 | #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) | ||
145 | |||
146 | struct arch_vcpu_info { | ||
147 | unsigned long cr2; | ||
148 | unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ | ||
149 | }; | ||
150 | |||
151 | typedef unsigned long xen_callback_t; | ||
152 | |||
153 | #define XEN_CALLBACK(__cs, __rip) \ | ||
154 | ((unsigned long)(__rip)) | ||
155 | |||
156 | #endif /* !__ASSEMBLY__ */ | ||
157 | |||
158 | |||
159 | #endif /* __ASM_X86_XEN_INTERFACE_64_H */ | ||
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h index 377c04591c1..05e678a8662 100644 --- a/include/asm-x86/xen/page.h +++ b/include/asm-x86/xen/page.h | |||
@@ -148,13 +148,17 @@ static inline pte_t __pte_ma(pteval_t x) | |||
148 | } | 148 | } |
149 | 149 | ||
150 | #define pmd_val_ma(v) ((v).pmd) | 150 | #define pmd_val_ma(v) ((v).pmd) |
151 | #ifdef __PAGETABLE_PUD_FOLDED | ||
151 | #define pud_val_ma(v) ((v).pgd.pgd) | 152 | #define pud_val_ma(v) ((v).pgd.pgd) |
153 | #else | ||
154 | #define pud_val_ma(v) ((v).pud) | ||
155 | #endif | ||
152 | #define __pmd_ma(x) ((pmd_t) { (x) } ) | 156 | #define __pmd_ma(x) ((pmd_t) { (x) } ) |
153 | 157 | ||
154 | #define pgd_val_ma(x) ((x).pgd) | 158 | #define pgd_val_ma(x) ((x).pgd) |
155 | 159 | ||
156 | 160 | ||
157 | xmaddr_t arbitrary_virt_to_machine(unsigned long address); | 161 | xmaddr_t arbitrary_virt_to_machine(void *address); |
158 | void make_lowmem_page_readonly(void *vaddr); | 162 | void make_lowmem_page_readonly(void *vaddr); |
159 | void make_lowmem_page_readwrite(void *vaddr); | 163 | void make_lowmem_page_readwrite(void *vaddr); |
160 | 164 | ||
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 7266124361b..32755cdf68d 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
@@ -26,6 +26,8 @@ struct debugfs_blob_wrapper { | |||
26 | unsigned long size; | 26 | unsigned long size; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | extern struct dentry *arch_debugfs_dir; | ||
30 | |||
29 | #if defined(CONFIG_DEBUG_FS) | 31 | #if defined(CONFIG_DEBUG_FS) |
30 | 32 | ||
31 | /* declared over in file.c */ | 33 | /* declared over in file.c */ |
diff --git a/include/xen/hvc-console.h b/include/xen/hvc-console.h index 98b79bc404d..c3adde32669 100644 --- a/include/xen/hvc-console.h +++ b/include/xen/hvc-console.h | |||
@@ -5,11 +5,12 @@ extern struct console xenboot_console; | |||
5 | 5 | ||
6 | #ifdef CONFIG_HVC_XEN | 6 | #ifdef CONFIG_HVC_XEN |
7 | void xen_console_resume(void); | 7 | void xen_console_resume(void); |
8 | void xen_raw_console_write(const char *str); | ||
9 | void xen_raw_printk(const char *fmt, ...); | ||
8 | #else | 10 | #else |
9 | static inline void xen_console_resume(void) { } | 11 | static inline void xen_console_resume(void) { } |
12 | static inline void xen_raw_console_write(const char *str) { } | ||
13 | static inline void xen_raw_printk(const char *fmt, ...) { } | ||
10 | #endif | 14 | #endif |
11 | 15 | ||
12 | void xen_raw_console_write(const char *str); | ||
13 | void xen_raw_printk(const char *fmt, ...); | ||
14 | |||
15 | #endif /* XEN_HVC_CONSOLE_H */ | 16 | #endif /* XEN_HVC_CONSOLE_H */ |
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h index 4aadcba31af..2ae3cd24326 100644 --- a/include/xen/interface/callback.h +++ b/include/xen/interface/callback.h | |||
@@ -82,9 +82,9 @@ | |||
82 | */ | 82 | */ |
83 | #define CALLBACKOP_register 0 | 83 | #define CALLBACKOP_register 0 |
84 | struct callback_register { | 84 | struct callback_register { |
85 | uint16_t type; | 85 | uint16_t type; |
86 | uint16_t flags; | 86 | uint16_t flags; |
87 | struct xen_callback address; | 87 | xen_callback_t address; |
88 | }; | 88 | }; |
89 | 89 | ||
90 | /* | 90 | /* |
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index a706d6a7896..883a21bba24 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h | |||
@@ -11,4 +11,7 @@ void xen_post_suspend(int suspend_cancelled); | |||
11 | void xen_mm_pin_all(void); | 11 | void xen_mm_pin_all(void); |
12 | void xen_mm_unpin_all(void); | 12 | void xen_mm_unpin_all(void); |
13 | 13 | ||
14 | void xen_timer_resume(void); | ||
15 | void xen_arch_resume(void); | ||
16 | |||
14 | #endif /* INCLUDE_XEN_OPS_H */ | 17 | #endif /* INCLUDE_XEN_OPS_H */ |