author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-12-09 01:14:38 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-12-09 01:14:38 -0500
commit		bcd6acd51f3d4d1ada201e9bc5c40a31d6d80c71 (patch)
tree		2f6dffd2d3e4dd67355a224de7e7a960335a92fd /arch/x86/include
parent		11c34c7deaeeebcee342cbc35e1bb2a6711b2431 (diff)
parent		3ff6a468b45b5dfeb0e903e56f4eb27d34b2437c (diff)
Merge commit 'origin/master' into next
Conflicts:
include/linux/kvm.h
Diffstat (limited to 'arch/x86/include')
 62 files changed, 1202 insertions(+), 494 deletions(-)
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 4a8e80cdcfa5..9f828f87ca35 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -10,6 +10,7 @@ header-y += ptrace-abi.h
 header-y += sigcontext32.h
 header-y += ucontext.h
 header-y += processor-flags.h
+header-y += hw_breakpoint.h
 
 unifdef-y += e820.h
 unifdef-y += ist.h
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h
index bb70e397aa84..7a15588e45d4 100644
--- a/arch/x86/include/asm/a.out-core.h
+++ b/arch/x86/include/asm/a.out-core.h
@@ -17,6 +17,7 @@
 
 #include <linux/user.h>
 #include <linux/elfcore.h>
+#include <asm/debugreg.h>
 
 /*
  * fill in the user structure for an a.out core dump
@@ -32,14 +33,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
			>> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
-	dump->u_debugreg[0] = current->thread.debugreg0;
-	dump->u_debugreg[1] = current->thread.debugreg1;
-	dump->u_debugreg[2] = current->thread.debugreg2;
-	dump->u_debugreg[3] = current->thread.debugreg3;
-	dump->u_debugreg[4] = 0;
-	dump->u_debugreg[5] = 0;
-	dump->u_debugreg[6] = current->thread.debugreg6;
-	dump->u_debugreg[7] = current->thread.debugreg7;
+	aout_dump_debugregs(dump);
 
	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 4518dc500903..60d2b2db0bc5 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -118,7 +118,7 @@ extern void acpi_restore_state_mem(void);
 extern unsigned long acpi_wakeup_address;
 
 /* early initialization routine */
-extern void acpi_reserve_bootmem(void);
+extern void acpi_reserve_wakeup_memory(void);
 
 /*
  * Check if the CPU can handle C2 and deeper
@@ -158,6 +158,7 @@ struct bootnode;
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
+extern int acpi_get_nodes(struct bootnode *physnodes);
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
 #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index e2077d343c33..b97f786a48d5 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -1,17 +1,13 @@
 #ifdef __ASSEMBLY__
 
-#ifdef CONFIG_X86_32
-# define X86_ALIGN .long
-#else
-# define X86_ALIGN .quad
-#endif
+#include <asm/asm.h>
 
 #ifdef CONFIG_SMP
	.macro LOCK_PREFIX
 1:	lock
	.section .smp_locks,"a"
-	.align 4
-	X86_ALIGN 1b
+	_ASM_ALIGN
+	_ASM_PTR 1b
	.previous
	.endm
 #else
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index c240efc74e00..69b74a7b877f 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -84,6 +84,7 @@ static inline void alternatives_smp_switch(int smp) {}
       "	 .byte " __stringify(feature) "\n"	/* feature bit */ \
       "	 .byte 662b-661b\n"			/* sourcelen */ \
       "	 .byte 664f-663f\n"			/* replacementlen */ \
+      "	 .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
       ".previous\n" \
       ".section .altinstr_replacement, \"ax\"\n" \
       "663:\n\t" newinstr "\n664:\n"		/* replacement */ \
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 4b180897e6b5..5af2982133b5 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -23,19 +23,13 @@
 #include <linux/irqreturn.h>
 
 #ifdef CONFIG_AMD_IOMMU
-extern int amd_iommu_init(void);
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
+
 extern void amd_iommu_detect(void);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_flush_all_domains(void);
-extern void amd_iommu_flush_all_devices(void);
-extern void amd_iommu_shutdown(void);
-extern void amd_iommu_apply_erratum_63(u16 devid);
+
 #else
-static inline int amd_iommu_init(void) { return -ENODEV; }
+
 static inline void amd_iommu_detect(void) { }
-static inline void amd_iommu_shutdown(void) { }
+
 #endif
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
new file mode 100644
index 000000000000..84786fb9a23b
--- /dev/null
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2009 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
+#define _ASM_X86_AMD_IOMMU_PROTO_H
+
+struct amd_iommu;
+
+extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_flush_all_domains(void);
+extern void amd_iommu_flush_all_devices(void);
+extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+
+#ifndef CONFIG_AMD_IOMMU_STATS
+
+static inline void amd_iommu_stats_init(void) { }
+
+#endif /* !CONFIG_AMD_IOMMU_STATS */
+
+#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 2a2cc7a78a81..ba19ad4c47d0 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -25,6 +25,11 @@
 #include <linux/spinlock.h>
 
 /*
+ * Maximum number of IOMMUs supported
+ */
+#define MAX_IOMMUS	32
+
+/*
  * some size calculation constants
  */
 #define DEV_TABLE_ENTRY_SIZE		32
@@ -206,6 +211,9 @@ extern bool amd_iommu_dump;
			printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
	} while(0);
 
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+
 /*
  * Make iterating over all IOMMUs easier
  */
@@ -226,6 +234,8 @@ extern bool amd_iommu_dump;
  * independent of their use.
  */
 struct protection_domain {
+	struct list_head list;     /* for list of all protection domains */
+	struct list_head dev_list; /* List of all devices in this domain */
	spinlock_t lock;	/* mostly used to lock the page table*/
	u16 id;			/* the domain id written to the device table */
	int mode;		/* paging mode (0-6 levels) */
@@ -233,7 +243,20 @@ struct protection_domain {
	unsigned long flags;	/* flags to find out type of domain */
	bool updated;		/* complete domain flush required */
	unsigned dev_cnt;	/* devices assigned to this domain */
+	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
	void *priv;		/* private data */
+
+};
+
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+	struct list_head list;            /* For domain->dev_list */
+	struct device *dev;               /* Device this data belong to */
+	struct device *alias;             /* The Alias Device */
+	struct protection_domain *domain; /* Domain the device is bound to */
+	atomic_t bind;                    /* Domain attach reverent count */
 };
 
 /*
@@ -291,6 +314,9 @@ struct dma_ops_domain {
 struct amd_iommu {
	struct list_head list;
 
+	/* Index within the IOMMU array */
+	int index;
+
	/* locks the accesses to the hardware */
	spinlock_t lock;
 
@@ -357,6 +383,21 @@ struct amd_iommu {
 extern struct list_head amd_iommu_list;
 
 /*
+ * Array with pointers to each IOMMU struct
+ * The indices are referenced in the protection domains
+ */
+extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/* Number of IOMMUs present in the system */
+extern int amd_iommus_present;
+
+/*
+ * Declarations for the global list of all protection domains
+ */
+extern spinlock_t amd_iommu_pd_lock;
+extern struct list_head amd_iommu_pd_list;
+
+/*
  * Structure defining one entry in the device table
  */
 struct dev_table_entry {
@@ -416,15 +457,9 @@ extern unsigned amd_iommu_aperture_order;
 /* largest PCI device id we expect translation requests for */
 extern u16 amd_iommu_last_bdf;
 
-/* data structures for protection domain handling */
-extern struct protection_domain **amd_iommu_pd_table;
-
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
-/* will be 1 if device isolation is enabled */
-extern bool amd_iommu_isolate;
-
 /*
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
@@ -462,11 +497,6 @@ struct __iommu_counter {
 #define ADD_STATS_COUNTER(name, x)
 #define SUB_STATS_COUNTER(name, x)
 
-static inline void amd_iommu_stats_init(void) { }
-
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
-/* some function prototypes */
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
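With the new dev_list / iommu_dev_data pairing, a protection domain can enumerate its attached devices directly instead of going through the removed global amd_iommu_pd_table. A minimal sketch of such a walk, assuming the caller takes domain->lock around it (the locking rule is our assumption, not something this header states):

    #include <linux/device.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <asm/amd_iommu_types.h>

    /* Illustrative only: walk every device bound to a protection domain. */
    static void walk_domain_devices(struct protection_domain *domain)
    {
            struct iommu_dev_data *dev_data;
            unsigned long flags;

            spin_lock_irqsave(&domain->lock, flags);
            list_for_each_entry(dev_data, &domain->dev_list, list)
                    dev_info(dev_data->dev, "bound to domain %u\n", domain->id);
            spin_unlock_irqrestore(&domain->lock, flags);
    }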
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 474d80d3e6cc..b4ac2cdcb64f 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -297,20 +297,20 @@ struct apic {
	int disable_esr;
 
	int dest_logical;
-	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
	unsigned long (*check_apicid_present)(int apicid);
 
	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
	void (*init_apic_ldr)(void);
 
-	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
 
	void (*setup_apic_routing)(void);
	int (*multi_timer_check)(int apic, int irq);
	int (*apicid_to_node)(int logical_apicid);
	int (*cpu_to_logical_apicid)(int cpu);
	int (*cpu_present_to_apicid)(int mps_cpu);
-	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+	void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
	void (*setup_portio_remap)(void);
	int (*check_phys_apicid_present)(int phys_apicid);
	void (*enable_apic_mode)(void);
@@ -488,6 +488,8 @@ static inline unsigned int read_apic_id(void)
 
 extern void default_setup_apic_routing(void);
 
+extern struct apic apic_noop;
+
 #ifdef CONFIG_X86_32
 
 extern struct apic apic_default;
@@ -532,9 +534,9 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
	return (unsigned int)(mask1 & mask2 & mask3);
 }
 
-static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
 {
-	return physid_isset(apicid, bitmap);
+	return physid_isset(apicid, *map);
 }
 
 static inline unsigned long default_check_apicid_present(int bit)
@@ -542,9 +544,9 @@ static inline unsigned long default_check_apicid_present(int bit)
	return physid_isset(bit, phys_cpu_present_map);
 }
 
-static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
-	return phys_map;
+	*retmap = *phys_map;
 }
 
 /* Mapping from cpu number to logical apicid */
@@ -583,11 +585,6 @@ extern int default_cpu_present_to_apicid(int mps_cpu);
 extern int default_check_phys_apicid_present(int phys_apicid);
 #endif
 
-static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
-{
-	return physid_mask_of_physid(phys_apicid);
-}
-
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_32
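physid_mask_t is a bitmap sized for MAX_LOCAL_APIC, so the callbacks above were converted from passing and returning it by value to filling a caller-provided *retmap. A hedged caller-side sketch of the new convention (the surrounding function is invented for illustration):

    #include <linux/kernel.h>
    #include <asm/apic.h>
    #include <asm/mpspec.h>

    /* Illustrative only: the callee now fills *retmap in place of
     * returning a large physid_mask_t by value. */
    static void report_apicid0_usage(void)
    {
            physid_mask_t retmap;

            apic->ioapic_phys_id_map(&phys_cpu_present_map, &retmap);
            if (apic->check_apicid_used(&retmap, 0))
                    pr_info("APIC ID 0 is already in use\n");
    }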
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 3b62da926de9..7fe3b3060f08 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -11,6 +11,12 @@
 #define IO_APIC_DEFAULT_PHYS_BASE	0xfec00000
 #define APIC_DEFAULT_PHYS_BASE		0xfee00000
 
+/*
+ * This is the IO-APIC register space as specified
+ * by Intel docs:
+ */
+#define IO_APIC_SLOT_SIZE		1024
+
 #define	APIC_ID		0x20
 
 #define	APIC_LVR	0x30
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
deleted file mode 100644
index 82f613c607ce..000000000000
--- a/arch/x86/include/asm/apicnum.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_APICNUM_H
-#define _ASM_X86_APICNUM_H
-
-/* define MAX_IO_APICS */
-#ifdef CONFIG_X86_32
-# define MAX_IO_APICS 64
-#else
-# define MAX_IO_APICS 128
-# define MAX_LOCAL_APIC 32768
-#endif
-
-#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index d9cf1cd156d2..f654d1bb17fb 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -22,14 +22,14 @@ do {								\
	     ".popsection"					\
	     : : "i" (__FILE__), "i" (__LINE__),		\
	     "i" (sizeof(struct bug_entry)));			\
-	for (;;) ;						\
+	unreachable();						\
 } while (0)
 
 #else
 #define BUG()							\
 do {								\
	asm volatile("ud2");					\
-	for (;;) ;						\
+	unreachable();						\
 } while (0)
 #endif
 
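unreachable() is the compiler.h annotation that tells the compiler control cannot pass the trapping instruction, which both drops the dead loop previously emitted after ud2 and keeps the compiler aware that BUG() never returns. An illustrative consequence (the function is ours, not from the patch):

    /* Compiles without a "control reaches end of non-void function"
     * warning: the compiler knows BUG() never returns. */
    static int must_be_positive(int x)
    {
            if (x > 0)
                    return x;
            BUG();
    }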
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 549860d3be8f..2f9047cfaaca 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -9,12 +9,13 @@
 
 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
 
+#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
+
 #ifdef CONFIG_X86_VSMP
-/* vSMP Internode cacheline shift */
-#define INTERNODE_CACHE_SHIFT (12)
 #ifdef CONFIG_SMP
 #define __cacheline_aligned_in_smp \
-	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
+	__attribute__((__aligned__(INTERNODE_CACHE_BYTES))) \
	__page_aligned_data
 #endif
 #endif
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index b54f6afe7ec4..634c40a739a6 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -12,6 +12,7 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end) { }
 static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn) { }
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 static inline void flush_dcache_page(struct page *page) { }
 static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
 static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
@@ -176,6 +177,7 @@ void clflush_cache_range(void *addr, unsigned int size);
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
+extern int kernel_set_to_readonly;
 void set_kernel_text_rw(void);
 void set_kernel_text_ro(void);
 #else
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h
index b03bedb62aa7..0918654305af 100644
--- a/arch/x86/include/asm/calgary.h
+++ b/arch/x86/include/asm/calgary.h
@@ -62,10 +62,8 @@ struct cal_chipset_ops {
 extern int use_calgary;
 
 #ifdef CONFIG_CALGARY_IOMMU
-extern int calgary_iommu_init(void);
 extern void detect_calgary(void);
 #else
-static inline int calgary_iommu_init(void) { return 1; }
 static inline void detect_calgary(void) { return; }
 #endif
 
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ee1931be6593..ffb9bb6b6c37 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -8,14 +8,50 @@
  * you need to test for the feature in boot_cpu_data.
  */
 
-#define xchg(ptr, v) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
+extern void __xchg_wrong_size(void);
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
 
 struct __xchg_dummy {
	unsigned long a[100];
 };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
+#define __xchg(x, ptr, size) \
+({ \
+	__typeof(*(ptr)) __x = (x); \
+	switch (size) { \
+	case 1: \
+		asm volatile("xchgb %b0,%1" \
+			     : "=q" (__x) \
+			     : "m" (*__xg(ptr)), "0" (__x) \
+			     : "memory"); \
+		break; \
+	case 2: \
+		asm volatile("xchgw %w0,%1" \
+			     : "=r" (__x) \
+			     : "m" (*__xg(ptr)), "0" (__x) \
+			     : "memory"); \
+		break; \
+	case 4: \
+		asm volatile("xchgl %0,%1" \
+			     : "=r" (__x) \
+			     : "m" (*__xg(ptr)), "0" (__x) \
+			     : "memory"); \
+		break; \
+	default: \
+		__xchg_wrong_size(); \
+	} \
+	__x; \
+})
+
+#define xchg(ptr, v) \
+	__xchg((v), (ptr), sizeof(*ptr))
+
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
@@ -71,57 +107,63 @@ static inline void __set_64bit_var(unsigned long long *ptr,
				 (unsigned int)((value) >> 32)) \
	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
 
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	  but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
-{
-	switch (size) {
-	case 1:
-		asm volatile("xchgb %b0,%1"
-			     : "=q" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 2:
-		asm volatile("xchgw %w0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 4:
-		asm volatile("xchgl %0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	}
-	return x;
-}
+extern void __cmpxchg_wrong_size(void);
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  */
+#define __raw_cmpxchg(ptr, old, new, size, lock) \
+({ \
+	__typeof__(*(ptr)) __ret; \
+	__typeof__(*(ptr)) __old = (old); \
+	__typeof__(*(ptr)) __new = (new); \
+	switch (size) { \
+	case 1: \
+		asm volatile(lock "cmpxchgb %b1,%2" \
+			     : "=a"(__ret) \
+			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
+			     : "memory"); \
+		break; \
+	case 2: \
+		asm volatile(lock "cmpxchgw %w1,%2" \
+			     : "=a"(__ret) \
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
+			     : "memory"); \
+		break; \
+	case 4: \
+		asm volatile(lock "cmpxchgl %1,%2" \
+			     : "=a"(__ret) \
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
+			     : "memory"); \
+		break; \
+	default: \
+		__cmpxchg_wrong_size(); \
+	} \
+	__ret; \
+})
+
+#define __cmpxchg(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
+
+#define __sync_cmpxchg(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define __cmpxchg_local(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), "")
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-				       (unsigned long)(n), \
-				       sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
-					    (unsigned long)(n), \
-					    sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
-					     (unsigned long)(n), \
-					     sizeof(*(ptr))))
+
+#define cmpxchg(ptr, old, new) \
+	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define sync_cmpxchg(ptr, old, new) \
+	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define cmpxchg_local(ptr, old, new) \
+	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
@@ -133,94 +175,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
					 (unsigned long long)(n)))
 #endif
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-/*
- * Always use locked operations when touching memory shared with a
- * hypervisor, since the system may be SMP even if the guest kernel
- * isn't.
- */
-static inline unsigned long __sync_cmpxchg(volatile void *ptr,
-					   unsigned long old,
-					   unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("lock; cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("lock; cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("lock; cmpxchgl %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("cmpxchgl %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
-
 static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
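Two effects of this rewrite are worth noting: a bad operand size now fails at link time (the undefined __xchg_wrong_size()/__cmpxchg_wrong_size() references) instead of silently doing nothing, and cmpxchg() becomes type-transparent, since old/new pass through __typeof__(*(ptr)) rather than being squeezed through unsigned long. The classic retry-loop usage is unchanged; a hedged sketch with an invented counter:

    #include <asm/cmpxchg.h>

    /* Illustrative compare-and-swap retry loop: a 16-bit operand now
     * selects the cmpxchgw case by sizeof() with no manual casts. */
    static void saturating_inc(unsigned short *counter)
    {
            unsigned short old, new;

            do {
                    old = *counter;
                    if (old == 0xffff)
                            return;         /* saturated */
                    new = old + 1;
            } while (cmpxchg(counter, old, new) != old);
    }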
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 52de72e0de8c..485ae415faec 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -3,9 +3,6 @@
 
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
-						 (ptr), sizeof(*(ptr))))
-
 #define __xg(x) ((volatile long *)(x))
 
 static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
@@ -15,167 +12,118 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
 
 #define _set_64bit set_64bit
 
+extern void __xchg_wrong_size(void);
+extern void __cmpxchg_wrong_size(void);
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
  *	  but generally the primitive is invalid, *ptr is output argument. --ANK
  */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
-{
-	switch (size) {
-	case 1:
-		asm volatile("xchgb %b0,%1"
-			     : "=q" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 2:
-		asm volatile("xchgw %w0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 4:
-		asm volatile("xchgl %k0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 8:
-		asm volatile("xchgq %0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	}
-	return x;
-}
+#define __xchg(x, ptr, size) \
+({ \
+	__typeof(*(ptr)) __x = (x); \
+	switch (size) { \
+	case 1: \
+		asm volatile("xchgb %b0,%1" \
+			     : "=q" (__x) \
+			     : "m" (*__xg(ptr)), "0" (__x) \
+			     : "memory"); \
+		break; \
+	case 2: \
+		asm volatile("xchgw %w0,%1" \
+			     : "=r" (__x) \
+			     : "m" (*__xg(ptr)), "0" (__x) \
+			     : "memory"); \
+		break; \
+	case 4: \
+		asm volatile("xchgl %k0,%1" \
+			     : "=r" (__x) \
+			     : "m" (*__xg(ptr)), "0" (__x) \
+			     : "memory"); \
+		break; \
+	case 8: \
+		asm volatile("xchgq %0,%1" \
+			     : "=r" (__x) \
+			     : "m" (*__xg(ptr)), "0" (__x) \
+			     : "memory"); \
+		break; \
+	default: \
+		__xchg_wrong_size(); \
+	} \
+	__x; \
+})
+
+#define xchg(ptr, v) \
+	__xchg((v), (ptr), sizeof(*ptr))
+
+#define __HAVE_ARCH_CMPXCHG 1
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  */
+#define __raw_cmpxchg(ptr, old, new, size, lock) \
+({ \
+	__typeof__(*(ptr)) __ret; \
+	__typeof__(*(ptr)) __old = (old); \
+	__typeof__(*(ptr)) __new = (new); \
+	switch (size) { \
+	case 1: \
+		asm volatile(lock "cmpxchgb %b1,%2" \
+			     : "=a"(__ret) \
+			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
+			     : "memory"); \
+		break; \
+	case 2: \
+		asm volatile(lock "cmpxchgw %w1,%2" \
+			     : "=a"(__ret) \
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
+			     : "memory"); \
+		break; \
+	case 4: \
+		asm volatile(lock "cmpxchgl %k1,%2" \
+			     : "=a"(__ret) \
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
+			     : "memory"); \
+		break; \
+	case 8: \
+		asm volatile(lock "cmpxchgq %1,%2" \
+			     : "=a"(__ret) \
+			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
+			     : "memory"); \
+		break; \
+	default: \
+		__cmpxchg_wrong_size(); \
+	} \
+	__ret; \
+})
 
-#define __HAVE_ARCH_CMPXCHG 1
+#define __cmpxchg(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 8:
-		asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
+#define __sync_cmpxchg(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
 
-/*
- * Always use locked operations when touching memory shared with a
- * hypervisor, since the system may be SMP even if the guest kernel
- * isn't.
- */
-static inline unsigned long __sync_cmpxchg(volatile void *ptr,
-					   unsigned long old,
-					   unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("lock; cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("lock; cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("lock; cmpxchgl %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
+#define __cmpxchg_local(ptr, old, new, size) \
+	__raw_cmpxchg((ptr), (old), (new), (size), "")
 
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("cmpxchgl %k1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 8:
-		asm volatile("cmpxchgq %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
+#define cmpxchg(ptr, old, new) \
+	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define sync_cmpxchg(ptr, old, new) \
+	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define cmpxchg_local(ptr, old, new) \
+	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 
-#define cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-				       (unsigned long)(n), sizeof(*(ptr))))
 #define cmpxchg64(ptr, o, n) \
 ({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg((ptr), (o), (n)); \
 })
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
-					     (unsigned long)(n), \
-					     sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
-					    (unsigned long)(n), \
-					    sizeof(*(ptr))))
+
 #define cmpxchg64_local(ptr, o, n) \
 ({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
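On 64-bit the same rewrite keeps the 8-byte case, so pointer-sized values go through xchg()/cmpxchg() without casting to unsigned long and back. A hedged sketch (the slot/owner names are invented for illustration):

    /* Illustrative only: atomically publish a new owner and get the
     * previous one back, with pointer types preserved end to end. */
    struct task_struct;

    static struct task_struct *swap_owner(struct task_struct **slot,
                                          struct task_struct *new_owner)
    {
            return xchg(slot, new_owner);   /* 8-byte operand -> xchgq */
    }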
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 3ea6f37be9e2..8240f76b531e 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -18,6 +18,7 @@
 #define DR_TRAP1	(0x2)		/* db1 */
 #define DR_TRAP2	(0x4)		/* db2 */
 #define DR_TRAP3	(0x8)		/* db3 */
+#define DR_TRAP_BITS	(DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
 
 #define DR_STEP		(0x4000)	/* single-step */
 #define DR_SWITCH	(0x8000)	/* task switch */
@@ -49,6 +50,8 @@
 
 #define DR_LOCAL_ENABLE_SHIFT 0    /* Extra shift to the local enable bit */
 #define DR_GLOBAL_ENABLE_SHIFT 1   /* Extra shift to the global enable bit */
+#define DR_LOCAL_ENABLE (0x1)      /* Local enable for reg 0 */
+#define DR_GLOBAL_ENABLE (0x2)     /* Global enable for reg 0 */
 #define DR_ENABLE_SIZE 2           /* 2 enable bits per register */
 
 #define DR_LOCAL_ENABLE_MASK (0x55)  /* Set local bits for all 4 regs */
@@ -67,4 +70,34 @@
 #define DR_LOCAL_SLOWDOWN (0x100)   /* Local slow the pipeline */
 #define DR_GLOBAL_SLOWDOWN (0x200)  /* Global slow the pipeline */
 
+/*
+ * HW breakpoint additions
+ */
+#ifdef __KERNEL__
+
+DECLARE_PER_CPU(unsigned long, cpu_dr7);
+
+static inline void hw_breakpoint_disable(void)
+{
+	/* Zero the control register for HW Breakpoint */
+	set_debugreg(0UL, 7);
+
+	/* Zero-out the individual HW breakpoint address registers */
+	set_debugreg(0UL, 0);
+	set_debugreg(0UL, 1);
+	set_debugreg(0UL, 2);
+	set_debugreg(0UL, 3);
+}
+
+static inline int hw_breakpoint_active(void)
+{
+	return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
+}
+
+extern void aout_dump_debugregs(struct user *dump);
+
+extern void hw_breakpoint_restore(void);
+
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_X86_DEBUGREG_H */
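The new DR_LOCAL_ENABLE/DR_GLOBAL_ENABLE values are the two enable bits that breakpoint register 0 owns at the bottom of DR7; each further register shifts them up by DR_ENABLE_SIZE, which is why hw_breakpoint_active() can test all four global bits at once with DR_GLOBAL_ENABLE_MASK (0xaa). A hedged sketch of the arithmetic (the helper is ours):

    #include <asm/debugreg.h>

    /* Illustrative only: DR7 enable bit for breakpoint register 'drnum'
     * (0..3).  E.g. drnum == 2, global: 0x2 << 4 == 0x20, one of the
     * bits in DR_GLOBAL_ENABLE_MASK (0xaa == 10101010b). */
    static unsigned long dr7_enable_bit(int drnum, int global)
    {
            unsigned long bit = global ? DR_GLOBAL_ENABLE : DR_LOCAL_ENABLE;

            return bit << (drnum * DR_ENABLE_SIZE);
    }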
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index cee34e9ca45b..029f230ab637 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -8,7 +8,7 @@ struct dev_archdata {
 #ifdef CONFIG_X86_64
 struct dma_map_ops *dma_ops;
 #endif
-#ifdef CONFIG_DMAR
+#if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
	void *iommu; /* hook for IOMMU specific extension */
 #endif
 };
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 6a25d5d42836..0f6c02f3b7d4 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -20,7 +20,8 @@
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
 #endif
 
-extern dma_addr_t bad_dma_address;
+#define DMA_ERROR_CODE	0
+
 extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
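Replacing the per-arch bad_dma_address variable with a constant DMA_ERROR_CODE of 0 gives every x86 IOMMU backend one sentinel for a failed mapping. The driver-side pattern is unchanged; a hedged sketch with invented names:

    #include <linux/dma-mapping.h>

    /* Illustrative only: always test the handle before programming it
     * into hardware; the comparison against DMA_ERROR_CODE happens
     * inside dma_mapping_error() when there is no mapping_error hook. */
    static int example_map(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;
            /* ... hand 'handle' to the device ... */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }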
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 40b4e614fe71..761249e396fe 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -61,6 +61,12 @@ struct e820map {
	struct e820entry map[E820_X_MAX];
 };
 
+#define ISA_START_ADDRESS	0xa0000
+#define ISA_END_ADDRESS		0x100000
+
+#define BIOS_BEGIN		0x000a0000
+#define BIOS_END		0x00100000
+
 #ifdef __KERNEL__
 /* see comment in arch/x86/kernel/e820.c */
 extern struct e820map e820;
@@ -126,15 +132,18 @@ extern void e820_reserve_resources(void);
 extern void e820_reserve_resources_late(void);
 extern void setup_memory_map(void);
 extern char *default_machine_specific_memory_setup(void);
-#endif /* __KERNEL__ */
-#endif /* __ASSEMBLY__ */
 
-#define ISA_START_ADDRESS	0xa0000
-#define ISA_END_ADDRESS		0x100000
-#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
+/*
+ * Returns true iff the specified range [s,e) is completely contained inside
+ * the ISA region.
+ */
+static inline bool is_ISA_range(u64 s, u64 e)
+{
+	return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
+}
 
-#define BIOS_BEGIN		0x000a0000
-#define BIOS_END		0x00100000
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
 
 #ifdef __KERNEL__
 #include <linux/ioport.h>
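Note the semantic correction folded into the inline conversion: the old macro rejected e == ISA_END_ADDRESS, while the new half-open [s, e) test accepts a range ending exactly at 1MB. Hedged examples of the new behaviour (the wrapper function is ours):

    #include <linux/types.h>
    #include <asm/e820.h>

    /* Illustrative only: e is one past the last byte of the range. */
    static bool isa_range_examples(void)
    {
            bool a = is_ISA_range(0xa0000, 0x100000); /* true: exactly the ISA hole */
            bool b = is_ISA_range(0xa0000, 0x100001); /* false: reaches past 1MB */
            bool c = is_ISA_range(0x9f000, 0xb0000);  /* false: starts below 0xa0000 */

            return a && !b && !c;
    }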
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 456a304b8172..8a024babe5e6 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -157,19 +157,6 @@ do { \
 
 #define compat_elf_check_arch(x)	elf_check_arch_ia32(x)
 
-static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
-{
-	loadsegment(fs, 0);
-	loadsegment(ds, __USER32_DS);
-	loadsegment(es, __USER32_DS);
-	load_gs_index(0);
-	regs->ip = ip;
-	regs->sp = sp;
-	regs->flags = X86_EFLAGS_IF;
-	regs->cs = __USER32_CS;
-	regs->ss = __USER32_DS;
-}
-
 static inline void elf_common_init(struct thread_struct *t,
				   struct pt_regs *regs, const u16 ds)
 {
@@ -191,11 +178,8 @@ do { \
 #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
	elf_common_init(&current->thread, regs, __USER_DS)
 
-#define compat_start_thread(regs, ip, sp) \
-do { \
-	start_ia32_thread(regs, ip, sp); \
-	set_fs(USER_DS); \
-} while (0)
+void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
+#define compat_start_thread start_thread_ia32
 
 #define COMPAT_SET_PERSONALITY(ex) \
 do { \
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index f5693c81a1db..8e8ec663a98f 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -34,7 +34,7 @@ BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
		 smp_invalidate_interrupt)
 #endif
 
-BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
+BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
 
 /*
  * every pentium local APIC has two 'local interrupts', with a
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 6cfdafa409d8..4ac5b0f33fc1 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -35,8 +35,7 @@ extern int gart_iommu_aperture_allowed;
 extern int gart_iommu_aperture_disabled;
 
 extern void early_gart_iommu_check(void);
-extern void gart_iommu_init(void);
-extern void gart_iommu_shutdown(void);
+extern int gart_iommu_init(void);
 extern void __init gart_parse_options(char *);
 extern void gart_iommu_hole_init(void);
 
@@ -48,12 +47,6 @@ extern void gart_iommu_hole_init(void);
 static inline void early_gart_iommu_check(void)
 {
 }
-static inline void gart_iommu_init(void)
-{
-}
-static inline void gart_iommu_shutdown(void)
-{
-}
 static inline void gart_parse_options(char *options)
 {
 }
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 82e3e8f01043..0f8576427cfe 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -12,7 +12,7 @@ typedef struct {
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
 #endif
-	unsigned int generic_irqs;	/* arch dependent */
+	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;
	unsigned int apic_pending_irqs;
 #ifdef CONFIG_SMP
@@ -20,11 +20,11 @@ typedef struct {
	unsigned int irq_call_count;
	unsigned int irq_tlb_count;
 #endif
-#ifdef CONFIG_X86_MCE
+#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;
-# ifdef CONFIG_X86_MCE_THRESHOLD
+#endif
+#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;
-# endif
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h new file mode 100644 index 000000000000..0675a7c4c20e --- /dev/null +++ b/arch/x86/include/asm/hw_breakpoint.h | |||
@@ -0,0 +1,73 @@ | |||
1 | #ifndef _I386_HW_BREAKPOINT_H | ||
2 | #define _I386_HW_BREAKPOINT_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #define __ARCH_HW_BREAKPOINT_H | ||
6 | |||
7 | /* | ||
8 | * The name field should probably be handled at a | ||
9 | * higher level, when displaying or resolving | ||
10 | * breakpoints for the user. | ||
11 | */ | ||
12 | struct arch_hw_breakpoint { | ||
13 | char *name; /* Contains name of the symbol to set bkpt */ | ||
14 | unsigned long address; | ||
15 | u8 len; | ||
16 | u8 type; | ||
17 | }; | ||
18 | |||
19 | #include <linux/kdebug.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/list.h> | ||
22 | |||
23 | /* Available HW breakpoint length encodings */ | ||
24 | #define X86_BREAKPOINT_LEN_1 0x40 | ||
25 | #define X86_BREAKPOINT_LEN_2 0x44 | ||
26 | #define X86_BREAKPOINT_LEN_4 0x4c | ||
27 | #define X86_BREAKPOINT_LEN_EXECUTE 0x40 | ||
28 | |||
29 | #ifdef CONFIG_X86_64 | ||
30 | #define X86_BREAKPOINT_LEN_8 0x48 | ||
31 | #endif | ||
32 | |||
33 | /* Available HW breakpoint type encodings */ | ||
34 | |||
35 | /* trigger on instruction execute */ | ||
36 | #define X86_BREAKPOINT_EXECUTE 0x80 | ||
37 | /* trigger on memory write */ | ||
38 | #define X86_BREAKPOINT_WRITE 0x81 | ||
39 | /* trigger on memory read or write */ | ||
40 | #define X86_BREAKPOINT_RW 0x83 | ||
41 | |||
42 | /* Total number of available HW breakpoint registers */ | ||
43 | #define HBP_NUM 4 | ||
44 | |||
45 | struct perf_event; | ||
46 | struct pmu; | ||
47 | |||
48 | extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len); | ||
49 | extern int arch_validate_hwbkpt_settings(struct perf_event *bp, | ||
50 | struct task_struct *tsk); | ||
51 | extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, | ||
52 | unsigned long val, void *data); | ||
53 | |||
54 | |||
55 | int arch_install_hw_breakpoint(struct perf_event *bp); | ||
56 | void arch_uninstall_hw_breakpoint(struct perf_event *bp); | ||
57 | void hw_breakpoint_pmu_read(struct perf_event *bp); | ||
58 | void hw_breakpoint_pmu_unthrottle(struct perf_event *bp); | ||
59 | |||
60 | extern void | ||
61 | arch_fill_perf_breakpoint(struct perf_event *bp); | ||
62 | |||
63 | unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type); | ||
64 | int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type); | ||
65 | |||
66 | extern int arch_bp_generic_fields(int x86_len, int x86_type, | ||
67 | int *gen_len, int *gen_type); | ||
68 | |||
69 | extern struct pmu perf_ops_bp; | ||
70 | |||
71 | #endif /* __KERNEL__ */ | ||
72 | #endif /* _I386_HW_BREAKPOINT_H */ | ||
73 | |||
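The encode_dr7()/decode_dr7() pair declared above hides the DR7 bit layout from callers. A minimal usage sketch, assuming a caller that wants a 4-byte write watchpoint in slot 0; the slot choice and the set_debugreg() call (from <asm/debugreg.h>) are illustrative, not taken from this header:

	unsigned long dr7 = 0;
	unsigned int len, type;

	/* pack slot 0 as a 4-byte write watchpoint into a DR7 value */
	dr7 |= encode_dr7(0, X86_BREAKPOINT_LEN_4, X86_BREAKPOINT_WRITE);
	set_debugreg(dr7, 7);			/* load DR7 on this CPU */

	/* a non-zero return is taken here to mean slot 0 is armed;
	 * len/type receive the raw X86_BREAKPOINT_* encodings back */
	if (decode_dr7(dr7, 0, &len, &type))
		pr_debug("bp0: len=%#x type=%#x\n", len, type);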
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index ba180d93b08c..08c48a81841f 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | /* Interrupt handlers registered during init_IRQ */ | 28 | /* Interrupt handlers registered during init_IRQ */ |
29 | extern void apic_timer_interrupt(void); | 29 | extern void apic_timer_interrupt(void); |
30 | extern void generic_interrupt(void); | 30 | extern void x86_platform_ipi(void); |
31 | extern void error_interrupt(void); | 31 | extern void error_interrupt(void); |
32 | extern void perf_pending_interrupt(void); | 32 | extern void perf_pending_interrupt(void); |
33 | 33 | ||
@@ -79,14 +79,32 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | |||
79 | int ioapic, int ioapic_pin, | 79 | int ioapic, int ioapic_pin, |
80 | int trigger, int polarity) | 80 | int trigger, int polarity) |
81 | { | 81 | { |
82 | irq_attr->ioapic = ioapic; | 82 | irq_attr->ioapic = ioapic; |
83 | irq_attr->ioapic_pin = ioapic_pin; | 83 | irq_attr->ioapic_pin = ioapic_pin; |
84 | irq_attr->trigger = trigger; | 84 | irq_attr->trigger = trigger; |
85 | irq_attr->polarity = polarity; | 85 | irq_attr->polarity = polarity; |
86 | } | 86 | } |
87 | 87 | ||
88 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, | 88 | /* |
89 | struct io_apic_irq_attr *irq_attr); | 89 | * This is performance-critical; we want it to be O(1)
90 | * | ||
91 | * Most irqs are mapped 1:1 with pins. | ||
92 | */ | ||
93 | struct irq_cfg { | ||
94 | struct irq_pin_list *irq_2_pin; | ||
95 | cpumask_var_t domain; | ||
96 | cpumask_var_t old_domain; | ||
97 | u8 vector; | ||
98 | u8 move_in_progress : 1; | ||
99 | }; | ||
100 | |||
101 | extern struct irq_cfg *irq_cfg(unsigned int); | ||
102 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); | ||
103 | extern void send_cleanup_vector(struct irq_cfg *); | ||
104 | |||
105 | struct irq_desc; | ||
106 | extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *); | ||
107 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); | ||
90 | extern void setup_ioapic_dest(void); | 108 | extern void setup_ioapic_dest(void); |
91 | 109 | ||
92 | extern void enable_IO_APIC(void); | 110 | extern void enable_IO_APIC(void); |
@@ -101,7 +119,7 @@ extern void eisa_set_level_irq(unsigned int irq); | |||
101 | /* SMP */ | 119 | /* SMP */ |
102 | extern void smp_apic_timer_interrupt(struct pt_regs *); | 120 | extern void smp_apic_timer_interrupt(struct pt_regs *); |
103 | extern void smp_spurious_interrupt(struct pt_regs *); | 121 | extern void smp_spurious_interrupt(struct pt_regs *); |
104 | extern void smp_generic_interrupt(struct pt_regs *); | 122 | extern void smp_x86_platform_ipi(struct pt_regs *); |
105 | extern void smp_error_interrupt(struct pt_regs *); | 123 | extern void smp_error_interrupt(struct pt_regs *); |
106 | #ifdef CONFIG_X86_IO_APIC | 124 | #ifdef CONFIG_X86_IO_APIC |
107 | extern asmlinkage void smp_irq_move_cleanup_interrupt(void); | 125 | extern asmlinkage void smp_irq_move_cleanup_interrupt(void); |
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h new file mode 100644 index 000000000000..205b063e3e32 --- /dev/null +++ b/arch/x86/include/asm/inat.h | |||
@@ -0,0 +1,220 @@ | |||
1 | #ifndef _ASM_X86_INAT_H | ||
2 | #define _ASM_X86_INAT_H | ||
3 | /* | ||
4 | * x86 instruction attributes | ||
5 | * | ||
6 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | * | ||
22 | */ | ||
23 | #include <asm/inat_types.h> | ||
24 | |||
25 | /* | ||
26 | * Internal bits. Don't use bitmasks directly, because these bits are | ||
27 | * unstable. You should use checking functions. | ||
28 | */ | ||
29 | |||
30 | #define INAT_OPCODE_TABLE_SIZE 256 | ||
31 | #define INAT_GROUP_TABLE_SIZE 8 | ||
32 | |||
33 | /* Legacy last prefixes */ | ||
34 | #define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */ | ||
35 | #define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */ | ||
36 | #define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */ | ||
37 | /* Other Legacy prefixes */ | ||
38 | #define INAT_PFX_LOCK 4 /* 0xF0 */ | ||
39 | #define INAT_PFX_CS 5 /* 0x2E */ | ||
40 | #define INAT_PFX_DS 6 /* 0x3E */ | ||
41 | #define INAT_PFX_ES 7 /* 0x26 */ | ||
42 | #define INAT_PFX_FS 8 /* 0x64 */ | ||
43 | #define INAT_PFX_GS 9 /* 0x65 */ | ||
44 | #define INAT_PFX_SS 10 /* 0x36 */ | ||
45 | #define INAT_PFX_ADDRSZ 11 /* 0x67 */ | ||
46 | /* x86-64 REX prefix */ | ||
47 | #define INAT_PFX_REX 12 /* 0x4X */ | ||
48 | /* AVX VEX prefixes */ | ||
49 | #define INAT_PFX_VEX2 13 /* 2-byte VEX prefix */ | ||
50 | #define INAT_PFX_VEX3 14 /* 3-byte VEX prefix */ | ||
51 | |||
52 | #define INAT_LSTPFX_MAX 3 | ||
53 | #define INAT_LGCPFX_MAX 11 | ||
54 | |||
55 | /* Immediate size */ | ||
56 | #define INAT_IMM_BYTE 1 | ||
57 | #define INAT_IMM_WORD 2 | ||
58 | #define INAT_IMM_DWORD 3 | ||
59 | #define INAT_IMM_QWORD 4 | ||
60 | #define INAT_IMM_PTR 5 | ||
61 | #define INAT_IMM_VWORD32 6 | ||
62 | #define INAT_IMM_VWORD 7 | ||
63 | |||
64 | /* Legacy prefix */ | ||
65 | #define INAT_PFX_OFFS 0 | ||
66 | #define INAT_PFX_BITS 4 | ||
67 | #define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1) | ||
68 | #define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS) | ||
69 | /* Escape opcodes */ | ||
70 | #define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS) | ||
71 | #define INAT_ESC_BITS 2 | ||
72 | #define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1) | ||
73 | #define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS) | ||
74 | /* Group opcodes (1-16) */ | ||
75 | #define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS) | ||
76 | #define INAT_GRP_BITS 5 | ||
77 | #define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1) | ||
78 | #define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS) | ||
79 | /* Immediates */ | ||
80 | #define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS) | ||
81 | #define INAT_IMM_BITS 3 | ||
82 | #define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS) | ||
83 | /* Flags */ | ||
84 | #define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS) | ||
85 | #define INAT_MODRM (1 << (INAT_FLAG_OFFS)) | ||
86 | #define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1)) | ||
87 | #define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2)) | ||
88 | #define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3)) | ||
89 | #define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4)) | ||
90 | #define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5)) | ||
91 | #define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6)) | ||
92 | /* Attribute making macros for attribute tables */ | ||
93 | #define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) | ||
94 | #define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) | ||
95 | #define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) | ||
96 | #define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) | ||
97 | |||
98 | /* Attribute search APIs */ | ||
99 | extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); | ||
100 | extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, | ||
101 | insn_byte_t last_pfx, | ||
102 | insn_attr_t esc_attr); | ||
103 | extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, | ||
104 | insn_byte_t last_pfx, | ||
105 | insn_attr_t esc_attr); | ||
106 | extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, | ||
107 | insn_byte_t vex_m, | ||
108 | insn_byte_t vex_pp); | ||
109 | |||
110 | /* Attribute checking functions */ | ||
111 | static inline int inat_is_legacy_prefix(insn_attr_t attr) | ||
112 | { | ||
113 | attr &= INAT_PFX_MASK; | ||
114 | return attr && attr <= INAT_LGCPFX_MAX; | ||
115 | } | ||
116 | |||
117 | static inline int inat_is_address_size_prefix(insn_attr_t attr) | ||
118 | { | ||
119 | return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ; | ||
120 | } | ||
121 | |||
122 | static inline int inat_is_operand_size_prefix(insn_attr_t attr) | ||
123 | { | ||
124 | return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ; | ||
125 | } | ||
126 | |||
127 | static inline int inat_is_rex_prefix(insn_attr_t attr) | ||
128 | { | ||
129 | return (attr & INAT_PFX_MASK) == INAT_PFX_REX; | ||
130 | } | ||
131 | |||
132 | static inline int inat_last_prefix_id(insn_attr_t attr) | ||
133 | { | ||
134 | if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX) | ||
135 | return 0; | ||
136 | else | ||
137 | return attr & INAT_PFX_MASK; | ||
138 | } | ||
139 | |||
140 | static inline int inat_is_vex_prefix(insn_attr_t attr) | ||
141 | { | ||
142 | attr &= INAT_PFX_MASK; | ||
143 | return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3; | ||
144 | } | ||
145 | |||
146 | static inline int inat_is_vex3_prefix(insn_attr_t attr) | ||
147 | { | ||
148 | return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3; | ||
149 | } | ||
150 | |||
151 | static inline int inat_is_escape(insn_attr_t attr) | ||
152 | { | ||
153 | return attr & INAT_ESC_MASK; | ||
154 | } | ||
155 | |||
156 | static inline int inat_escape_id(insn_attr_t attr) | ||
157 | { | ||
158 | return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS; | ||
159 | } | ||
160 | |||
161 | static inline int inat_is_group(insn_attr_t attr) | ||
162 | { | ||
163 | return attr & INAT_GRP_MASK; | ||
164 | } | ||
165 | |||
166 | static inline int inat_group_id(insn_attr_t attr) | ||
167 | { | ||
168 | return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS; | ||
169 | } | ||
170 | |||
171 | static inline int inat_group_common_attribute(insn_attr_t attr) | ||
172 | { | ||
173 | return attr & ~INAT_GRP_MASK; | ||
174 | } | ||
175 | |||
176 | static inline int inat_has_immediate(insn_attr_t attr) | ||
177 | { | ||
178 | return attr & INAT_IMM_MASK; | ||
179 | } | ||
180 | |||
181 | static inline int inat_immediate_size(insn_attr_t attr) | ||
182 | { | ||
183 | return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS; | ||
184 | } | ||
185 | |||
186 | static inline int inat_has_modrm(insn_attr_t attr) | ||
187 | { | ||
188 | return attr & INAT_MODRM; | ||
189 | } | ||
190 | |||
191 | static inline int inat_is_force64(insn_attr_t attr) | ||
192 | { | ||
193 | return attr & INAT_FORCE64; | ||
194 | } | ||
195 | |||
196 | static inline int inat_has_second_immediate(insn_attr_t attr) | ||
197 | { | ||
198 | return attr & INAT_SCNDIMM; | ||
199 | } | ||
200 | |||
201 | static inline int inat_has_moffset(insn_attr_t attr) | ||
202 | { | ||
203 | return attr & INAT_MOFFSET; | ||
204 | } | ||
205 | |||
206 | static inline int inat_has_variant(insn_attr_t attr) | ||
207 | { | ||
208 | return attr & INAT_VARIANT; | ||
209 | } | ||
210 | |||
211 | static inline int inat_accept_vex(insn_attr_t attr) | ||
212 | { | ||
213 | return attr & INAT_VEXOK; | ||
214 | } | ||
215 | |||
216 | static inline int inat_must_vex(insn_attr_t attr) | ||
217 | { | ||
218 | return attr & INAT_VEXONLY; | ||
219 | } | ||
220 | #endif | ||
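The *_OFFS/*_BITS constants above pack prefix id, escape id, group id, immediate size and flags into a single insn_attr_t word, and the inline helpers unpack it. A hedged sketch of how a decoder might walk the attribute tables; the specific byte values are illustrative:

	insn_byte_t opcode2 = 0x1f, modrm = 0x05, last_pfx = 0;	/* illustrative */
	insn_attr_t attr;

	attr = inat_get_opcode_attribute(0x0f);	/* 0x0f escapes to the 2-byte map */
	if (inat_is_escape(attr))
		attr = inat_get_escape_attribute(opcode2, last_pfx, attr);
	if (inat_is_group(attr))		/* ModRM.reg selects within the group */
		attr = inat_get_group_attribute(modrm, last_pfx, attr);
	if (inat_has_modrm(attr))
		; /* the decoder must consume a ModRM byte next */
	if (inat_has_immediate(attr))
		; /* inat_immediate_size() yields the INAT_IMM_* id, not a byte count */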
diff --git a/arch/x86/include/asm/inat_types.h b/arch/x86/include/asm/inat_types.h new file mode 100644 index 000000000000..cb3c20ce39cf --- /dev/null +++ b/arch/x86/include/asm/inat_types.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _ASM_X86_INAT_TYPES_H | ||
2 | #define _ASM_X86_INAT_TYPES_H | ||
3 | /* | ||
4 | * x86 instruction attributes | ||
5 | * | ||
6 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | /* Instruction attributes */ | ||
25 | typedef unsigned int insn_attr_t; | ||
26 | typedef unsigned char insn_byte_t; | ||
27 | typedef signed int insn_value_t; | ||
28 | |||
29 | #endif | ||
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h new file mode 100644 index 000000000000..96c2e0ad04ca --- /dev/null +++ b/arch/x86/include/asm/insn.h | |||
@@ -0,0 +1,184 @@ | |||
1 | #ifndef _ASM_X86_INSN_H | ||
2 | #define _ASM_X86_INSN_H | ||
3 | /* | ||
4 | * x86 instruction analysis | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | * Copyright (C) IBM Corporation, 2009 | ||
21 | */ | ||
22 | |||
23 | /* insn_attr_t is defined in inat.h */ | ||
24 | #include <asm/inat.h> | ||
25 | |||
26 | struct insn_field { | ||
27 | union { | ||
28 | insn_value_t value; | ||
29 | insn_byte_t bytes[4]; | ||
30 | }; | ||
31 | /* !0 if we've run insn_get_xxx() for this field */ | ||
32 | unsigned char got; | ||
33 | unsigned char nbytes; | ||
34 | }; | ||
35 | |||
36 | struct insn { | ||
37 | struct insn_field prefixes; /* | ||
38 | * Prefixes | ||
39 | * prefixes.bytes[3]: last prefix | ||
40 | */ | ||
41 | struct insn_field rex_prefix; /* REX prefix */ | ||
42 | struct insn_field vex_prefix; /* VEX prefix */ | ||
43 | struct insn_field opcode; /* | ||
44 | * opcode.bytes[0]: opcode1 | ||
45 | * opcode.bytes[1]: opcode2 | ||
46 | * opcode.bytes[2]: opcode3 | ||
47 | */ | ||
48 | struct insn_field modrm; | ||
49 | struct insn_field sib; | ||
50 | struct insn_field displacement; | ||
51 | union { | ||
52 | struct insn_field immediate; | ||
53 | struct insn_field moffset1; /* for 64bit MOV */ | ||
54 | struct insn_field immediate1; /* for 64bit imm or off16/32 */ | ||
55 | }; | ||
56 | union { | ||
57 | struct insn_field moffset2; /* for 64bit MOV */ | ||
58 | struct insn_field immediate2; /* for 64bit imm or seg16 */ | ||
59 | }; | ||
60 | |||
61 | insn_attr_t attr; | ||
62 | unsigned char opnd_bytes; | ||
63 | unsigned char addr_bytes; | ||
64 | unsigned char length; | ||
65 | unsigned char x86_64; | ||
66 | |||
67 | const insn_byte_t *kaddr; /* kernel address of insn to analyze */ | ||
68 | const insn_byte_t *next_byte; | ||
69 | }; | ||
70 | |||
71 | #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) | ||
72 | #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) | ||
73 | #define X86_MODRM_RM(modrm) ((modrm) & 0x07) | ||
74 | |||
75 | #define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6) | ||
76 | #define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3) | ||
77 | #define X86_SIB_BASE(sib) ((sib) & 0x07) | ||
78 | |||
79 | #define X86_REX_W(rex) ((rex) & 8) | ||
80 | #define X86_REX_R(rex) ((rex) & 4) | ||
81 | #define X86_REX_X(rex) ((rex) & 2) | ||
82 | #define X86_REX_B(rex) ((rex) & 1) | ||
83 | |||
84 | /* VEX bit flags */ | ||
85 | #define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */ | ||
86 | #define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */ | ||
87 | #define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */ | ||
88 | #define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */ | ||
89 | #define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */ | ||
90 | /* VEX bit fields */ | ||
91 | #define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */ | ||
92 | #define X86_VEX2_M 1 /* VEX2.M always 1 */ | ||
93 | #define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */ | ||
94 | #define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ | ||
95 | #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ | ||
96 | |||
97 | /* The last prefix is needed for two-byte and three-byte opcodes */ | ||
98 | static inline insn_byte_t insn_last_prefix(struct insn *insn) | ||
99 | { | ||
100 | return insn->prefixes.bytes[3]; | ||
101 | } | ||
102 | |||
103 | extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); | ||
104 | extern void insn_get_prefixes(struct insn *insn); | ||
105 | extern void insn_get_opcode(struct insn *insn); | ||
106 | extern void insn_get_modrm(struct insn *insn); | ||
107 | extern void insn_get_sib(struct insn *insn); | ||
108 | extern void insn_get_displacement(struct insn *insn); | ||
109 | extern void insn_get_immediate(struct insn *insn); | ||
110 | extern void insn_get_length(struct insn *insn); | ||
111 | |||
112 | /* Attribute will be determined after getting ModRM (for opcode groups) */ | ||
113 | static inline void insn_get_attribute(struct insn *insn) | ||
114 | { | ||
115 | insn_get_modrm(insn); | ||
116 | } | ||
117 | |||
118 | /* Instruction uses RIP-relative addressing */ | ||
119 | extern int insn_rip_relative(struct insn *insn); | ||
120 | |||
121 | /* Init insn for kernel text */ | ||
122 | static inline void kernel_insn_init(struct insn *insn, const void *kaddr) | ||
123 | { | ||
124 | #ifdef CONFIG_X86_64 | ||
125 | insn_init(insn, kaddr, 1); | ||
126 | #else /* CONFIG_X86_32 */ | ||
127 | insn_init(insn, kaddr, 0); | ||
128 | #endif | ||
129 | } | ||
130 | |||
131 | static inline int insn_is_avx(struct insn *insn) | ||
132 | { | ||
133 | if (!insn->prefixes.got) | ||
134 | insn_get_prefixes(insn); | ||
135 | return (insn->vex_prefix.value != 0); | ||
136 | } | ||
137 | |||
138 | static inline insn_byte_t insn_vex_m_bits(struct insn *insn) | ||
139 | { | ||
140 | if (insn->vex_prefix.nbytes == 2) /* 2-byte VEX */ | ||
141 | return X86_VEX2_M; | ||
142 | else | ||
143 | return X86_VEX3_M(insn->vex_prefix.bytes[1]); | ||
144 | } | ||
145 | |||
146 | static inline insn_byte_t insn_vex_p_bits(struct insn *insn) | ||
147 | { | ||
148 | if (insn->vex_prefix.nbytes == 2) /* 2-byte VEX */ | ||
149 | return X86_VEX_P(insn->vex_prefix.bytes[1]); | ||
150 | else | ||
151 | return X86_VEX_P(insn->vex_prefix.bytes[2]); | ||
152 | } | ||
153 | |||
154 | /* Offset of each field from kaddr */ | ||
155 | static inline int insn_offset_rex_prefix(struct insn *insn) | ||
156 | { | ||
157 | return insn->prefixes.nbytes; | ||
158 | } | ||
159 | static inline int insn_offset_vex_prefix(struct insn *insn) | ||
160 | { | ||
161 | return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes; | ||
162 | } | ||
163 | static inline int insn_offset_opcode(struct insn *insn) | ||
164 | { | ||
165 | return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes; | ||
166 | } | ||
167 | static inline int insn_offset_modrm(struct insn *insn) | ||
168 | { | ||
169 | return insn_offset_opcode(insn) + insn->opcode.nbytes; | ||
170 | } | ||
171 | static inline int insn_offset_sib(struct insn *insn) | ||
172 | { | ||
173 | return insn_offset_modrm(insn) + insn->modrm.nbytes; | ||
174 | } | ||
175 | static inline int insn_offset_displacement(struct insn *insn) | ||
176 | { | ||
177 | return insn_offset_sib(insn) + insn->sib.nbytes; | ||
178 | } | ||
179 | static inline int insn_offset_immediate(struct insn *insn) | ||
180 | { | ||
181 | return insn_offset_displacement(insn) + insn->displacement.nbytes; | ||
182 | } | ||
183 | |||
184 | #endif /* _ASM_X86_INSN_H */ | ||
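A short usage sketch of the decoder interface declared above. insn_get_length() pulls in every earlier field on demand, so a single call suffices when only the overall layout matters; kaddr here stands for whatever kernel address is being analyzed:

	struct insn insn;

	kernel_insn_init(&insn, kaddr);
	insn_get_length(&insn);		/* decodes prefixes through immediate */

	pr_debug("%u-byte insn, opcode %#x, %s\n",
		 insn.length, insn.opcode.bytes[0],
		 insn_rip_relative(&insn) ? "RIP-relative" : "not RIP-relative");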
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index fd6d21bbee6c..345c99cef152 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _ASM_X86_IOMMU_H | 1 | #ifndef _ASM_X86_IOMMU_H |
2 | #define _ASM_X86_IOMMU_H | 2 | #define _ASM_X86_IOMMU_H |
3 | 3 | ||
4 | extern void pci_iommu_shutdown(void); | ||
5 | extern void no_iommu_init(void); | ||
6 | extern struct dma_map_ops nommu_dma_ops; | 4 | extern struct dma_map_ops nommu_dma_ops; |
7 | extern int force_iommu, no_iommu; | 5 | extern int force_iommu, no_iommu; |
8 | extern int iommu_detected; | 6 | extern int iommu_detected; |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index ddda6cbed6f4..5458380b6ef8 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -34,9 +34,10 @@ static inline int irq_canonicalize(int irq) | |||
34 | #ifdef CONFIG_HOTPLUG_CPU | 34 | #ifdef CONFIG_HOTPLUG_CPU |
35 | #include <linux/cpumask.h> | 35 | #include <linux/cpumask.h> |
36 | extern void fixup_irqs(void); | 36 | extern void fixup_irqs(void); |
37 | extern void irq_force_complete_move(int); | ||
37 | #endif | 38 | #endif |
38 | 39 | ||
39 | extern void (*generic_interrupt_extension)(void); | 40 | extern void (*x86_platform_ipi_callback)(void); |
40 | extern void native_init_IRQ(void); | 41 | extern void native_init_IRQ(void); |
41 | extern bool handle_irq(unsigned irq, struct pt_regs *regs); | 42 | extern bool handle_irq(unsigned irq, struct pt_regs *regs); |
42 | 43 | ||
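x86_platform_ipi_callback is the hook behind the rename: the owner of the platform IPI vector installs its handler there and the vector's common code calls through it. A hypothetical driver-side sketch (both function names are invented for illustration):

	static void example_platform_ipi_handler(void)
	{
		/* dispatch the platform-specific event on this CPU */
	}

	static int __init example_platform_init(void)
	{
		x86_platform_ipi_callback = example_platform_ipi_handler;
		return 0;
	}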
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 5b21f0ec3df2..6a635bd39867 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -106,7 +106,7 @@ | |||
106 | /* | 106 | /* |
107 | * Generic system vector for platform specific use | 107 | * Generic system vector for platform specific use |
108 | */ | 108 | */ |
109 | #define GENERIC_INTERRUPT_VECTOR 0xed | 109 | #define X86_PLATFORM_IPI_VECTOR 0xed |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Performance monitoring pending work vector: | 112 | * Performance monitoring pending work vector: |
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h index c2d1f3b58e5f..f70e60071fe8 100644 --- a/arch/x86/include/asm/k8.h +++ b/arch/x86/include/asm/k8.h | |||
@@ -4,13 +4,16 @@ | |||
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | 5 | ||
6 | extern struct pci_device_id k8_nb_ids[]; | 6 | extern struct pci_device_id k8_nb_ids[]; |
7 | struct bootnode; | ||
7 | 8 | ||
8 | extern int early_is_k8_nb(u32 value); | 9 | extern int early_is_k8_nb(u32 value); |
9 | extern struct pci_dev **k8_northbridges; | 10 | extern struct pci_dev **k8_northbridges; |
10 | extern int num_k8_northbridges; | 11 | extern int num_k8_northbridges; |
11 | extern int cache_k8_northbridges(void); | 12 | extern int cache_k8_northbridges(void); |
12 | extern void k8_flush_garts(void); | 13 | extern void k8_flush_garts(void); |
13 | extern int k8_scan_nodes(unsigned long start, unsigned long end); | 14 | extern int k8_get_nodes(struct bootnode *nodes); |
15 | extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); | ||
16 | extern int k8_scan_nodes(void); | ||
14 | 17 | ||
15 | #ifdef CONFIG_K8_NB | 18 | #ifdef CONFIG_K8_NB |
16 | static inline struct pci_dev *node_to_k8_nb_misc(int node) | 19 | static inline struct pci_dev *node_to_k8_nb_misc(int node) |
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 4a5fe914dc59..950df434763f 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h | |||
@@ -19,6 +19,8 @@ | |||
19 | #define __KVM_HAVE_MSIX | 19 | #define __KVM_HAVE_MSIX |
20 | #define __KVM_HAVE_MCE | 20 | #define __KVM_HAVE_MCE |
21 | #define __KVM_HAVE_PIT_STATE2 | 21 | #define __KVM_HAVE_PIT_STATE2 |
22 | #define __KVM_HAVE_XEN_HVM | ||
23 | #define __KVM_HAVE_VCPU_EVENTS | ||
22 | 24 | ||
23 | /* Architectural interrupt line count. */ | 25 | /* Architectural interrupt line count. */ |
24 | #define KVM_NR_INTERRUPTS 256 | 26 | #define KVM_NR_INTERRUPTS 256 |
@@ -79,6 +81,7 @@ struct kvm_ioapic_state { | |||
79 | #define KVM_IRQCHIP_PIC_MASTER 0 | 81 | #define KVM_IRQCHIP_PIC_MASTER 0 |
80 | #define KVM_IRQCHIP_PIC_SLAVE 1 | 82 | #define KVM_IRQCHIP_PIC_SLAVE 1 |
81 | #define KVM_IRQCHIP_IOAPIC 2 | 83 | #define KVM_IRQCHIP_IOAPIC 2 |
84 | #define KVM_NR_IRQCHIPS 3 | ||
82 | 85 | ||
83 | /* for KVM_GET_REGS and KVM_SET_REGS */ | 86 | /* for KVM_GET_REGS and KVM_SET_REGS */ |
84 | struct kvm_regs { | 87 | struct kvm_regs { |
@@ -250,4 +253,31 @@ struct kvm_reinject_control { | |||
250 | __u8 pit_reinject; | 253 | __u8 pit_reinject; |
251 | __u8 reserved[31]; | 254 | __u8 reserved[31]; |
252 | }; | 255 | }; |
256 | |||
257 | /* for KVM_GET/SET_VCPU_EVENTS */ | ||
258 | struct kvm_vcpu_events { | ||
259 | struct { | ||
260 | __u8 injected; | ||
261 | __u8 nr; | ||
262 | __u8 has_error_code; | ||
263 | __u8 pad; | ||
264 | __u32 error_code; | ||
265 | } exception; | ||
266 | struct { | ||
267 | __u8 injected; | ||
268 | __u8 nr; | ||
269 | __u8 soft; | ||
270 | __u8 pad; | ||
271 | } interrupt; | ||
272 | struct { | ||
273 | __u8 injected; | ||
274 | __u8 pending; | ||
275 | __u8 masked; | ||
276 | __u8 pad; | ||
277 | } nmi; | ||
278 | __u32 sipi_vector; | ||
279 | __u32 flags; | ||
280 | __u32 reserved[10]; | ||
281 | }; | ||
282 | |||
253 | #endif /* _ASM_X86_KVM_H */ | 283 | #endif /* _ASM_X86_KVM_H */ |
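__KVM_HAVE_VCPU_EVENTS advertises the kvm_vcpu_events layout to userspace; the companion KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS ioctls let a VMM save and restore pending exception, interrupt and NMI state, e.g. across migration. A hedged userspace sketch; vcpu_fd is assumed to be an already-created vcpu file descriptor:

	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		err(1, "KVM_GET_VCPU_EVENTS");

	/* ... transfer 'events' alongside the register state ... */

	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
		err(1, "KVM_SET_VCPU_EVENTS");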
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index b7ed2c423116..7c18e1230f54 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -129,7 +129,7 @@ struct decode_cache { | |||
129 | u8 seg_override; | 129 | u8 seg_override; |
130 | unsigned int d; | 130 | unsigned int d; |
131 | unsigned long regs[NR_VCPU_REGS]; | 131 | unsigned long regs[NR_VCPU_REGS]; |
132 | unsigned long eip; | 132 | unsigned long eip, eip_orig; |
133 | /* modrm */ | 133 | /* modrm */ |
134 | u8 modrm; | 134 | u8 modrm; |
135 | u8 modrm_mod; | 135 | u8 modrm_mod; |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d83892226f73..4f865e8b8540 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -354,7 +354,6 @@ struct kvm_vcpu_arch { | |||
354 | unsigned int time_offset; | 354 | unsigned int time_offset; |
355 | struct page *time_page; | 355 | struct page *time_page; |
356 | 356 | ||
357 | bool singlestep; /* guest is single stepped by KVM */ | ||
358 | bool nmi_pending; | 357 | bool nmi_pending; |
359 | bool nmi_injected; | 358 | bool nmi_injected; |
360 | 359 | ||
@@ -371,6 +370,10 @@ struct kvm_vcpu_arch { | |||
371 | u64 mcg_status; | 370 | u64 mcg_status; |
372 | u64 mcg_ctl; | 371 | u64 mcg_ctl; |
373 | u64 *mce_banks; | 372 | u64 *mce_banks; |
373 | |||
374 | /* used for guest single stepping over the given code position */ | ||
375 | u16 singlestep_cs; | ||
376 | unsigned long singlestep_rip; | ||
374 | }; | 377 | }; |
375 | 378 | ||
376 | struct kvm_mem_alias { | 379 | struct kvm_mem_alias { |
@@ -397,7 +400,6 @@ struct kvm_arch{ | |||
397 | struct kvm_pic *vpic; | 400 | struct kvm_pic *vpic; |
398 | struct kvm_ioapic *vioapic; | 401 | struct kvm_ioapic *vioapic; |
399 | struct kvm_pit *vpit; | 402 | struct kvm_pit *vpit; |
400 | struct hlist_head irq_ack_notifier_list; | ||
401 | int vapics_in_nmi_mode; | 403 | int vapics_in_nmi_mode; |
402 | 404 | ||
403 | unsigned int tss_addr; | 405 | unsigned int tss_addr; |
@@ -410,8 +412,10 @@ struct kvm_arch{ | |||
410 | gpa_t ept_identity_map_addr; | 412 | gpa_t ept_identity_map_addr; |
411 | 413 | ||
412 | unsigned long irq_sources_bitmap; | 414 | unsigned long irq_sources_bitmap; |
413 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | ||
414 | u64 vm_init_tsc; | 415 | u64 vm_init_tsc; |
416 | s64 kvmclock_offset; | ||
417 | |||
418 | struct kvm_xen_hvm_config xen_hvm_config; | ||
415 | }; | 419 | }; |
416 | 420 | ||
417 | struct kvm_vm_stat { | 421 | struct kvm_vm_stat { |
@@ -461,7 +465,7 @@ struct descriptor_table { | |||
461 | struct kvm_x86_ops { | 465 | struct kvm_x86_ops { |
462 | int (*cpu_has_kvm_support)(void); /* __init */ | 466 | int (*cpu_has_kvm_support)(void); /* __init */ |
463 | int (*disabled_by_bios)(void); /* __init */ | 467 | int (*disabled_by_bios)(void); /* __init */ |
464 | void (*hardware_enable)(void *dummy); /* __init */ | 468 | int (*hardware_enable)(void *dummy); |
465 | void (*hardware_disable)(void *dummy); | 469 | void (*hardware_disable)(void *dummy); |
466 | void (*check_processor_compatibility)(void *rtn); | 470 | void (*check_processor_compatibility)(void *rtn); |
467 | int (*hardware_setup)(void); /* __init */ | 471 | int (*hardware_setup)(void); /* __init */ |
@@ -477,8 +481,8 @@ struct kvm_x86_ops { | |||
477 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); | 481 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); |
478 | void (*vcpu_put)(struct kvm_vcpu *vcpu); | 482 | void (*vcpu_put)(struct kvm_vcpu *vcpu); |
479 | 483 | ||
480 | int (*set_guest_debug)(struct kvm_vcpu *vcpu, | 484 | void (*set_guest_debug)(struct kvm_vcpu *vcpu, |
481 | struct kvm_guest_debug *dbg); | 485 | struct kvm_guest_debug *dbg); |
482 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); | 486 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); |
483 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | 487 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); |
484 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); | 488 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); |
@@ -506,8 +510,8 @@ struct kvm_x86_ops { | |||
506 | 510 | ||
507 | void (*tlb_flush)(struct kvm_vcpu *vcpu); | 511 | void (*tlb_flush)(struct kvm_vcpu *vcpu); |
508 | 512 | ||
509 | void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); | 513 | void (*run)(struct kvm_vcpu *vcpu); |
510 | int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); | 514 | int (*handle_exit)(struct kvm_vcpu *vcpu); |
511 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); | 515 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
512 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 516 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
513 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 517 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
@@ -519,6 +523,8 @@ struct kvm_x86_ops { | |||
519 | bool has_error_code, u32 error_code); | 523 | bool has_error_code, u32 error_code); |
520 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); | 524 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); |
521 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); | 525 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); |
526 | bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); | ||
527 | void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked); | ||
522 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); | 528 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); |
523 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); | 529 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); |
524 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); | 530 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); |
@@ -568,7 +574,7 @@ enum emulation_result { | |||
568 | #define EMULTYPE_NO_DECODE (1 << 0) | 574 | #define EMULTYPE_NO_DECODE (1 << 0) |
569 | #define EMULTYPE_TRAP_UD (1 << 1) | 575 | #define EMULTYPE_TRAP_UD (1 << 1) |
570 | #define EMULTYPE_SKIP (1 << 2) | 576 | #define EMULTYPE_SKIP (1 << 2) |
571 | int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run, | 577 | int emulate_instruction(struct kvm_vcpu *vcpu, |
572 | unsigned long cr2, u16 error_code, int emulation_type); | 578 | unsigned long cr2, u16 error_code, int emulation_type); |
573 | void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context); | 579 | void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context); |
574 | void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); | 580 | void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); |
@@ -585,9 +591,9 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | |||
585 | 591 | ||
586 | struct x86_emulate_ctxt; | 592 | struct x86_emulate_ctxt; |
587 | 593 | ||
588 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 594 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, |
589 | int size, unsigned port); | 595 | int size, unsigned port); |
590 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 596 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, |
591 | int size, unsigned long count, int down, | 597 | int size, unsigned long count, int down, |
592 | gva_t address, int rep, unsigned port); | 598 | gva_t address, int rep, unsigned port); |
593 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); | 599 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); |
@@ -616,6 +622,9 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); | |||
616 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | 622 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); |
617 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); | 623 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); |
618 | 624 | ||
625 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); | ||
626 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | ||
627 | |||
619 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); | 628 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); |
620 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | 629 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); |
621 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, | 630 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, |
@@ -802,4 +811,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | |||
802 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | 811 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
803 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); | 812 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
804 | 813 | ||
814 | void kvm_define_shared_msr(unsigned index, u32 msr); | ||
815 | void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); | ||
816 | |||
805 | #endif /* _ASM_X86_KVM_HOST_H */ | 817 | #endif /* _ASM_X86_KVM_HOST_H */ |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index f1363b72364f..858baa061cfc 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -108,6 +108,8 @@ struct mce_log { | |||
108 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) | 108 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) |
109 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) | 109 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) |
110 | 110 | ||
111 | extern struct atomic_notifier_head x86_mce_decoder_chain; | ||
112 | |||
111 | #ifdef __KERNEL__ | 113 | #ifdef __KERNEL__ |
112 | 114 | ||
113 | #include <linux/percpu.h> | 115 | #include <linux/percpu.h> |
@@ -118,9 +120,11 @@ extern int mce_disabled; | |||
118 | extern int mce_p5_enabled; | 120 | extern int mce_p5_enabled; |
119 | 121 | ||
120 | #ifdef CONFIG_X86_MCE | 122 | #ifdef CONFIG_X86_MCE |
121 | void mcheck_init(struct cpuinfo_x86 *c); | 123 | int mcheck_init(void); |
124 | void mcheck_cpu_init(struct cpuinfo_x86 *c); | ||
122 | #else | 125 | #else |
123 | static inline void mcheck_init(struct cpuinfo_x86 *c) {} | 126 | static inline int mcheck_init(void) { return 0; } |
127 | static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {} | ||
124 | #endif | 128 | #endif |
125 | 129 | ||
126 | #ifdef CONFIG_X86_ANCIENT_MCE | 130 | #ifdef CONFIG_X86_ANCIENT_MCE |
@@ -214,5 +218,11 @@ void intel_init_thermal(struct cpuinfo_x86 *c); | |||
214 | 218 | ||
215 | void mce_log_therm_throt_event(__u64 status); | 219 | void mce_log_therm_throt_event(__u64 status); |
216 | 220 | ||
221 | #ifdef CONFIG_X86_THERMAL_VECTOR | ||
222 | extern void mcheck_intel_therm_init(void); | ||
223 | #else | ||
224 | static inline void mcheck_intel_therm_init(void) { } | ||
225 | #endif | ||
226 | |||
217 | #endif /* __KERNEL__ */ | 227 | #endif /* __KERNEL__ */ |
218 | #endif /* _ASM_X86_MCE_H */ | 228 | #endif /* _ASM_X86_MCE_H */ |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index ef51b501e22a..c24ca9a56458 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -12,6 +12,8 @@ struct device; | |||
12 | enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; | 12 | enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; |
13 | 13 | ||
14 | struct microcode_ops { | 14 | struct microcode_ops { |
15 | void (*init)(struct device *device); | ||
16 | void (*fini)(void); | ||
15 | enum ucode_state (*request_microcode_user) (int cpu, | 17 | enum ucode_state (*request_microcode_user) (int cpu, |
16 | const void __user *buf, size_t size); | 18 | const void __user *buf, size_t size); |
17 | 19 | ||
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 79c94500c0bb..d8bf23a88d05 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -71,12 +71,7 @@ static inline void early_get_smp_config(void) | |||
71 | 71 | ||
72 | static inline void find_smp_config(void) | 72 | static inline void find_smp_config(void) |
73 | { | 73 | { |
74 | x86_init.mpparse.find_smp_config(1); | 74 | x86_init.mpparse.find_smp_config(); |
75 | } | ||
76 | |||
77 | static inline void early_find_smp_config(void) | ||
78 | { | ||
79 | x86_init.mpparse.find_smp_config(0); | ||
80 | } | 75 | } |
81 | 76 | ||
82 | #ifdef CONFIG_X86_MPPARSE | 77 | #ifdef CONFIG_X86_MPPARSE |
@@ -89,7 +84,7 @@ extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str); | |||
89 | # else | 84 | # else |
90 | # define default_mpc_oem_bus_info NULL | 85 | # define default_mpc_oem_bus_info NULL |
91 | # endif | 86 | # endif |
92 | extern void default_find_smp_config(unsigned int reserve); | 87 | extern void default_find_smp_config(void); |
93 | extern void default_get_smp_config(unsigned int early); | 88 | extern void default_get_smp_config(unsigned int early); |
94 | #else | 89 | #else |
95 | static inline void early_reserve_e820_mpc_new(void) { } | 90 | static inline void early_reserve_e820_mpc_new(void) { } |
@@ -97,7 +92,7 @@ static inline void early_reserve_e820_mpc_new(void) { } | |||
97 | #define default_mpc_apic_id NULL | 92 | #define default_mpc_apic_id NULL |
98 | #define default_smp_read_mpc_oem NULL | 93 | #define default_smp_read_mpc_oem NULL |
99 | #define default_mpc_oem_bus_info NULL | 94 | #define default_mpc_oem_bus_info NULL |
100 | #define default_find_smp_config x86_init_uint_noop | 95 | #define default_find_smp_config x86_init_noop |
101 | #define default_get_smp_config x86_init_uint_noop | 96 | #define default_get_smp_config x86_init_uint_noop |
102 | #endif | 97 | #endif |
103 | 98 | ||
@@ -163,14 +158,16 @@ typedef struct physid_mask physid_mask_t; | |||
163 | #define physids_shift_left(d, s, n) \ | 158 | #define physids_shift_left(d, s, n) \ |
164 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | 159 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) |
165 | 160 | ||
166 | #define physids_coerce(map) ((map).mask[0]) | 161 | static inline unsigned long physids_coerce(physid_mask_t *map) |
162 | { | ||
163 | return map->mask[0]; | ||
164 | } | ||
167 | 165 | ||
168 | #define physids_promote(physids) \ | 166 | static inline void physids_promote(unsigned long physids, physid_mask_t *map) |
169 | ({ \ | 167 | { |
170 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | 168 | physids_clear(*map); |
171 | __physid_mask.mask[0] = physids; \ | 169 | map->mask[0] = physids; |
172 | __physid_mask; \ | 170 | } |
173 | }) | ||
174 | 171 | ||
175 | /* Note: will create very large stack frames if physid_mask_t is big */ | 172 | /* Note: will create very large stack frames if physid_mask_t is big */ |
176 | #define physid_mask_of_physid(physid) \ | 173 | #define physid_mask_of_physid(physid) \ |
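Turning physids_coerce()/physids_promote() from macros into inline functions also changes the calling convention: the caller now passes the destination mask by pointer instead of receiving a potentially large physid_mask_t by value (the old macro built one on the stack). Call sites convert roughly like this sketch:

	physid_mask_t map;
	unsigned long id_bits;

	/* old: map = physids_promote(0x3); */
	physids_promote(0x3, &map);		/* new: fill the caller's mask */

	/* old: id_bits = physids_coerce(map); */
	id_bits = physids_coerce(&map);		/* low word of APIC ids back out */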
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 7e2b6ba962ff..5bef931f8b14 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h | |||
@@ -247,8 +247,8 @@ do { \ | |||
247 | #ifdef CONFIG_SMP | 247 | #ifdef CONFIG_SMP |
248 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 248 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
249 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 249 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
250 | void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | 250 | void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); |
251 | void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | 251 | void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); |
252 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 252 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
253 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 253 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
254 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); | 254 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); |
@@ -264,12 +264,12 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
264 | wrmsr(msr_no, l, h); | 264 | wrmsr(msr_no, l, h); |
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | static inline void rdmsr_on_cpus(const cpumask_t *m, u32 msr_no, | 267 | static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no, |
268 | struct msr *msrs) | 268 | struct msr *msrs) |
269 | { | 269 | { |
270 | rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); | 270 | rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); |
271 | } | 271 | } |
272 | static inline void wrmsr_on_cpus(const cpumask_t *m, u32 msr_no, | 272 | static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no, |
273 | struct msr *msrs) | 273 | struct msr *msrs) |
274 | { | 274 | { |
275 | wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); | 275 | wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); |
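With the prototypes switched from cpumask_t to const struct cpumask *, the stock mask pointers can be passed straight in. A minimal sketch; sizing the result array by NR_CPUS is an illustrative shortcut, real callers size it for the CPUs they actually query:

	static struct msr msrs[NR_CPUS];

	/* read the microcode revision MSR on every online CPU in one call */
	rdmsr_on_cpus(cpu_online_mask, MSR_IA32_UCODE_REV, msrs);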
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 6473f5ccff85..642fe34b36a2 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -49,7 +49,8 @@ extern unsigned long max_pfn_mapped; | |||
49 | extern unsigned long init_memory_mapping(unsigned long start, | 49 | extern unsigned long init_memory_mapping(unsigned long start, |
50 | unsigned long end); | 50 | unsigned long end); |
51 | 51 | ||
52 | extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); | 52 | extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn, |
53 | int acpi, int k8); | ||
53 | extern void free_initmem(void); | 54 | extern void free_initmem(void); |
54 | 55 | ||
55 | #endif /* !__ASSEMBLY__ */ | 56 | #endif /* !__ASSEMBLY__ */ |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index ad7ce3fd5065..8d9f8548a870 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -28,9 +28,20 @@ | |||
28 | */ | 28 | */ |
29 | #define ARCH_PERFMON_EVENT_MASK 0xffff | 29 | #define ARCH_PERFMON_EVENT_MASK 0xffff |
30 | 30 | ||
31 | /* | ||
32 | * filter mask to validate fixed counter events. | ||
33 | * the following filters disqualify for fixed counters: | ||
34 | * - inv | ||
35 | * - edge | ||
36 | * - cnt-mask | ||
37 | * The other filters are supported by fixed counters. | ||
38 | * The any-thread option is supported starting with v3. | ||
39 | */ | ||
40 | #define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000 | ||
41 | |||
31 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | 42 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c |
32 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | 43 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) |
33 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 | 44 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 |
34 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ | 45 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ |
35 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) | 46 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) |
36 | 47 | ||
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index af6fd360ab35..a34c785c5a63 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -16,6 +16,8 @@ | |||
16 | 16 | ||
17 | #ifndef __ASSEMBLY__ | 17 | #ifndef __ASSEMBLY__ |
18 | 18 | ||
19 | #include <asm/x86_init.h> | ||
20 | |||
19 | /* | 21 | /* |
20 | * ZERO_PAGE is a global shared page that is always zero: used | 22 | * ZERO_PAGE is a global shared page that is always zero: used |
21 | * for zero-mapped memory areas etc.. | 23 | * for zero-mapped memory areas etc.. |
@@ -270,9 +272,9 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, | |||
270 | unsigned long new_flags) | 272 | unsigned long new_flags) |
271 | { | 273 | { |
272 | /* | 274 | /* |
273 | * PAT type is always WB for ISA. So no need to check. | 275 | * PAT type is always WB for untracked ranges, so no need to check. |
274 | */ | 276 | */ |
275 | if (is_ISA_range(paddr, paddr + size - 1)) | 277 | if (x86_platform.is_untracked_pat_range(paddr, paddr + size)) |
276 | return 1; | 278 | return 1; |
277 | 279 | ||
278 | /* | 280 | /* |
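is_new_memtype_allowed() no longer hard-codes the ISA hole; it defers to the x86_platform.is_untracked_pat_range() hook, so a platform can declare whole ranges exempt from PAT tracking. A hypothetical override, with the bool return type inferred from the boolean use above and both names invented:

	static bool example_untracked_pat_range(u64 start, u64 end)
	{
		return start >= (1ULL << 32);	/* illustrative: nothing above 4G */
	}

	static void __init example_platform_setup(void)
	{
		x86_platform.is_untracked_pat_range = example_untracked_pat_range;
	}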
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c9786480f0fe..6f8ec1c37e0a 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -30,6 +30,7 @@ struct mm_struct; | |||
30 | #include <linux/math64.h> | 30 | #include <linux/math64.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | 32 | ||
33 | #define HBP_NUM 4 | ||
33 | /* | 34 | /* |
34 | * Default implementation of macro that returns current | 35 | * Default implementation of macro that returns current |
35 | * instruction pointer ("program counter"). | 36 | * instruction pointer ("program counter"). |
@@ -422,6 +423,8 @@ extern unsigned int xstate_size; | |||
422 | extern void free_thread_xstate(struct task_struct *); | 423 | extern void free_thread_xstate(struct task_struct *); |
423 | extern struct kmem_cache *task_xstate_cachep; | 424 | extern struct kmem_cache *task_xstate_cachep; |
424 | 425 | ||
426 | struct perf_event; | ||
427 | |||
425 | struct thread_struct { | 428 | struct thread_struct { |
426 | /* Cached TLS descriptors: */ | 429 | /* Cached TLS descriptors: */ |
427 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | 430 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; |
@@ -443,13 +446,10 @@ struct thread_struct { | |||
443 | unsigned long fs; | 446 | unsigned long fs; |
444 | #endif | 447 | #endif |
445 | unsigned long gs; | 448 | unsigned long gs; |
446 | /* Hardware debugging registers: */ | 449 | /* Save middle states of ptrace breakpoints */ |
447 | unsigned long debugreg0; | 450 | struct perf_event *ptrace_bps[HBP_NUM]; |
448 | unsigned long debugreg1; | 451 | /* Debug status used for traps, single steps, etc... */ |
449 | unsigned long debugreg2; | 452 | unsigned long debugreg6; |
450 | unsigned long debugreg3; | ||
451 | unsigned long debugreg6; | ||
452 | unsigned long debugreg7; | ||
453 | /* Fault info: */ | 453 | /* Fault info: */ |
454 | unsigned long cr2; | 454 | unsigned long cr2; |
455 | unsigned long trap_no; | 455 | unsigned long trap_no; |
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 621f56d73121..4009f6534f52 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h | |||
@@ -5,18 +5,19 @@ | |||
5 | 5 | ||
6 | /* misc architecture specific prototypes */ | 6 | /* misc architecture specific prototypes */ |
7 | 7 | ||
8 | extern void early_idt_handler(void); | 8 | void early_idt_handler(void); |
9 | 9 | ||
10 | extern void system_call(void); | 10 | void system_call(void); |
11 | extern void syscall_init(void); | 11 | void syscall_init(void); |
12 | 12 | ||
13 | extern void ia32_syscall(void); | 13 | void ia32_syscall(void); |
14 | extern void ia32_cstar_target(void); | 14 | void ia32_cstar_target(void); |
15 | extern void ia32_sysenter_target(void); | 15 | void ia32_sysenter_target(void); |
16 | 16 | ||
17 | extern void syscall32_cpu_init(void); | 17 | void syscall32_cpu_init(void); |
18 | 18 | ||
19 | extern void check_efer(void); | 19 | void x86_configure_nx(void); |
20 | void x86_report_nx(void); | ||
20 | 21 | ||
21 | extern int reboot_force; | 22 | extern int reboot_force; |
22 | 23 | ||
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 0f0d908349aa..3d11fd0f44c5 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
9 | #include <asm/segment.h> | 9 | #include <asm/segment.h> |
10 | #include <asm/page_types.h> | ||
10 | #endif | 11 | #endif |
11 | 12 | ||
12 | #ifndef __ASSEMBLY__ | 13 | #ifndef __ASSEMBLY__ |
@@ -216,6 +217,67 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) | |||
216 | return regs->sp; | 217 | return regs->sp; |
217 | } | 218 | } |
218 | 219 | ||
220 | /* Query offset/name of register from its name/offset */ | ||
221 | extern int regs_query_register_offset(const char *name); | ||
222 | extern const char *regs_query_register_name(unsigned int offset); | ||
223 | #define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) | ||
224 | |||
225 | /** | ||
226 | * regs_get_register() - get register value from its offset | ||
227 | * @regs: pt_regs from which register value is gotten. | ||
228 | * @offset: offset number of the register. | ||
229 | * | ||
230 | * regs_get_register returns the value of a register. The @offset is the | ||
231 | * offset of the register within the struct pt_regs which is specified by @regs. | ||
232 | * If @offset is bigger than MAX_REG_OFFSET, this returns 0. | ||
233 | */ | ||
234 | static inline unsigned long regs_get_register(struct pt_regs *regs, | ||
235 | unsigned int offset) | ||
236 | { | ||
237 | if (unlikely(offset > MAX_REG_OFFSET)) | ||
238 | return 0; | ||
239 | return *(unsigned long *)((unsigned long)regs + offset); | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * regs_within_kernel_stack() - check the address in the stack | ||
244 | * @regs: pt_regs which contains kernel stack pointer. | ||
245 | * @addr: address which is checked. | ||
246 | * | ||
247 | * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s). | ||
248 | * If @addr is within the kernel stack, it returns true. If not, returns false. | ||
249 | */ | ||
250 | static inline int regs_within_kernel_stack(struct pt_regs *regs, | ||
251 | unsigned long addr) | ||
252 | { | ||
253 | return ((addr & ~(THREAD_SIZE - 1)) == | ||
254 | (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * regs_get_kernel_stack_nth() - get Nth entry of the stack | ||
259 | * @regs: pt_regs which contains kernel stack pointer. | ||
260 | * @n: stack entry number. | ||
261 | * | ||
262 | * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack | ||
263 | * which is specified by @regs. If the @n-th entry is NOT in the kernel stack, | ||
264 | * this returns 0. | ||
265 | */ | ||
266 | static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, | ||
267 | unsigned int n) | ||
268 | { | ||
269 | unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); | ||
270 | addr += n; | ||
271 | if (regs_within_kernel_stack(regs, (unsigned long)addr)) | ||
272 | return *addr; | ||
273 | else | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | /* Get Nth argument at function call */ | ||
278 | extern unsigned long regs_get_argument_nth(struct pt_regs *regs, | ||
279 | unsigned int n); | ||
280 | |||
219 | /* | 281 | /* |
220 | * These are defined as per linux/ptrace.h, which see. | 282 | * These are defined as per linux/ptrace.h, which see. |
221 | */ | 283 | */ |
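The new accessors let tracers such as kprobes fetch registers and stack slots by name, without hard-coding pt_regs offsets. A hedged sketch of a pre-handler using them; the handler shape follows the usual kprobe signature and the register name is illustrative:

	static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		int off = regs_query_register_offset("ax");	/* < 0 if unknown */
		unsigned long ax = 0, slot1;

		if (off >= 0)
			ax = regs_get_register(regs, off);
		slot1 = regs_get_kernel_stack_nth(regs, 1);	/* 0 if off-stack */
		pr_debug("ax=%#lx stack[1]=%#lx\n", ax, slot1);
		return 0;
	}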
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h index 1b7ee5d673c2..0a5242428659 100644 --- a/arch/x86/include/asm/sections.h +++ b/arch/x86/include/asm/sections.h | |||
@@ -2,7 +2,13 @@ | |||
2 | #define _ASM_X86_SECTIONS_H | 2 | #define _ASM_X86_SECTIONS_H |
3 | 3 | ||
4 | #include <asm-generic/sections.h> | 4 | #include <asm-generic/sections.h> |
5 | #include <asm/uaccess.h> | ||
5 | 6 | ||
6 | extern char __brk_base[], __brk_limit[]; | 7 | extern char __brk_base[], __brk_limit[]; |
8 | extern struct exception_table_entry __stop___ex_table[]; | ||
9 | |||
10 | #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) | ||
11 | extern char __end_rodata_hpage_align[]; | ||
12 | #endif | ||
7 | 13 | ||
8 | #endif /* _ASM_X86_SECTIONS_H */ | 14 | #endif /* _ASM_X86_SECTIONS_H */ |
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index ae907e617181..3d3e8353ee5c 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h | |||
@@ -177,10 +177,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) | |||
177 | */ | 177 | */ |
178 | 178 | ||
179 | #ifndef CONFIG_KMEMCHECK | 179 | #ifndef CONFIG_KMEMCHECK |
180 | |||
181 | #if (__GNUC__ >= 4) | ||
182 | #define memcpy(t, f, n) __builtin_memcpy(t, f, n) | ||
183 | #else | ||
180 | #define memcpy(t, f, n) \ | 184 | #define memcpy(t, f, n) \ |
181 | (__builtin_constant_p((n)) \ | 185 | (__builtin_constant_p((n)) \ |
182 | ? __constant_memcpy((t), (f), (n)) \ | 186 | ? __constant_memcpy((t), (f), (n)) \ |
183 | : __memcpy((t), (f), (n))) | 187 | : __memcpy((t), (f), (n))) |
188 | #endif | ||
184 | #else | 189 | #else |
185 | /* | 190 | /* |
186 | * kmemcheck becomes very happy if we use the REP instructions unconditionally, | 191 | * kmemcheck becomes very happy if we use the REP instructions unconditionally, |
@@ -316,11 +321,15 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, | |||
316 | : __memset_generic((s), (c), (count))) | 321 | : __memset_generic((s), (c), (count))) |
317 | 322 | ||
318 | #define __HAVE_ARCH_MEMSET | 323 | #define __HAVE_ARCH_MEMSET |
324 | #if (__GNUC__ >= 4) | ||
325 | #define memset(s, c, count) __builtin_memset(s, c, count) | ||
326 | #else | ||
319 | #define memset(s, c, count) \ | 327 | #define memset(s, c, count) \ |
320 | (__builtin_constant_p(c) \ | 328 | (__builtin_constant_p(c) \ |
321 | ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ | 329 | ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ |
322 | (count)) \ | 330 | (count)) \ |
323 | : __memset((s), (c), (count))) | 331 | : __memset((s), (c), (count))) |
332 | #endif | ||
324 | 333 | ||
325 | /* | 334 | /* |
326 | * find the first occurrence of byte 'c', or 1 past the area if none | 335 | * find the first occurrence of byte 'c', or 1 past the area if none |
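
The effect of the new #if branch, sketched: on gcc 4 and later, memcpy()/memset() lower straight to the compiler builtins, which gcc can expand inline on its own; older compilers keep the __builtin_constant_p()-based dispatch shown above. Illustrative only:

struct sample { char buf[16]; };

static void copy_sample(struct sample *dst, const struct sample *src)
{
    /* constant n: __builtin_memcpy() on gcc >= 4,
     * __constant_memcpy() on older compilers */
    memcpy(dst->buf, src->buf, sizeof(dst->buf));
}
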
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 85574b7c1bc1..1fecb7e61130 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h | |||
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { | |||
57 | u16 intercept_dr_write; | 57 | u16 intercept_dr_write; |
58 | u32 intercept_exceptions; | 58 | u32 intercept_exceptions; |
59 | u64 intercept; | 59 | u64 intercept; |
60 | u8 reserved_1[44]; | 60 | u8 reserved_1[42]; |
61 | u16 pause_filter_count; | ||
61 | u64 iopm_base_pa; | 62 | u64 iopm_base_pa; |
62 | u64 msrpm_base_pa; | 63 | u64 msrpm_base_pa; |
63 | u64 tsc_offset; | 64 | u64 tsc_offset; |
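
Note the bookkeeping here: reserved_1 shrinks from 44 to 42 bytes, exactly absorbing the new u16 pause_filter_count so every later VMCB field keeps its architectural offset (iopm_base_pa sits at 0x40 per the AMD SVM spec). A hypothetical build-time check of that invariant, not part of this patch:

static inline void vmcb_layout_check(void)
{
    /* the new field must not shift anything after the reserved gap */
    BUILD_BUG_ON(offsetof(struct vmcb_control_area,
                          iopm_base_pa) != 0x40);
}
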
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index b9e4e20174fb..87ffcb12a1b8 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h | |||
@@ -3,17 +3,14 @@ | |||
3 | 3 | ||
4 | #include <linux/swiotlb.h> | 4 | #include <linux/swiotlb.h> |
5 | 5 | ||
6 | /* SWIOTLB interface */ | ||
7 | |||
8 | extern int swiotlb_force; | ||
9 | |||
10 | #ifdef CONFIG_SWIOTLB | 6 | #ifdef CONFIG_SWIOTLB |
11 | extern int swiotlb; | 7 | extern int swiotlb; |
12 | extern void pci_swiotlb_init(void); | 8 | extern int pci_swiotlb_init(void); |
13 | #else | 9 | #else |
14 | #define swiotlb 0 | 10 | #define swiotlb 0 |
15 | static inline void pci_swiotlb_init(void) | 11 | static inline int pci_swiotlb_init(void) |
16 | { | 12 | { |
13 | return 0; | ||
17 | } | 14 | } |
18 | #endif | 15 | #endif |
19 | 16 | ||
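
The void-to-int change lets callers learn whether swiotlb actually took over DMA handling; a hypothetical caller (the function name is illustrative):

static void __init hypothetical_dma_setup(void)
{
    if (pci_swiotlb_init())
        return;     /* swiotlb is active; nothing more to do */
    /* otherwise fall through and try a hardware IOMMU driver */
}
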
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index 72a6dcd1299b..9af9decb38c3 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h | |||
@@ -51,11 +51,6 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, | |||
51 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); | 51 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); |
52 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); | 52 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); |
53 | 53 | ||
54 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
55 | struct sysctl_ia32; | ||
56 | asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *); | ||
57 | #endif | ||
58 | |||
59 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); | 54 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); |
60 | asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); | 55 | asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); |
61 | 56 | ||
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index f08f97374892..022a84386de8 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -128,8 +128,6 @@ do { \ | |||
128 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ | 128 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ |
129 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ | 129 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ |
130 | "call __switch_to\n\t" \ | 130 | "call __switch_to\n\t" \ |
131 | ".globl thread_return\n" \ | ||
132 | "thread_return:\n\t" \ | ||
133 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ | 131 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ |
134 | __switch_canary \ | 132 | __switch_canary \ |
135 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | 133 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ |
@@ -157,19 +155,22 @@ extern void native_load_gs_index(unsigned); | |||
157 | * Load a segment. Fall back on loading the zero | 155 | * Load a segment. Fall back on loading the zero |
158 | * segment if something goes wrong.. | 156 | * segment if something goes wrong.. |
159 | */ | 157 | */ |
160 | #define loadsegment(seg, value) \ | 158 | #define loadsegment(seg, value) \ |
161 | asm volatile("\n" \ | 159 | do { \ |
162 | "1:\t" \ | 160 | unsigned short __val = (value); \ |
163 | "movl %k0,%%" #seg "\n" \ | 161 | \ |
164 | "2:\n" \ | 162 | asm volatile(" \n" \ |
165 | ".section .fixup,\"ax\"\n" \ | 163 | "1: movl %k0,%%" #seg " \n" \ |
166 | "3:\t" \ | 164 | \ |
167 | "movl %k1, %%" #seg "\n\t" \ | 165 | ".section .fixup,\"ax\" \n" \ |
168 | "jmp 2b\n" \ | 166 | "2: xorl %k0,%k0 \n" \ |
169 | ".previous\n" \ | 167 | " jmp 1b \n" \ |
170 | _ASM_EXTABLE(1b,3b) \ | 168 | ".previous \n" \ |
171 | : :"r" (value), "r" (0) : "memory") | 169 | \ |
172 | 170 | _ASM_EXTABLE(1b, 2b) \ | |
171 | \ | ||
172 | : "+r" (__val) : : "memory"); \ | ||
173 | } while (0) | ||
173 | 174 | ||
174 | /* | 175 | /* |
175 | * Save a segment register away | 176 | * Save a segment register away |
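
Callers of loadsegment() are unchanged; the rewrite moves the fault fallback into a fixup that zeroes the selector in place (xorl) instead of spending a second register operand on the constant 0. A hypothetical caller:

static void hypothetical_restore_fs(unsigned short sel)
{
    /* a faulting selector now silently loads the null segment */
    loadsegment(fs, sel);
}
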
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index d27d0a2fec4c..375c917c37d2 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -83,6 +83,7 @@ struct thread_info { | |||
83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
84 | #define TIF_SECCOMP 8 /* secure computing */ | 84 | #define TIF_SECCOMP 8 /* secure computing */ |
85 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 85 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
86 | #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ | ||
86 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | 87 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ |
87 | #define TIF_IA32 17 /* 32bit process */ | 88 | #define TIF_IA32 17 /* 32bit process */ |
88 | #define TIF_FORK 18 /* ret_from_fork */ | 89 | #define TIF_FORK 18 /* ret_from_fork */ |
@@ -107,6 +108,7 @@ struct thread_info { | |||
107 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 108 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
108 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 109 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
109 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) | 110 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) |
111 | #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) | ||
110 | #define _TIF_NOTSC (1 << TIF_NOTSC) | 112 | #define _TIF_NOTSC (1 << TIF_NOTSC) |
111 | #define _TIF_IA32 (1 << TIF_IA32) | 113 | #define _TIF_IA32 (1 << TIF_IA32) |
112 | #define _TIF_FORK (1 << TIF_FORK) | 114 | #define _TIF_FORK (1 << TIF_FORK) |
@@ -142,13 +144,14 @@ struct thread_info { | |||
142 | 144 | ||
143 | /* Only used for 64 bit */ | 145 | /* Only used for 64 bit */ |
144 | #define _TIF_DO_NOTIFY_MASK \ | 146 | #define _TIF_DO_NOTIFY_MASK \ |
145 | (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME) | 147 | (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ |
148 | _TIF_USER_RETURN_NOTIFY) | ||
146 | 149 | ||
147 | /* flags to check in __switch_to() */ | 150 | /* flags to check in __switch_to() */ |
148 | #define _TIF_WORK_CTXSW \ | 151 | #define _TIF_WORK_CTXSW \ |
149 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) | 152 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) |
150 | 153 | ||
151 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | 154 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
152 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | 155 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) |
153 | 156 | ||
154 | #define PREEMPT_ACTIVE 0x10000000 | 157 | #define PREEMPT_ACTIVE 0x10000000 |
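
A sketch of where the new flag fires, assuming the companion user-return-notifier API (fire_user_return_notifiers() from linux/user-return-notifier.h) that this flag was added for; do_notify_resume()-style code would do something like:

static void hypothetical_do_notify_resume(struct pt_regs *regs,
                                          u32 thread_info_flags)
{
    /* run registered notifiers just before returning to user mode */
    if (thread_info_flags & _TIF_USER_RETURN_NOTIFY)
        fire_user_return_notifiers();
}
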
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index d2c6c930b491..abd3e0ea762a 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -570,7 +570,6 @@ extern struct movsl_mask { | |||
570 | #ifdef CONFIG_X86_32 | 570 | #ifdef CONFIG_X86_32 |
571 | # include "uaccess_32.h" | 571 | # include "uaccess_32.h" |
572 | #else | 572 | #else |
573 | # define ARCH_HAS_SEARCH_EXTABLE | ||
574 | # include "uaccess_64.h" | 573 | # include "uaccess_64.h" |
575 | #endif | 574 | #endif |
576 | 575 | ||
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 632fb44b4cb5..0c9825e97f36 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h | |||
@@ -187,9 +187,34 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from, | |||
187 | 187 | ||
188 | unsigned long __must_check copy_to_user(void __user *to, | 188 | unsigned long __must_check copy_to_user(void __user *to, |
189 | const void *from, unsigned long n); | 189 | const void *from, unsigned long n); |
190 | unsigned long __must_check copy_from_user(void *to, | 190 | unsigned long __must_check _copy_from_user(void *to, |
191 | const void __user *from, | 191 | const void __user *from, |
192 | unsigned long n); | 192 | unsigned long n); |
193 | |||
194 | |||
195 | extern void copy_from_user_overflow(void) | ||
196 | #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS | ||
197 | __compiletime_error("copy_from_user() buffer size is not provably correct") | ||
198 | #else | ||
199 | __compiletime_warning("copy_from_user() buffer size is not provably correct") | ||
200 | #endif | ||
201 | ; | ||
202 | |||
203 | static inline unsigned long __must_check copy_from_user(void *to, | ||
204 | const void __user *from, | ||
205 | unsigned long n) | ||
206 | { | ||
207 | int sz = __compiletime_object_size(to); | ||
208 | int ret = -EFAULT; | ||
209 | |||
210 | if (likely(sz == -1 || sz >= n)) | ||
211 | ret = _copy_from_user(to, from, n); | ||
212 | else | ||
213 | copy_from_user_overflow(); | ||
214 | |||
215 | return ret; | ||
216 | } | ||
217 | |||
193 | long __must_check strncpy_from_user(char *dst, const char __user *src, | 218 | long __must_check strncpy_from_user(char *dst, const char __user *src, |
194 | long count); | 219 | long count); |
195 | long __must_check __strncpy_from_user(char *dst, | 220 | long __must_check __strncpy_from_user(char *dst, |
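
A sketch of what the new wrapper buys: when gcc can prove the destination object's size via __compiletime_object_size(), an over-long copy is reported at build time (an error under CONFIG_DEBUG_STRICT_USER_COPY_CHECKS, a warning otherwise), and at run time the copy is refused with -EFAULT instead of overflowing. Illustrative only:

static long hypothetical_read_req(const char __user *ubuf)
{
    char kbuf[8];

    /* n = 16 > sizeof(kbuf): triggers copy_from_user_overflow() at
     * build time; at run time kbuf is left untouched and -EFAULT
     * comes back. */
    return copy_from_user(kbuf, ubuf, 16);
}
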
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index db24b215fc50..46324c6a4f6e 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -19,12 +19,37 @@ __must_check unsigned long | |||
19 | copy_user_generic(void *to, const void *from, unsigned len); | 19 | copy_user_generic(void *to, const void *from, unsigned len); |
20 | 20 | ||
21 | __must_check unsigned long | 21 | __must_check unsigned long |
22 | copy_to_user(void __user *to, const void *from, unsigned len); | 22 | _copy_to_user(void __user *to, const void *from, unsigned len); |
23 | __must_check unsigned long | 23 | __must_check unsigned long |
24 | copy_from_user(void *to, const void __user *from, unsigned len); | 24 | _copy_from_user(void *to, const void __user *from, unsigned len); |
25 | __must_check unsigned long | 25 | __must_check unsigned long |
26 | copy_in_user(void __user *to, const void __user *from, unsigned len); | 26 | copy_in_user(void __user *to, const void __user *from, unsigned len); |
27 | 27 | ||
28 | static inline unsigned long __must_check copy_from_user(void *to, | ||
29 | const void __user *from, | ||
30 | unsigned long n) | ||
31 | { | ||
32 | int sz = __compiletime_object_size(to); | ||
33 | int ret = -EFAULT; | ||
34 | |||
35 | might_fault(); | ||
36 | if (likely(sz == -1 || sz >= n)) | ||
37 | ret = _copy_from_user(to, from, n); | ||
38 | #ifdef CONFIG_DEBUG_VM | ||
39 | else | ||
40 | WARN(1, "Buffer overflow detected!\n"); | ||
41 | #endif | ||
42 | return ret; | ||
43 | } | ||
44 | |||
45 | static __always_inline __must_check | ||
46 | int copy_to_user(void __user *dst, const void *src, unsigned size) | ||
47 | { | ||
48 | might_fault(); | ||
49 | |||
50 | return _copy_to_user(dst, src, size); | ||
51 | } | ||
52 | |||
28 | static __always_inline __must_check | 53 | static __always_inline __must_check |
29 | int __copy_from_user(void *dst, const void __user *src, unsigned size) | 54 | int __copy_from_user(void *dst, const void __user *src, unsigned size) |
30 | { | 55 | { |
@@ -176,8 +201,11 @@ __must_check long strlen_user(const char __user *str); | |||
176 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); | 201 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); |
177 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); | 202 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); |
178 | 203 | ||
179 | __must_check long __copy_from_user_inatomic(void *dst, const void __user *src, | 204 | static __must_check __always_inline int |
180 | unsigned size); | 205 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) |
206 | { | ||
207 | return copy_user_generic(dst, (__force const void *)src, size); | ||
208 | } | ||
181 | 209 | ||
182 | static __must_check __always_inline int | 210 | static __must_check __always_inline int |
183 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | 211 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) |
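
With __copy_from_user_inatomic() now a trivial inline over copy_user_generic(), the usual non-sleeping pattern still applies; a sketch (the helper name is illustrative):

static int peek_user_long(const unsigned long __user *src,
                          unsigned long *val)
{
    int left;

    /* must not sleep: suppress page-fault handling around the copy */
    pagefault_disable();
    left = __copy_from_user_inatomic(val, src, sizeof(*val));
    pagefault_enable();

    return left ? -EFAULT : 0;  /* non-zero return means it faulted */
}
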
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index 6fb3c209a7e3..3baf379fa840 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -342,10 +342,11 @@ | |||
342 | #define __NR_pwritev 334 | 342 | #define __NR_pwritev 334 |
343 | #define __NR_rt_tgsigqueueinfo 335 | 343 | #define __NR_rt_tgsigqueueinfo 335 |
344 | #define __NR_perf_event_open 336 | 344 | #define __NR_perf_event_open 336 |
345 | #define __NR_recvmmsg 337 | ||
345 | 346 | ||
346 | #ifdef __KERNEL__ | 347 | #ifdef __KERNEL__ |
347 | 348 | ||
348 | #define NR_syscalls 337 | 349 | #define NR_syscalls 338 |
349 | 350 | ||
350 | #define __ARCH_WANT_IPC_PARSE_VERSION | 351 | #define __ARCH_WANT_IPC_PARSE_VERSION |
351 | #define __ARCH_WANT_OLD_READDIR | 352 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 8d3ad0adbc68..4843f7ba754a 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -661,6 +661,8 @@ __SYSCALL(__NR_pwritev, sys_pwritev) | |||
661 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) | 661 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) |
662 | #define __NR_perf_event_open 298 | 662 | #define __NR_perf_event_open 298 |
663 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) | 663 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) |
664 | #define __NR_recvmmsg 299 | ||
665 | __SYSCALL(__NR_recvmmsg, sys_recvmmsg) | ||
664 | 666 | ||
665 | #ifndef __NO_STUBS | 667 | #ifndef __NO_STUBS |
666 | #define __ARCH_WANT_OLD_READDIR | 668 | #define __ARCH_WANT_OLD_READDIR |
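
recvmmsg lands as syscall 337 on 32-bit and 299 on 64-bit. No glibc wrapper existed when this was merged, so userspace would invoke it directly; a sketch, assuming a struct mmsghdr definition matching the kernel's:

#include <unistd.h>
#include <sys/syscall.h>
#include <sys/socket.h>
#include <time.h>

static int my_recvmmsg(int fd, struct mmsghdr *vec, unsigned int n,
                       unsigned int flags, struct timespec *timeout)
{
    /* raw syscall; __NR_recvmmsg resolves to 337 or 299 as above */
    return syscall(__NR_recvmmsg, fd, vec, n, flags, timeout);
}
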
diff --git a/arch/x86/include/asm/uv/uv_irq.h b/arch/x86/include/asm/uv/uv_irq.h index 9613c8c0b647..d6b17c760622 100644 --- a/arch/x86/include/asm/uv/uv_irq.h +++ b/arch/x86/include/asm/uv/uv_irq.h | |||
@@ -25,12 +25,14 @@ struct uv_IO_APIC_route_entry { | |||
25 | dest : 32; | 25 | dest : 32; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | extern struct irq_chip uv_irq_chip; | 28 | enum { |
29 | 29 | UV_AFFINITY_ALL, | |
30 | extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long); | 30 | UV_AFFINITY_NODE, |
31 | extern void arch_disable_uv_irq(int, unsigned long); | 31 | UV_AFFINITY_CPU |
32 | }; | ||
32 | 33 | ||
33 | extern int uv_setup_irq(char *, int, int, unsigned long); | 34 | extern int uv_irq_2_mmr_info(int, unsigned long *, int *); |
34 | extern void uv_teardown_irq(unsigned int, int, unsigned long); | 35 | extern int uv_setup_irq(char *, int, int, unsigned long, int); |
36 | extern void uv_teardown_irq(unsigned int); | ||
35 | 37 | ||
36 | #endif /* _ASM_X86_UV_UV_IRQ_H */ | 38 | #endif /* _ASM_X86_UV_UV_IRQ_H */ |
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 272514c2d456..2b4945419a84 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -56,6 +56,7 @@ | |||
56 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 | 56 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 |
57 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | 57 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 |
58 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 | 58 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 |
59 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 | ||
59 | 60 | ||
60 | 61 | ||
61 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 | 62 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 |
@@ -144,6 +145,8 @@ enum vmcs_field { | |||
144 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, | 145 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, |
145 | TPR_THRESHOLD = 0x0000401c, | 146 | TPR_THRESHOLD = 0x0000401c, |
146 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, | 147 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, |
148 | PLE_GAP = 0x00004020, | ||
149 | PLE_WINDOW = 0x00004022, | ||
147 | VM_INSTRUCTION_ERROR = 0x00004400, | 150 | VM_INSTRUCTION_ERROR = 0x00004400, |
148 | VM_EXIT_REASON = 0x00004402, | 151 | VM_EXIT_REASON = 0x00004402, |
149 | VM_EXIT_INTR_INFO = 0x00004404, | 152 | VM_EXIT_INTR_INFO = 0x00004404, |
@@ -248,6 +251,7 @@ enum vmcs_field { | |||
248 | #define EXIT_REASON_MSR_READ 31 | 251 | #define EXIT_REASON_MSR_READ 31 |
249 | #define EXIT_REASON_MSR_WRITE 32 | 252 | #define EXIT_REASON_MSR_WRITE 32 |
250 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | 253 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 |
254 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | ||
251 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | 255 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 |
252 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | 256 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 |
253 | #define EXIT_REASON_APIC_ACCESS 44 | 257 | #define EXIT_REASON_APIC_ACCESS 44 |
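
A sketch of how a hypervisor consumes the new pause-loop-exiting controls, assuming a KVM-style vmcs_write32() helper; the gap/window values are illustrative, not mandated by this header:

static void hypothetical_setup_ple(u32 secondary_ctl)
{
    if (secondary_ctl & SECONDARY_EXEC_PAUSE_LOOP_EXITING) {
        vmcs_write32(PLE_GAP, 41);      /* illustrative value */
        vmcs_write32(PLE_WINDOW, 4096); /* illustrative value */
    }
}
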
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 2c756fd4ab0e..ea0e8ea15e15 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -26,7 +26,7 @@ struct x86_init_mpparse { | |||
26 | void (*smp_read_mpc_oem)(struct mpc_table *mpc); | 26 | void (*smp_read_mpc_oem)(struct mpc_table *mpc); |
27 | void (*mpc_oem_pci_bus)(struct mpc_bus *m); | 27 | void (*mpc_oem_pci_bus)(struct mpc_bus *m); |
28 | void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); | 28 | void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); |
29 | void (*find_smp_config)(unsigned int reserve); | 29 | void (*find_smp_config)(void); |
30 | void (*get_smp_config)(unsigned int early); | 30 | void (*get_smp_config)(unsigned int early); |
31 | }; | 31 | }; |
32 | 32 | ||
@@ -91,6 +91,14 @@ struct x86_init_timers { | |||
91 | }; | 91 | }; |
92 | 92 | ||
93 | /** | 93 | /** |
94 | * struct x86_init_iommu - platform specific iommu setup | ||
95 | * @iommu_init: platform specific iommu setup | ||
96 | */ | ||
97 | struct x86_init_iommu { | ||
98 | int (*iommu_init)(void); | ||
99 | }; | ||
100 | |||
101 | /** | ||
94 | * struct x86_init_ops - functions for platform specific setup | 102 | * struct x86_init_ops - functions for platform specific setup |
95 | * | 103 | * |
96 | */ | 104 | */ |
@@ -101,6 +109,7 @@ struct x86_init_ops { | |||
101 | struct x86_init_oem oem; | 109 | struct x86_init_oem oem; |
102 | struct x86_init_paging paging; | 110 | struct x86_init_paging paging; |
103 | struct x86_init_timers timers; | 111 | struct x86_init_timers timers; |
112 | struct x86_init_iommu iommu; | ||
104 | }; | 113 | }; |
105 | 114 | ||
106 | /** | 115 | /** |
@@ -116,11 +125,14 @@ struct x86_cpuinit_ops { | |||
116 | * @calibrate_tsc: calibrate TSC | 125 | * @calibrate_tsc: calibrate TSC |
117 | * @get_wallclock: get time from HW clock like RTC etc. | 126 | * @get_wallclock: get time from HW clock like RTC etc. |
118 | * @set_wallclock: set time back to HW clock | 127 | * @set_wallclock: set time back to HW clock |
128 | * @is_untracked_pat_range: exclude this address range from PAT logic | ||
119 | */ | 129 | */ |
120 | struct x86_platform_ops { | 130 | struct x86_platform_ops { |
121 | unsigned long (*calibrate_tsc)(void); | 131 | unsigned long (*calibrate_tsc)(void); |
122 | unsigned long (*get_wallclock)(void); | 132 | unsigned long (*get_wallclock)(void); |
123 | int (*set_wallclock)(unsigned long nowtime); | 133 | int (*set_wallclock)(unsigned long nowtime); |
134 | void (*iommu_shutdown)(void); | ||
135 | bool (*is_untracked_pat_range)(u64 start, u64 end); | ||
124 | }; | 136 | }; |
125 | 137 | ||
126 | extern struct x86_init_ops x86_init; | 138 | extern struct x86_init_ops x86_init; |
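
Tying this file's changes together: swiotlb and vendor IOMMU init routines can now be slotted behind one hook. A hypothetical platform override (names and return convention are assumptions for illustration):

static int __init my_iommu_init(void)
{
    /* probe and enable the platform IOMMU here */
    return 0;
}

static void __init my_platform_setup(void)
{
    x86_init.iommu.iommu_init = my_iommu_init;
}
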