Diffstat (limited to 'arch/x86/include')
57 files changed, 521 insertions, 714 deletions
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index bc6abb7bc7ee..76561d20ea2f 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
+#include <linux/jump_label.h>
 #include <asm/asm.h>
 
 /*
@@ -160,6 +161,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end	NULL
 #endif
 
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
  * Allows the kernel to edit read-only pages.
@@ -180,4 +183,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
 
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+#define IDEAL_NOP_SIZE_5 5
+extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+extern void arch_init_ideal_nop5(void);
+#else
+static inline void arch_init_ideal_nop5(void) {}
+#endif
+
 #endif /* _ASM_X86_ALTERNATIVE_H */
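A quick illustration of how the new exports fit together; this is a hypothetical sketch, not code from the patch. arch_init_ideal_nop5() selects the preferred 5-byte NOP once during boot, and text_poke_early() can then stamp it over a patch site before other CPUs are up:

	/* Hypothetical caller; site must point at a 5-byte patchable slot. */
	static void __init nop_out_site(void *site)
	{
		/* arch_init_ideal_nop5() has already filled in ideal_nop5 */
		text_poke_early(site, ideal_nop5, IDEAL_NOP_SIZE_5);
	}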
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 5af2982133b5..a6863a2dec1f 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -24,11 +24,11 @@
 
 #ifdef CONFIG_AMD_IOMMU
 
-extern void amd_iommu_detect(void);
+extern int amd_iommu_detect(void);
 
 #else
 
-static inline void amd_iommu_detect(void) { }
+static inline int amd_iommu_detect(void) { return -ENODEV; }
 
 #endif
 
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index d2544f1d705d..916bc8111a01 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
 
 #endif /* !CONFIG_AMD_IOMMU_STATS */
 
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 7014e88bc779..e3509fc303bf 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -368,6 +368,9 @@ struct amd_iommu {
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
 
+	/* flags read from acpi table */
+	u8 acpi_flags;
+
 	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,24 @@ struct amd_iommu {
 
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
+
+	/*
+	 * We can't rely on the BIOS to restore all values on reinit, so we
+	 * need to stash them
+	 */
+
+	/* The iommu BAR */
+	u32 stored_addr_lo;
+	u32 stored_addr_hi;
+
+	/*
+	 * Each iommu has 6 l1s, each of which is documented as having 0x12
+	 * registers
+	 */
+	u32 stored_l1[6][0x12];
+
+	/* The l2 indirect registers */
+	u32 stored_l2[0x83];
 };
 
 /*
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/amd_nb.h
index af00bd1d2089..c8517f81b21e 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_K8_H
-#define _ASM_X86_K8_H
+#ifndef _ASM_X86_AMD_NB_H
+#define _ASM_X86_AMD_NB_H
 
 #include <linux/pci.h>
 
@@ -7,24 +7,27 @@ extern struct pci_device_id k8_nb_ids[];
 struct bootnode;
 
 extern int early_is_k8_nb(u32 value);
-extern struct pci_dev **k8_northbridges;
-extern int num_k8_northbridges;
 extern int cache_k8_northbridges(void);
 extern void k8_flush_garts(void);
 extern int k8_get_nodes(struct bootnode *nodes);
 extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int k8_scan_nodes(void);
 
-#ifdef CONFIG_K8_NB
-extern int num_k8_northbridges;
+struct k8_northbridge_info {
+	u16 num;
+	u8 gart_supported;
+	struct pci_dev **nb_misc;
+};
+extern struct k8_northbridge_info k8_northbridges;
+
+#ifdef CONFIG_AMD_NB
 
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
-	return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
+	return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
 }
 
 #else
-#define num_k8_northbridges 0
 
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
@@ -33,4 +36,4 @@ static inline struct pci_dev *node_to_k8_nb_misc(int node)
 #endif
 
 
-#endif /* _ASM_X86_K8_H */
+#endif /* _ASM_X86_AMD_NB_H */
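With the conversion from the old k8_northbridges/num_k8_northbridges pair to a single struct, iteration looks as below. A minimal sketch assuming cache_k8_northbridges() has already populated the struct; the callback is hypothetical:

	static void example_walk_nb_misc(void (*fn)(struct pci_dev *))
	{
		int i;

		for (i = 0; i < k8_northbridges.num; i++)
			fn(k8_northbridges.nb_misc[i]);
	}

GART users are expected to test the new gart_supported flag first, which is why it sits alongside num and nb_misc.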
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index a69b1ac9eaf8..2fefa501d3ba 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -54,7 +54,6 @@ extern struct clock_event_device *global_clock_event;
 extern unsigned long apbt_quick_calibrate(void);
 extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
 extern void apbt_setup_secondary_clock(void);
-extern unsigned int boot_cpu_id;
 
 extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
 extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 1fa03e04ae44..286de34b0ed6 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -252,9 +252,7 @@ static inline int apic_is_clustered_box(void)
 }
 #endif
 
-extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
-extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
-
+extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
 
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 7fe3b3060f08..a859ca461fb0 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -131,6 +131,7 @@
 #define	APIC_EILVTn(n)	(0x500 + 0x10 * n)
 #define		APIC_EILVT_NR_AMD_K8	1	/* # of extended interrupts */
 #define		APIC_EILVT_NR_AMD_10H	4
+#define		APIC_EILVT_NR_MAX	APIC_EILVT_NR_AMD_10H
 #define		APIC_EILVT_LVTOFF(x)	(((x) >> 4) & 0xF)
 #define		APIC_EILVT_MSG_FIX	0x0
 #define		APIC_EILVT_MSG_SMI	0x2
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 545776efeb16..903683b07e42 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
-		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+		(addr[nr / BITS_PER_LONG])) != 0;
 }
 
 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
@@ -440,6 +440,8 @@ static inline int fls(int x)
 
 #ifdef __KERNEL__
 
+#include <asm-generic/bitops/find.h>
+
 #include <asm-generic/bitops/sched.h>
 
 #define ARCH_HAS_FAST_MULTIPLIER 1
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h
index 0918654305af..0d467b338835 100644
--- a/arch/x86/include/asm/calgary.h
+++ b/arch/x86/include/asm/calgary.h
@@ -62,9 +62,9 @@ struct cal_chipset_ops {
 extern int use_calgary;
 
 #ifdef CONFIG_CALGARY_IOMMU
-extern void detect_calgary(void);
+extern int detect_calgary(void);
 #else
-static inline void detect_calgary(void) { return; }
+static inline int detect_calgary(void) { return -ENODEV; }
 #endif
 
 #endif /* _ASM_X86_CALGARY_H */
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 306160e58b48..1d9cd27c2920 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *)regs->sp - len;
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index b185091bf19c..4fab24de26b1 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,6 +32,5 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
-extern unsigned int boot_cpu_id;
 
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 781a50b29a49..220e2ea08e80 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -152,10 +152,14 @@
 #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
 #define X86_FEATURE_OSVW	(6*32+ 9) /* OS Visible Workaround */
 #define X86_FEATURE_IBS		(6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_SSE5	(6*32+11) /* SSE-5 */
+#define X86_FEATURE_XOP		(6*32+11) /* extended AVX instructions */
 #define X86_FEATURE_SKINIT	(6*32+12) /* SKINIT/STGI instructions */
 #define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP		(6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4	(6*32+16) /* 4 operands MAC instructions */
 #define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM		(6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -168,6 +172,7 @@
 #define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTS		(7*32+ 7) /* Digital Thermal Sensor */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
@@ -179,6 +184,13 @@
 #define X86_FEATURE_LBRV	(8*32+ 6) /* AMD LBR Virtualization support */
 #define X86_FEATURE_SVML	(8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
 #define X86_FEATURE_NRIPS	(8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR	(8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN	(8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID	(8*32+11) /* AMD flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER	(8*32+13) /* AMD filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD	(8*32+14) /* AMD pause filter threshold */
+
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
@@ -296,6 +308,7 @@ extern const char * const x86_power_flags[32];
 
 #endif /* CONFIG_X86_64 */
 
+#if __GNUC__ >= 4
 /*
  * Static testing of CPU features. Used the same as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
@@ -304,7 +317,7 @@ extern const char * const x86_power_flags[32];
  */
 static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
 		asm goto("1: jmp %l[t_no]\n"
 			 "2:\n"
 			 ".section .altinstructions,\"a\"\n"
@@ -345,7 +358,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 #endif
 }
 
-#if __GNUC__ >= 4
 #define static_cpu_has(bit)					\
 (								\
 	__builtin_constant_p(boot_cpu_has(bit)) ?		\
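Note the interplay of the last two hunks: once the whole block sits under #if __GNUC__ >= 4, the inner test no longer needs the (__GNUC__ == 4 && ...) clause, since __GNUC__ > 4 || __GNUC_MINOR__ >= 5 is only evaluated when __GNUC__ >= 4 already holds. A minimal usage sketch, valid only after alternatives have run (per the comment in the header):

	/* Illustrative wrapper; compiles down to a statically patched branch. */
	static inline bool cpu_can_xsave(void)
	{
		return static_cpu_has(X86_FEATURE_XSAVE);
	}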
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index ec8a52d14ab1..5be1542fbfaf 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -112,23 +112,13 @@ static inline void early_memtest(unsigned long start, unsigned long end)
 }
 #endif
 
-extern unsigned long end_user_pfn;
-
-extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
-extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
-#include <linux/early_res.h>
-
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
-extern int e820_find_active_region(const struct e820entry *ei,
-				   unsigned long start_pfn,
-				   unsigned long last_pfn,
-				   unsigned long *ei_startpfn,
-				   unsigned long *ei_endpfn);
-extern void e820_register_active_regions(int nid, unsigned long start_pfn,
-					 unsigned long end_pfn);
-extern u64 e820_hole_size(u64 start, u64 end);
+extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+
+void memblock_x86_fill(void);
+void memblock_find_dma_reserve(void);
+
 extern void finish_e820_parsing(void);
 extern void e820_reserve_resources(void);
 extern void e820_reserve_resources_late(void);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 8406ed7f9926..8e4a16508d4e 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -90,7 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
-extern void efi_reserve_early(void);
+extern void efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
 
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 4d2966e7e858..57650ab4a5f5 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -38,8 +38,8 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
-#ifdef CONFIG_PERF_EVENTS
-BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
+#ifdef CONFIG_IRQ_WORK
+BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
 #endif
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index d07b44f7d1dc..4d293dced62f 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -214,5 +214,20 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
 	return __virt_to_fix(vaddr);
 }
+
+/* Return a pointer with the offset calculated */
+static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx,
+						phys_addr_t phys, pgprot_t flags)
+{
+	__set_fixmap(idx, phys, flags);
+	return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
+}
+
+#define set_fixmap_offset(idx, phys)			\
+	__set_fixmap_offset(idx, phys, PAGE_KERNEL)
+
+#define set_fixmap_offset_nocache(idx, phys)		\
+	__set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE)
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
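A usage sketch for the new fixmap helpers; FIX_EXAMPLE stands in for a real fixed_addresses slot and is not something this patch adds:

	/* Map device registers uncached, keeping the sub-page offset of phys. */
	static unsigned long map_example_regs(phys_addr_t phys)
	{
		return set_fixmap_offset_nocache(FIX_EXAMPLE, phys);
	}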
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 4ac5b0f33fc1..43085bfc99c3 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -17,6 +17,7 @@ extern int fix_aperture;
 #define GARTEN		(1<<0)
 #define DISGARTCPU	(1<<4)
 #define DISGARTIO	(1<<5)
+#define DISTLBWALKPRB	(1<<6)
 
 /* GART cache control register bits. */
 #define INVGART		(1<<0)
@@ -27,7 +28,6 @@ extern int fix_aperture;
 #define AMD64_GARTAPERTUREBASE	0x94
 #define AMD64_GARTTABLEBASE	0x98
 #define AMD64_GARTCACHECTL	0x9c
-#define AMD64_GARTEN		(1<<0)
 
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
@@ -37,7 +37,7 @@ extern int gart_iommu_aperture_disabled;
 extern void early_gart_iommu_check(void);
 extern int gart_iommu_init(void);
 extern void __init gart_parse_options(char *);
-extern void gart_iommu_hole_init(void);
+extern int gart_iommu_hole_init(void);
 
 #else
 #define gart_iommu_aperture 0
@@ -50,13 +50,27 @@ static inline void early_gart_iommu_check(void)
 static inline void gart_parse_options(char *options)
 {
 }
-static inline void gart_iommu_hole_init(void)
+static inline int gart_iommu_hole_init(void)
 {
+	return -ENODEV;
 }
 #endif
 
 extern int agp_amd64_init(void);
 
+static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
+{
+	u32 ctl;
+
+	/*
+	 * Don't enable translation but enable GART IO and CPU accesses.
+	 * Also, set DISTLBWALKPRB since GART tables memory is UC.
+	 */
+	ctl = DISTLBWALKPRB | order << 1;
+
+	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+}
+
 static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
 	u32 tmp, ctl;
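For context on the order parameter: the aperture control register encodes the aperture size as 32 MB << order (the order << 1 shift places it in bits 3:1), so a hypothetical caller programming a 64 MB aperture would pass order 1:

	static void example_enable_64mb_aperture(struct pci_dev *nb)
	{
		gart_set_size_and_enable(nb, 1);	/* 32 MB << 1 = 64 MB */
	}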
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index aeab29aee617..55e4de613f0e 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -14,7 +14,7 @@ typedef struct {
 #endif
 	unsigned int x86_platform_ipis;	/* arch dependent */
 	unsigned int apic_perf_irqs;
-	unsigned int apic_pending_irqs;
+	unsigned int apic_irq_work_irqs;
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 004e6e25e913..2c392d663dce 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -68,17 +68,18 @@ extern unsigned long force_hpet_address;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
-extern u8 hpet_readback_cmp;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);
 extern unsigned int hpet_readl(unsigned int a);
 extern void force_hpet_resume(void);
 
-extern void hpet_msi_unmask(unsigned int irq);
-extern void hpet_msi_mask(unsigned int irq);
-extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
-extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
+struct irq_data;
+extern void hpet_msi_unmask(struct irq_data *data);
+extern void hpet_msi_mask(struct irq_data *data);
+struct hpet_dev;
+extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
+extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
 
 #ifdef CONFIG_PCI_MSI
 extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
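The new prototypes follow genirq's move to struct irq_data based irq_chip callbacks; a sketch (abridged, field names per the genirq API of this era) of how the mask/unmask pair slots into an irq_chip:

	static struct irq_chip hpet_msi_type = {
		.name		= "HPET_MSI",
		.irq_unmask	= hpet_msi_unmask,
		.irq_mask	= hpet_msi_mask,
	};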
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 528a11e8d3e3..824ca07860d0 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -20,7 +20,7 @@ struct arch_hw_breakpoint {
 #include <linux/list.h>
 
 /* Available HW breakpoint length encodings */
-#define X86_BREAKPOINT_LEN_X		0x00
+#define X86_BREAKPOINT_LEN_X		0x40
 #define X86_BREAKPOINT_LEN_1		0x40
 #define X86_BREAKPOINT_LEN_2		0x44
 #define X86_BREAKPOINT_LEN_4		0x4c
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 46c0fe05f230..0274ec5a7e62 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -29,7 +29,7 @@
 extern void apic_timer_interrupt(void);
 extern void x86_platform_ipi(void);
 extern void error_interrupt(void);
-extern void perf_pending_interrupt(void);
+extern void irq_work_interrupt(void);
 
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
@@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
 	irq_attr->polarity	= polarity;
 }
 
+struct irq_2_iommu {
+	struct intel_iommu *iommu;
+	u16 irte_index;
+	u16 sub_handle;
+	u8  irte_mask;
+};
+
 /*
  * This is performance-critical, we want to do it O(1)
  *
@@ -89,15 +96,17 @@ struct irq_cfg {
 	cpumask_var_t		old_domain;
 	u8			vector;
 	u8			move_in_progress : 1;
+#ifdef CONFIG_INTR_REMAP
+	struct irq_2_iommu	irq_2_iommu;
+#endif
 };
 
-extern struct irq_cfg *irq_cfg(unsigned int);
 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
 extern void send_cleanup_vector(struct irq_cfg *);
 
-struct irq_desc;
-extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *,
-				      unsigned int *dest_id);
+struct irq_data;
+int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
+			  unsigned int *dest_id);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
 extern void setup_ioapic_dest(void);
 
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index a73a8d5a5e69..4aa2bb3b242a 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -55,6 +55,12 @@ extern int save_i387_xstate_ia32(void __user *buf);
 extern int restore_i387_xstate_ia32(void __user *buf);
 #endif
 
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
 #define X87_FSW_ES	(1 << 7)	/* Exception Summary */
 
 static __always_inline __pure bool use_xsaveopt(void)
@@ -67,6 +73,11 @@ static __always_inline __pure bool use_xsave(void)
 	return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+static __always_inline __pure bool use_fxsr(void)
+{
+	return static_cpu_has(X86_FEATURE_FXSR);
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -77,19 +88,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 }
 
 #ifdef CONFIG_X86_64
-
-/* Ignore delayed exceptions from user space */
-static inline void tolerant_fwait(void)
-{
-	asm volatile("1: fwait\n"
-		     "2:\n"
-		     _ASM_EXTABLE(1b, 2b));
-}
-
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
 	int err;
 
+	/* See comment in fxsave() below. */
 	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
@@ -98,44 +101,10 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 		     ".previous\n"
 		     _ASM_EXTABLE(1b, 3b)
 		     : [err] "=r" (err)
-#if 0 /* See comment in fxsave() below. */
-		     : [fx] "r" (fx), "m" (*fx), "0" (0));
-#else
-		     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
-#endif
+		     : [fx] "R" (fx), "m" (*fx), "0" (0));
 	return err;
 }
 
-/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
-   is pending. Clear the x87 state here by setting it to fixed
-   values. The kernel data segment can be sometimes 0 and sometimes
-   new user value. Both should be ok.
-   Use the PDA as safe address because it should be already in L1. */
-static inline void fpu_clear(struct fpu *fpu)
-{
-	struct xsave_struct *xstate = &fpu->state->xsave;
-	struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
-	/*
-	 * xsave header may indicate the init state of the FP.
-	 */
-	if (use_xsave() &&
-	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-		return;
-
-	if (unlikely(fx->swd & X87_FSW_ES))
-		asm volatile("fnclex");
-	alternative_input(ASM_NOP8 ASM_NOP2,
-			  "emms\n\t"		/* clear stack tags */
-			  "fildl %%gs:0",	/* load to clear state */
-			  X86_FEATURE_FXSAVE_LEAK);
-}
-
-static inline void clear_fpu_state(struct task_struct *tsk)
-{
-	fpu_clear(&tsk->thread.fpu);
-}
-
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
@@ -149,6 +118,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	if (unlikely(err))
 		return -EFAULT;
 
+	/* See comment in fxsave() below. */
 	asm volatile("1: rex64/fxsave (%[fx])\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
@@ -157,11 +127,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 		     ".previous\n"
 		     _ASM_EXTABLE(1b, 3b)
 		     : [err] "=r" (err), "=m" (*fx)
-#if 0 /* See comment in fxsave() below. */
-		     : [fx] "r" (fx), "0" (0));
-#else
-		     : [fx] "cdaSDb" (fx), "0" (0));
-#endif
+		     : [fx] "R" (fx), "0" (0));
 	if (unlikely(err) &&
 	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
 		err = -EFAULT;
@@ -175,56 +141,29 @@ static inline void fpu_fxsave(struct fpu *fpu)
 	   uses any extended registers for addressing, a second REX prefix
 	   will be generated (to the assembler, rex64 followed by semicolon
 	   is a separate instruction), and hence the 64-bitness is lost. */
-#if 0
+
+#ifdef CONFIG_AS_FXSAVEQ
 	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
 	   starting with gas 2.16. */
 	__asm__ __volatile__("fxsaveq %0"
 			     : "=m" (fpu->state->fxsave));
-#elif 0
+#else
 	/* Using, as a workaround, the properly prefixed form below isn't
 	   accepted by any binutils version so far released, complaining that
 	   the same type of prefix is used twice if an extended register is
-	   needed for addressing (fix submitted to mainline 2005-11-21). */
-	__asm__ __volatile__("rex64/fxsave %0"
-			     : "=m" (fpu->state->fxsave));
-#else
-	/* This, however, we can work around by forcing the compiler to select
+	   needed for addressing (fix submitted to mainline 2005-11-21).
+	asm volatile("rex64/fxsave %0"
+		     : "=m" (fpu->state->fxsave));
+	   This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
-	__asm__ __volatile__("rex64/fxsave (%1)"
-			     : "=m" (fpu->state->fxsave)
-			     : "cdaSDb" (&fpu->state->fxsave));
+	asm volatile("rex64/fxsave (%[fx])"
+		     : "=m" (fpu->state->fxsave)
+		     : [fx] "R" (&fpu->state->fxsave));
 #endif
 }
 
-static inline void fpu_save_init(struct fpu *fpu)
-{
-	if (use_xsave())
-		fpu_xsave(fpu);
-	else
-		fpu_fxsave(fpu);
-
-	fpu_clear(fpu);
-}
-
-static inline void __save_init_fpu(struct task_struct *tsk)
-{
-	fpu_save_init(&tsk->thread.fpu);
-	task_thread_info(tsk)->status &= ~TS_USEDFPU;
-}
-
 #else /* CONFIG_X86_32 */
 
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-static inline void tolerant_fwait(void)
-{
-	asm volatile("fnclex ; fwait");
-}
-
 /* perform fxrstor iff the processor has extended states, otherwise frstor */
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
@@ -241,6 +180,14 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 	return 0;
 }
 
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+	asm volatile("fxsave %[fx]"
+		     : [fx] "=m" (fpu->state->fxsave));
+}
+
+#endif	/* CONFIG_X86_64 */
+
 /* We need a safe address that is cheap to find and that is already
    in L1 during context switch. The best choices are unfortunately
    different for UP and SMP */
@@ -256,47 +203,33 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 static inline void fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
-		struct xsave_struct *xstate = &fpu->state->xsave;
-		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
 		fpu_xsave(fpu);
 
 		/*
 		 * xsave header may indicate the init state of the FP.
 		 */
-		if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-			goto end;
-
-		if (unlikely(fx->swd & X87_FSW_ES))
-			asm volatile("fnclex");
-
-		/*
-		 * we can do a simple return here or be paranoid :)
-		 */
-		goto clear_state;
+		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+			return;
+	} else if (use_fxsr()) {
+		fpu_fxsave(fpu);
+	} else {
+		asm volatile("fsave %[fx]; fwait"
+			     : [fx] "=m" (fpu->state->fsave));
+		return;
 	}
 
-	/* Use more nops than strictly needed in case the compiler
-	   varies code */
-	alternative_input(
-		"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
-		"fxsave %[fx]\n"
-		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
-		X86_FEATURE_FXSR,
-		[fx] "m" (fpu->state->fxsave),
-		[fsw] "m" (fpu->state->fxsave.swd) : "memory");
-clear_state:
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+		asm volatile("fnclex");
+
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending. Clear the x87 state here by setting it to fixed
 	   values. safe_address is a random variable that should be in L1 */
 	alternative_input(
-		GENERIC_NOP8 GENERIC_NOP2,
+		ASM_NOP8 ASM_NOP2,
 		"emms\n\t"		/* clear stack tags */
-		"fildl %[addr]",	/* set F?P to defined value */
+		"fildl %P[addr]",	/* set F?P to defined value */
 		X86_FEATURE_FXSAVE_LEAK,
 		[addr] "m" (safe_address));
-end:
-	;
 }
 
 static inline void __save_init_fpu(struct task_struct *tsk)
@@ -305,9 +238,6 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
-
-#endif	/* CONFIG_X86_64 */
-
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
 {
 	return fxrstor_checking(&fpu->state->fxsave);
@@ -344,7 +274,10 @@ static inline void __unlazy_fpu(struct task_struct *tsk)
 static inline void __clear_fpu(struct task_struct *tsk)
 {
 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		tolerant_fwait();
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
 		stts();
 	}
@@ -405,19 +338,6 @@ static inline void irq_ts_restore(int TS_state)
 	stts();
 }
 
-#ifdef CONFIG_X86_64
-
-static inline void save_init_fpu(struct task_struct *tsk)
-{
-	__save_init_fpu(tsk);
-	stts();
-}
-
-#define unlazy_fpu	__unlazy_fpu
-#define clear_fpu	__clear_fpu
-
-#else  /* CONFIG_X86_32 */
-
 /*
  * These disable preemption on their own and are safe
  */
@@ -443,8 +363,6 @@ static inline void clear_fpu(struct task_struct *tsk)
 	preempt_enable();
 }
 
-#endif /* CONFIG_X86_64 */
-
 /*
  * i387 state interaction
 */
@@ -508,7 +426,4 @@ extern void fpu_finit(struct fpu *fpu);
 
 #endif /* __ASSEMBLY__ */
 
-#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
-#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5
-
 #endif /* _ASM_X86_I387_H */
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 1655147646aa..a20365953bf8 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip;
 struct legacy_pic {
 	int nr_legacy_irqs;
 	struct irq_chip *chip;
+	void (*mask)(unsigned int irq);
+	void (*unmask)(unsigned int irq);
 	void (*mask_all)(void);
 	void (*restore_mask)(void);
 	void (*init)(int auto_eoi);
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 30a3e9776123..f0203f4791a8 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 
 extern void iounmap(volatile void __iomem *addr);
 
+extern void set_iounmap_nonlazy(void);
 
 #ifdef __KERNEL__
 
@@ -348,6 +349,7 @@ extern void __iomem *early_memremap(resource_size_t phys_addr,
 				    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void fixup_early_ioremap(void);
+extern bool is_early_ioremap_ptep(pte_t *ptep);
 
 #define IO_SPACE_LIMIT 0xffff
 
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 9cb2edb87c2f..c8be4566c3d2 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
 
 extern void probe_nr_irqs_gsi(void);
 
-extern int setup_ioapic_entry(int apic, int irq,
-			      struct IO_APIC_route_entry *entry,
-			      unsigned int destination, int trigger,
-			      int polarity, int vector, int pin);
-extern void ioapic_write_entry(int apic, int pin,
-			       struct IO_APIC_route_entry e);
 extern void setup_ioapic_ids_from_mpc(void);
 
 struct mp_ioapic_gsi{
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index f35eb45d6576..c4191b3b7056 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h new file mode 100644 index 000000000000..f229b13a5f30 --- /dev/null +++ b/arch/x86/include/asm/iommu_table.h | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | #ifndef _ASM_X86_IOMMU_TABLE_H | ||
| 2 | #define _ASM_X86_IOMMU_TABLE_H | ||
| 3 | |||
| 4 | #include <asm/swiotlb.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * History lesson: | ||
| 8 | * The execution chain of IOMMUs in 2.6.36 looks as so: | ||
| 9 | * | ||
| 10 | * [xen-swiotlb] | ||
| 11 | * | | ||
| 12 | * +----[swiotlb *]--+ | ||
| 13 | * / | \ | ||
| 14 | * / | \ | ||
| 15 | * [GART] [Calgary] [Intel VT-d] | ||
| 16 | * / | ||
| 17 | * / | ||
| 18 | * [AMD-Vi] | ||
| 19 | * | ||
| 20 | * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip | ||
| 21 | * over the rest of IOMMUs and unconditionally initialize the SWIOTLB. | ||
| 22 | * Also it would surreptitiously initialize set the swiotlb=1 if there were | ||
| 23 | * more than 4GB and if the user did not pass in 'iommu=off'. The swiotlb | ||
| 24 | * flag would be turned off by all IOMMUs except the Calgary one. | ||
| 25 | * | ||
| 26 | * The IOMMU_INIT* macros allow a similar tree (or more complex if desired) | ||
| 27 | * to be built by defining who we depend on. | ||
| 28 | * | ||
| 29 | * And all that needs to be done is to use one of the macros in the IOMMU | ||
| 30 | * and the pci-dma.c will take care of the rest. | ||
| 31 | */ | ||
| 32 | |||
| 33 | struct iommu_table_entry { | ||
| 34 | initcall_t detect; | ||
| 35 | initcall_t depend; | ||
| 36 | void (*early_init)(void); /* No memory allocate available. */ | ||
| 37 | void (*late_init)(void); /* Yes, can allocate memory. */ | ||
| 38 | #define IOMMU_FINISH_IF_DETECTED (1<<0) | ||
| 39 | #define IOMMU_DETECTED (1<<1) | ||
| 40 | int flags; | ||
| 41 | }; | ||
| 42 | /* | ||
| 43 | * Macro fills out an entry in the .iommu_table that is equivalent | ||
| 44 | * to the fields that 'struct iommu_table_entry' has. The entries | ||
| 45 | * that are put in the .iommu_table section are not put in any order | ||
| 46 | * hence during boot-time we will have to resort them based on | ||
| 47 | * dependency. */ | ||
| 48 | |||
| 49 | |||
| 50 | #define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\ | ||
| 51 | static const struct iommu_table_entry const \ | ||
| 52 | __iommu_entry_##_detect __used \ | ||
| 53 | __attribute__ ((unused, __section__(".iommu_table"), \ | ||
| 54 | aligned((sizeof(void *))))) \ | ||
| 55 | = {_detect, _depend, _early_init, _late_init, \ | ||
| 56 | _finish ? IOMMU_FINISH_IF_DETECTED : 0} | ||
| 57 | /* | ||
| 58 | * The simplest IOMMU definition. Provide the detection routine | ||
| 59 | * and it will be run after the SWIOTLB and the other IOMMUs | ||
| 60 | * that utilize this macro. If the IOMMU is detected (ie, the | ||
| 61 | * detect routine returns a positive value), the other IOMMUs | ||
| 62 | * are also checked. You can use IOMMU_INIT_POST_FINISH if you prefer | ||
| 63 | * to stop detecting the other IOMMUs after yours has been detected. | ||
| 64 | */ | ||
| 65 | #define IOMMU_INIT_POST(_detect) \ | ||
| 66 | __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 0) | ||
| 67 | |||
| 68 | #define IOMMU_INIT_POST_FINISH(_detect) \ | ||
| 69 | __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 1) | ||
| 70 | |||
| 71 | /* | ||
| 72 | * A more sophisticated version of IOMMU_INIT. This variant requires: | ||
| 73 | * a). A detection routine. | ||
| 74 | * b). The name of the detection routine we depend on, so that it gets | ||
| 75 | * called before us. | ||
| 76 | * c). The init routine, which gets called from pci_iommu_alloc if the | ||
| 77 | * detection routine returns a positive value. At that point no | ||
| 78 | * memory allocator is available yet. | ||
| 79 | * d). Similar to 'init', except that this gets called from | ||
| 80 | * pci_iommu_init, where we do have a memory allocator. | ||
| 81 | * | ||
| 82 | * The standard variant differs from the _FINISH one in that the | ||
| 83 | * standard variant will continue detecting other IOMMUs in the call | ||
| 84 | * list after its detection routine returns a positive number, while | ||
| 85 | * the _FINISH variant will stop the execution chain. Both will still | ||
| 86 | * call the 'init' and 'late_init' functions if they are set. | ||
| 87 | */ | ||
| 88 | #define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init) \ | ||
| 89 | __IOMMU_INIT(_detect, _depend, _init, _late_init, 1) | ||
| 90 | |||
| 91 | #define IOMMU_INIT(_detect, _depend, _init, _late_init) \ | ||
| 92 | __IOMMU_INIT(_detect, _depend, _init, _late_init, 0) | ||
| 93 | |||
| 94 | void sort_iommu_table(struct iommu_table_entry *start, | ||
| 95 | struct iommu_table_entry *finish); | ||
| 96 | |||
| 97 | void check_iommu_entries(struct iommu_table_entry *start, | ||
| 98 | struct iommu_table_entry *finish); | ||
| 99 | |||
| 100 | #endif /* _ASM_X86_IOMMU_TABLE_H */ | ||
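Taken together, the macros above form a small registration API. As a hedged sketch of the intended use (the 'foo' driver, foo_hw_present(), and the init bodies are hypothetical, not part of this header), a driver that wants its detection run after the SWIOTLB 4GB check, and to stop the chain once detected, would do roughly:

        #include <linux/init.h>
        #include <linux/types.h>
        #include <asm/iommu_table.h>
        #include <asm/swiotlb.h>

        static bool foo_hw_present(void)        /* hypothetical probe */
        {
                return false;   /* real code would poke the hardware */
        }

        static int __init detect_foo_iommu(void)
        {
                /* A positive return value marks this IOMMU as detected. */
                return foo_hw_present() ? 1 : 0;
        }

        static void __init foo_early_init(void)
        {
                /* Called from pci_iommu_alloc(); no memory allocator yet. */
        }

        static void __init foo_late_init(void)
        {
                /* Called from pci_iommu_init(); kmalloc() and friends work. */
        }

        /* Run after pci_swiotlb_detect_4gb; stop the chain once detected. */
        IOMMU_INIT_FINISH(detect_foo_iommu, pci_swiotlb_detect_4gb,
                          foo_early_init, foo_late_init);

At boot, pci-dma.c sorts the .iommu_table entries by their 'depend' field and walks them in dependency order, as the header's own comments describe.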
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 5458380b6ef8..0bf5b0083650 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
| @@ -19,18 +19,16 @@ static inline int irq_canonicalize(int irq) | |||
| 19 | # define ARCH_HAS_NMI_WATCHDOG | 19 | # define ARCH_HAS_NMI_WATCHDOG |
| 20 | #endif | 20 | #endif |
| 21 | 21 | ||
| 22 | #ifdef CONFIG_4KSTACKS | 22 | #ifdef CONFIG_X86_32 |
| 23 | extern void irq_ctx_init(int cpu); | 23 | extern void irq_ctx_init(int cpu); |
| 24 | extern void irq_ctx_exit(int cpu); | 24 | extern void irq_ctx_exit(int cpu); |
| 25 | # define __ARCH_HAS_DO_SOFTIRQ | ||
| 26 | #else | 25 | #else |
| 27 | # define irq_ctx_init(cpu) do { } while (0) | 26 | # define irq_ctx_init(cpu) do { } while (0) |
| 28 | # define irq_ctx_exit(cpu) do { } while (0) | 27 | # define irq_ctx_exit(cpu) do { } while (0) |
| 29 | # ifdef CONFIG_X86_64 | ||
| 30 | # define __ARCH_HAS_DO_SOFTIRQ | ||
| 31 | # endif | ||
| 32 | #endif | 28 | #endif |
| 33 | 29 | ||
| 30 | #define __ARCH_HAS_DO_SOFTIRQ | ||
| 31 | |||
| 34 | #ifdef CONFIG_HOTPLUG_CPU | 32 | #ifdef CONFIG_HOTPLUG_CPU |
| 35 | #include <linux/cpumask.h> | 33 | #include <linux/cpumask.h> |
| 36 | extern void fixup_irqs(void); | 34 | extern void fixup_irqs(void); |
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index f275e2244505..1c23360fb2d8 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h | |||
| @@ -3,4 +3,39 @@ | |||
| 3 | 3 | ||
| 4 | #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) | 4 | #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) |
| 5 | 5 | ||
| 6 | #ifdef CONFIG_INTR_REMAP | ||
| 7 | static inline void prepare_irte(struct irte *irte, int vector, | ||
| 8 | unsigned int dest) | ||
| 9 | { | ||
| 10 | memset(irte, 0, sizeof(*irte)); | ||
| 11 | |||
| 12 | irte->present = 1; | ||
| 13 | irte->dst_mode = apic->irq_dest_mode; | ||
| 14 | /* | ||
| 15 | * Trigger mode in the IRTE will always be edge, and for IO-APIC, the | ||
| 16 | * actual level or edge trigger will be setup in the IO-APIC | ||
| 17 | * RTE. This will help simplify level triggered irq migration. | ||
| 18 | * For more details, see the comments (in io_apic.c) explaining IO-APIC | ||
| 19 | * irq migration in the presence of interrupt-remapping. | ||
| 20 | */ | ||
| 21 | irte->trigger_mode = 0; | ||
| 22 | irte->dlvry_mode = apic->irq_delivery_mode; | ||
| 23 | irte->vector = vector; | ||
| 24 | irte->dest_id = IRTE_DEST(dest); | ||
| 25 | irte->redir_hint = 1; | ||
| 26 | } | ||
| 27 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
| 28 | { | ||
| 29 | return cfg->irq_2_iommu.iommu != NULL; | ||
| 30 | } | ||
| 31 | #else | ||
| 32 | static inline void prepare_irte(struct irte *irte, int vector, unsigned int dest) | ||
| 33 | { | ||
| 34 | } | ||
| 35 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
| 36 | { | ||
| 37 | return false; | ||
| 38 | } | ||
| 39 | #endif | ||
| 40 | |||
| 6 | #endif /* _ASM_X86_IRQ_REMAPPING_H */ | 41 | #endif /* _ASM_X86_IRQ_REMAPPING_H */ |
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index e2ca30092557..6af0894dafb4 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
| @@ -114,9 +114,9 @@ | |||
| 114 | #define X86_PLATFORM_IPI_VECTOR 0xed | 114 | #define X86_PLATFORM_IPI_VECTOR 0xed |
| 115 | 115 | ||
| 116 | /* | 116 | /* |
| 117 | * Performance monitoring pending work vector: | 117 | * IRQ work vector: |
| 118 | */ | 118 | */ |
| 119 | #define LOCAL_PENDING_VECTOR 0xec | 119 | #define IRQ_WORK_VECTOR 0xec |
| 120 | 120 | ||
| 121 | #define UV_BAU_MESSAGE 0xea | 121 | #define UV_BAU_MESSAGE 0xea |
| 122 | 122 | ||
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 9e2b952f810a..5745ce8bf108 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
| @@ -61,22 +61,22 @@ static inline void native_halt(void) | |||
| 61 | #else | 61 | #else |
| 62 | #ifndef __ASSEMBLY__ | 62 | #ifndef __ASSEMBLY__ |
| 63 | 63 | ||
| 64 | static inline unsigned long __raw_local_save_flags(void) | 64 | static inline unsigned long arch_local_save_flags(void) |
| 65 | { | 65 | { |
| 66 | return native_save_fl(); | 66 | return native_save_fl(); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static inline void raw_local_irq_restore(unsigned long flags) | 69 | static inline void arch_local_irq_restore(unsigned long flags) |
| 70 | { | 70 | { |
| 71 | native_restore_fl(flags); | 71 | native_restore_fl(flags); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static inline void raw_local_irq_disable(void) | 74 | static inline void arch_local_irq_disable(void) |
| 75 | { | 75 | { |
| 76 | native_irq_disable(); | 76 | native_irq_disable(); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | static inline void raw_local_irq_enable(void) | 79 | static inline void arch_local_irq_enable(void) |
| 80 | { | 80 | { |
| 81 | native_irq_enable(); | 81 | native_irq_enable(); |
| 82 | } | 82 | } |
| @@ -85,7 +85,7 @@ static inline void raw_local_irq_enable(void) | |||
| 85 | * Used in the idle loop; sti takes one instruction cycle | 85 | * Used in the idle loop; sti takes one instruction cycle |
| 86 | * to complete: | 86 | * to complete: |
| 87 | */ | 87 | */ |
| 88 | static inline void raw_safe_halt(void) | 88 | static inline void arch_safe_halt(void) |
| 89 | { | 89 | { |
| 90 | native_safe_halt(); | 90 | native_safe_halt(); |
| 91 | } | 91 | } |
| @@ -102,12 +102,10 @@ static inline void halt(void) | |||
| 102 | /* | 102 | /* |
| 103 | * For spinlocks, etc: | 103 | * For spinlocks, etc: |
| 104 | */ | 104 | */ |
| 105 | static inline unsigned long __raw_local_irq_save(void) | 105 | static inline unsigned long arch_local_irq_save(void) |
| 106 | { | 106 | { |
| 107 | unsigned long flags = __raw_local_save_flags(); | 107 | unsigned long flags = arch_local_save_flags(); |
| 108 | 108 | arch_local_irq_disable(); | |
| 109 | raw_local_irq_disable(); | ||
| 110 | |||
| 111 | return flags; | 109 | return flags; |
| 112 | } | 110 | } |
| 113 | #else | 111 | #else |
| @@ -153,22 +151,16 @@ static inline unsigned long __raw_local_irq_save(void) | |||
| 153 | #endif /* CONFIG_PARAVIRT */ | 151 | #endif /* CONFIG_PARAVIRT */ |
| 154 | 152 | ||
| 155 | #ifndef __ASSEMBLY__ | 153 | #ifndef __ASSEMBLY__ |
| 156 | #define raw_local_save_flags(flags) \ | 154 | static inline int arch_irqs_disabled_flags(unsigned long flags) |
| 157 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
| 158 | |||
| 159 | #define raw_local_irq_save(flags) \ | ||
| 160 | do { (flags) = __raw_local_irq_save(); } while (0) | ||
| 161 | |||
| 162 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
| 163 | { | 155 | { |
| 164 | return !(flags & X86_EFLAGS_IF); | 156 | return !(flags & X86_EFLAGS_IF); |
| 165 | } | 157 | } |
| 166 | 158 | ||
| 167 | static inline int raw_irqs_disabled(void) | 159 | static inline int arch_irqs_disabled(void) |
| 168 | { | 160 | { |
| 169 | unsigned long flags = __raw_local_save_flags(); | 161 | unsigned long flags = arch_local_save_flags(); |
| 170 | 162 | ||
| 171 | return raw_irqs_disabled_flags(flags); | 163 | return arch_irqs_disabled_flags(flags); |
| 172 | } | 164 | } |
| 173 | 165 | ||
| 174 | #else | 166 | #else |
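For context, these arch_*() helpers are the backend of the generic local_irq_save()/local_irq_restore() macros in include/linux/irqflags.h. A minimal sketch of the usual calling pattern (the guarded counter is illustrative):

        #include <linux/irqflags.h>

        static unsigned long event_count;       /* illustrative IRQ-shared state */

        static void count_event(void)
        {
                unsigned long flags;

                local_irq_save(flags);          /* arch_local_irq_save() underneath */
                event_count++;                  /* safe from local interrupts */
                local_irq_restore(flags);       /* arch_local_irq_restore(flags) */
        }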
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h new file mode 100644 index 000000000000..f52d42e80585 --- /dev/null +++ b/arch/x86/include/asm/jump_label.h | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | #ifndef _ASM_X86_JUMP_LABEL_H | ||
| 2 | #define _ASM_X86_JUMP_LABEL_H | ||
| 3 | |||
| 4 | #ifdef __KERNEL__ | ||
| 5 | |||
| 6 | #include <linux/types.h> | ||
| 7 | #include <asm/nops.h> | ||
| 8 | |||
| 9 | #define JUMP_LABEL_NOP_SIZE 5 | ||
| 10 | |||
| 11 | # define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" | ||
| 12 | |||
| 13 | # define JUMP_LABEL(key, label) \ | ||
| 14 | do { \ | ||
| 15 | asm goto("1:" \ | ||
| 16 | JUMP_LABEL_INITIAL_NOP \ | ||
| 17 | ".pushsection __jump_table, \"a\" \n\t"\ | ||
| 18 | _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ | ||
| 19 | ".popsection \n\t" \ | ||
| 20 | : : "i" (key) : : label); \ | ||
| 21 | } while (0) | ||
| 22 | |||
| 23 | #endif /* __KERNEL__ */ | ||
| 24 | |||
| 25 | #ifdef CONFIG_X86_64 | ||
| 26 | typedef u64 jump_label_t; | ||
| 27 | #else | ||
| 28 | typedef u32 jump_label_t; | ||
| 29 | #endif | ||
| 30 | |||
| 31 | struct jump_entry { | ||
| 32 | jump_label_t code; | ||
| 33 | jump_label_t target; | ||
| 34 | jump_label_t key; | ||
| 35 | }; | ||
| 36 | |||
| 37 | #endif | ||
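A hedged sketch of how a JUMP_LABEL() site is meant to be used (the key variable, label name, and printk body are illustrative; enabling and disabling a key goes through the generic jump-label code of this kernel generation, e.g. jump_label_enable(&my_trace_key)):

        #include <linux/jump_label.h>
        #include <linux/kernel.h>

        static int my_trace_key;        /* the key's address identifies the site */

        static void maybe_trace(void)
        {
                /* Emits a 5-byte NOP by default; patched to a jump when enabled. */
                JUMP_LABEL(&my_trace_key, do_trace);
                return;
        do_trace:
                printk(KERN_DEBUG "trace point hit\n");
        }

Until the key is enabled, the site costs only the 5-byte NOP reserved by JUMP_LABEL_INITIAL_NOP.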
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 51cfd730ac5d..1f99ecfc48e1 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
| @@ -152,9 +152,14 @@ struct x86_emulate_ops { | |||
| 152 | struct operand { | 152 | struct operand { |
| 153 | enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; | 153 | enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; |
| 154 | unsigned int bytes; | 154 | unsigned int bytes; |
| 155 | unsigned long orig_val, *ptr; | 155 | union { |
| 156 | unsigned long orig_val; | ||
| 157 | u64 orig_val64; | ||
| 158 | }; | ||
| 159 | unsigned long *ptr; | ||
| 156 | union { | 160 | union { |
| 157 | unsigned long val; | 161 | unsigned long val; |
| 162 | u64 val64; | ||
| 158 | char valptr[sizeof(unsigned long) + 2]; | 163 | char valptr[sizeof(unsigned long) + 2]; |
| 159 | }; | 164 | }; |
| 160 | }; | 165 | }; |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 502e53f999cf..c52e2eb40a1e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | |||
| 652 | return (struct kvm_mmu_page *)page_private(page); | 652 | return (struct kvm_mmu_page *)page_private(page); |
| 653 | } | 653 | } |
| 654 | 654 | ||
| 655 | static inline u16 kvm_read_fs(void) | ||
| 656 | { | ||
| 657 | u16 seg; | ||
| 658 | asm("mov %%fs, %0" : "=g"(seg)); | ||
| 659 | return seg; | ||
| 660 | } | ||
| 661 | |||
| 662 | static inline u16 kvm_read_gs(void) | ||
| 663 | { | ||
| 664 | u16 seg; | ||
| 665 | asm("mov %%gs, %0" : "=g"(seg)); | ||
| 666 | return seg; | ||
| 667 | } | ||
| 668 | |||
| 669 | static inline u16 kvm_read_ldt(void) | 655 | static inline u16 kvm_read_ldt(void) |
| 670 | { | 656 | { |
| 671 | u16 ldt; | 657 | u16 ldt; |
| @@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void) | |||
| 673 | return ldt; | 659 | return ldt; |
| 674 | } | 660 | } |
| 675 | 661 | ||
| 676 | static inline void kvm_load_fs(u16 sel) | ||
| 677 | { | ||
| 678 | asm("mov %0, %%fs" : : "rm"(sel)); | ||
| 679 | } | ||
| 680 | |||
| 681 | static inline void kvm_load_gs(u16 sel) | ||
| 682 | { | ||
| 683 | asm("mov %0, %%gs" : : "rm"(sel)); | ||
| 684 | } | ||
| 685 | |||
| 686 | static inline void kvm_load_ldt(u16 sel) | 662 | static inline void kvm_load_ldt(u16 sel) |
| 687 | { | 663 | { |
| 688 | asm("lldt %0" : : "rm"(sel)); | 664 | asm("lldt %0" : : "rm"(sel)); |
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h new file mode 100644 index 000000000000..19ae14ba6978 --- /dev/null +++ b/arch/x86/include/asm/memblock.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | #ifndef _X86_MEMBLOCK_H | ||
| 2 | #define _X86_MEMBLOCK_H | ||
| 3 | |||
| 4 | #define ARCH_DISCARD_MEMBLOCK | ||
| 5 | |||
| 6 | u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); | ||
| 7 | void memblock_x86_to_bootmem(u64 start, u64 end); | ||
| 8 | |||
| 9 | void memblock_x86_reserve_range(u64 start, u64 end, char *name); | ||
| 10 | void memblock_x86_free_range(u64 start, u64 end); | ||
| 11 | struct range; | ||
| 12 | int __get_free_all_memory_range(struct range **range, int nodeid, | ||
| 13 | unsigned long start_pfn, unsigned long end_pfn); | ||
| 14 | int get_free_all_memory_range(struct range **rangep, int nodeid); | ||
| 15 | |||
| 16 | void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, | ||
| 17 | unsigned long last_pfn); | ||
| 18 | u64 memblock_x86_hole_size(u64 start, u64 end); | ||
| 19 | u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); | ||
| 20 | u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); | ||
| 21 | u64 memblock_x86_memory_in_range(u64 addr, u64 limit); | ||
| 22 | |||
| 23 | #endif | ||
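A minimal sketch of the reserve/free pair declared above (the addresses and region name are illustrative); these helpers operate on physical address ranges during early boot:

        #include <linux/memblock.h>
        #include <asm/memblock.h>

        static void __init reserve_example_region(void)
        {
                /* Reserve a firmware-owned range before bootmem exists... */
                memblock_x86_reserve_range(0x9d000ULL, 0xa0000ULL, "EXAMPLE");
                /* ...and hand it back if it turns out to be unneeded. */
                memblock_x86_free_range(0x9d000ULL, 0xa0000ULL);
        }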
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 3e2ce58a31a3..67763c5d8b4e 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h | |||
| @@ -60,12 +60,7 @@ | |||
| 60 | #endif | 60 | #endif |
| 61 | 61 | ||
| 62 | #ifdef CONFIG_X86_32 | 62 | #ifdef CONFIG_X86_32 |
| 63 | # ifdef CONFIG_4KSTACKS | 63 | # define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY |
| 64 | # define MODULE_STACKSIZE "4KSTACKS " | ||
| 65 | # else | ||
| 66 | # define MODULE_STACKSIZE "" | ||
| 67 | # endif | ||
| 68 | # define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE | ||
| 69 | #endif | 64 | #endif |
| 70 | 65 | ||
| 71 | #endif /* _ASM_X86_MODULE_H */ | 66 | #endif /* _ASM_X86_MODULE_H */ |
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h index 16350740edf6..4a711a684b17 100644 --- a/arch/x86/include/asm/mrst.h +++ b/arch/x86/include/asm/mrst.h | |||
| @@ -10,6 +10,9 @@ | |||
| 10 | */ | 10 | */ |
| 11 | #ifndef _ASM_X86_MRST_H | 11 | #ifndef _ASM_X86_MRST_H |
| 12 | #define _ASM_X86_MRST_H | 12 | #define _ASM_X86_MRST_H |
| 13 | |||
| 14 | #include <linux/sfi.h> | ||
| 15 | |||
| 13 | extern int pci_mrst_init(void); | 16 | extern int pci_mrst_init(void); |
| 14 | int __init sfi_parse_mrtc(struct sfi_table_header *table); | 17 | int __init sfi_parse_mrtc(struct sfi_table_header *table); |
| 15 | 18 | ||
| @@ -26,7 +29,7 @@ enum mrst_cpu_type { | |||
| 26 | }; | 29 | }; |
| 27 | 30 | ||
| 28 | extern enum mrst_cpu_type __mrst_cpu_chip; | 31 | extern enum mrst_cpu_type __mrst_cpu_chip; |
| 29 | static enum mrst_cpu_type mrst_identify_cpu(void) | 32 | static inline enum mrst_cpu_type mrst_identify_cpu(void) |
| 30 | { | 33 | { |
| 31 | return __mrst_cpu_chip; | 34 | return __mrst_cpu_chip; |
| 32 | } | 35 | } |
| @@ -42,4 +45,9 @@ extern enum mrst_timer_options mrst_timer_options; | |||
| 42 | #define SFI_MTMR_MAX_NUM 8 | 45 | #define SFI_MTMR_MAX_NUM 8 |
| 43 | #define SFI_MRTC_MAX 8 | 46 | #define SFI_MRTC_MAX 8 |
| 44 | 47 | ||
| 48 | extern struct console early_mrst_console; | ||
| 49 | extern void mrst_early_console_init(void); | ||
| 50 | |||
| 51 | extern struct console early_hsu_console; | ||
| 52 | extern void hsu_early_console_init(void); | ||
| 45 | #endif /* _ASM_X86_MRST_H */ | 53 | #endif /* _ASM_X86_MRST_H */ |
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h new file mode 100644 index 000000000000..bcdff997668c --- /dev/null +++ b/arch/x86/include/asm/mwait.h | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | #ifndef _ASM_X86_MWAIT_H | ||
| 2 | #define _ASM_X86_MWAIT_H | ||
| 3 | |||
| 4 | #define MWAIT_SUBSTATE_MASK 0xf | ||
| 5 | #define MWAIT_CSTATE_MASK 0xf | ||
| 6 | #define MWAIT_SUBSTATE_SIZE 4 | ||
| 7 | #define MWAIT_MAX_NUM_CSTATES 8 | ||
| 8 | |||
| 9 | #define CPUID_MWAIT_LEAF 5 | ||
| 10 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 | ||
| 11 | #define CPUID5_ECX_INTERRUPT_BREAK 0x2 | ||
| 12 | |||
| 13 | #define MWAIT_ECX_INTERRUPT_BREAK 0x1 | ||
| 14 | |||
| 15 | #endif /* _ASM_X86_MWAIT_H */ | ||
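These constants decode CPUID leaf 5 (the MONITOR/MWAIT leaf). A sketch of the usual pattern, assuming the standard cpuid_count() helper, for reading how many MWAIT sub-states a given C-state supports:

        #include <asm/processor.h>      /* cpuid_count() */
        #include <asm/mwait.h>

        static unsigned int mwait_substates_for(unsigned int cstate)
        {
                unsigned int eax, ebx, ecx, edx;

                cpuid_count(CPUID_MWAIT_LEAF, 0, &eax, &ebx, &ecx, &edx);
                if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED))
                        return 0;
                /* edx holds 4 bits of sub-state count per C-state. */
                return (edx >> (cstate * MWAIT_SUBSTATE_SIZE)) &
                        MWAIT_SUBSTATE_MASK;
        }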
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h index 08fde475cb3b..2a8478140bb3 100644 --- a/arch/x86/include/asm/olpc_ofw.h +++ b/arch/x86/include/asm/olpc_ofw.h | |||
| @@ -21,10 +21,14 @@ extern void olpc_ofw_detect(void); | |||
| 21 | /* install OFW's pde permanently into the kernel's pgtable */ | 21 | /* install OFW's pde permanently into the kernel's pgtable */ |
| 22 | extern void setup_olpc_ofw_pgd(void); | 22 | extern void setup_olpc_ofw_pgd(void); |
| 23 | 23 | ||
| 24 | /* check if OFW was detected during boot */ | ||
| 25 | extern bool olpc_ofw_present(void); | ||
| 26 | |||
| 24 | #else /* !CONFIG_OLPC_OPENFIRMWARE */ | 27 | #else /* !CONFIG_OLPC_OPENFIRMWARE */ |
| 25 | 28 | ||
| 26 | static inline void olpc_ofw_detect(void) { } | 29 | static inline void olpc_ofw_detect(void) { } |
| 27 | static inline void setup_olpc_ofw_pgd(void) { } | 30 | static inline void setup_olpc_ofw_pgd(void) { } |
| 31 | static inline bool olpc_ofw_present(void) { return false; } | ||
| 28 | 32 | ||
| 29 | #endif /* !CONFIG_OLPC_OPENFIRMWARE */ | 33 | #endif /* !CONFIG_OLPC_OPENFIRMWARE */ |
| 30 | 34 | ||
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index 6f1b7331313f..ade619ff9e2a 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h | |||
| @@ -15,11 +15,7 @@ | |||
| 15 | */ | 15 | */ |
| 16 | #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) | 16 | #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) |
| 17 | 17 | ||
| 18 | #ifdef CONFIG_4KSTACKS | ||
| 19 | #define THREAD_ORDER 0 | ||
| 20 | #else | ||
| 21 | #define THREAD_ORDER 1 | 18 | #define THREAD_ORDER 1 |
| 22 | #endif | ||
| 23 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) | 19 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) |
| 24 | 20 | ||
| 25 | #define STACKFAULT_STACK 0 | 21 | #define STACKFAULT_STACK 0 |
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index a667f24c7254..1df66211fd1b 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | 8 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
| 9 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 9 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
| 10 | 10 | ||
| 11 | #define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1) | 11 | #define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) |
| 12 | #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) | 12 | #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) |
| 13 | 13 | ||
| 14 | /* Cast PAGE_MASK to a signed type so that it is sign-extended if | 14 | /* Cast PAGE_MASK to a signed type so that it is sign-extended if |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5653f43d90e5..18e3b8a8709f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
| @@ -105,7 +105,7 @@ static inline void write_cr8(unsigned long x) | |||
| 105 | } | 105 | } |
| 106 | #endif | 106 | #endif |
| 107 | 107 | ||
| 108 | static inline void raw_safe_halt(void) | 108 | static inline void arch_safe_halt(void) |
| 109 | { | 109 | { |
| 110 | PVOP_VCALL0(pv_irq_ops.safe_halt); | 110 | PVOP_VCALL0(pv_irq_ops.safe_halt); |
| 111 | } | 111 | } |
| @@ -416,11 +416,6 @@ static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) | |||
| 416 | PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); | 416 | PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); |
| 417 | } | 417 | } |
| 418 | 418 | ||
| 419 | static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn, | ||
| 420 | unsigned long start, unsigned long count) | ||
| 421 | { | ||
| 422 | PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count); | ||
| 423 | } | ||
| 424 | static inline void paravirt_release_pmd(unsigned long pfn) | 419 | static inline void paravirt_release_pmd(unsigned long pfn) |
| 425 | { | 420 | { |
| 426 | PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); | 421 | PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); |
| @@ -829,32 +824,32 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) | |||
| 829 | #define __PV_IS_CALLEE_SAVE(func) \ | 824 | #define __PV_IS_CALLEE_SAVE(func) \ |
| 830 | ((struct paravirt_callee_save) { func }) | 825 | ((struct paravirt_callee_save) { func }) |
| 831 | 826 | ||
| 832 | static inline unsigned long __raw_local_save_flags(void) | 827 | static inline unsigned long arch_local_save_flags(void) |
| 833 | { | 828 | { |
| 834 | return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); | 829 | return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); |
| 835 | } | 830 | } |
| 836 | 831 | ||
| 837 | static inline void raw_local_irq_restore(unsigned long f) | 832 | static inline void arch_local_irq_restore(unsigned long f) |
| 838 | { | 833 | { |
| 839 | PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); | 834 | PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); |
| 840 | } | 835 | } |
| 841 | 836 | ||
| 842 | static inline void raw_local_irq_disable(void) | 837 | static inline void arch_local_irq_disable(void) |
| 843 | { | 838 | { |
| 844 | PVOP_VCALLEE0(pv_irq_ops.irq_disable); | 839 | PVOP_VCALLEE0(pv_irq_ops.irq_disable); |
| 845 | } | 840 | } |
| 846 | 841 | ||
| 847 | static inline void raw_local_irq_enable(void) | 842 | static inline void arch_local_irq_enable(void) |
| 848 | { | 843 | { |
| 849 | PVOP_VCALLEE0(pv_irq_ops.irq_enable); | 844 | PVOP_VCALLEE0(pv_irq_ops.irq_enable); |
| 850 | } | 845 | } |
| 851 | 846 | ||
| 852 | static inline unsigned long __raw_local_irq_save(void) | 847 | static inline unsigned long arch_local_irq_save(void) |
| 853 | { | 848 | { |
| 854 | unsigned long f; | 849 | unsigned long f; |
| 855 | 850 | ||
| 856 | f = __raw_local_save_flags(); | 851 | f = arch_local_save_flags(); |
| 857 | raw_local_irq_disable(); | 852 | arch_local_irq_disable(); |
| 858 | return f; | 853 | return f; |
| 859 | } | 854 | } |
| 860 | 855 | ||
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index db9ef5532341..b82bac975250 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
| @@ -255,7 +255,6 @@ struct pv_mmu_ops { | |||
| 255 | */ | 255 | */ |
| 256 | void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); | 256 | void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); |
| 257 | void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); | 257 | void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); |
| 258 | void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count); | ||
| 259 | void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); | 258 | void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); |
| 260 | void (*release_pte)(unsigned long pfn); | 259 | void (*release_pte)(unsigned long pfn); |
| 261 | void (*release_pmd)(unsigned long pfn); | 260 | void (*release_pmd)(unsigned long pfn); |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 404a880ea325..d395540ff894 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
| @@ -27,6 +27,9 @@ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, | |||
| 27 | int node); | 27 | int node); |
| 28 | extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); | 28 | extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); |
| 29 | 29 | ||
| 30 | #ifdef CONFIG_PCI | ||
| 31 | |||
| 32 | #ifdef CONFIG_PCI_DOMAINS | ||
| 30 | static inline int pci_domain_nr(struct pci_bus *bus) | 33 | static inline int pci_domain_nr(struct pci_bus *bus) |
| 31 | { | 34 | { |
| 32 | struct pci_sysdata *sd = bus->sysdata; | 35 | struct pci_sysdata *sd = bus->sysdata; |
| @@ -37,13 +40,12 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
| 37 | { | 40 | { |
| 38 | return pci_domain_nr(bus); | 41 | return pci_domain_nr(bus); |
| 39 | } | 42 | } |
| 40 | 43 | #endif | |
| 41 | 44 | ||
| 42 | /* Can be used to override the logic in pci_scan_bus for skipping | 45 | /* Can be used to override the logic in pci_scan_bus for skipping |
| 43 | already-configured bus numbers - to be used for buggy BIOSes | 46 | already-configured bus numbers - to be used for buggy BIOSes |
| 44 | or architectures with incomplete PCI setup by the loader */ | 47 | or architectures with incomplete PCI setup by the loader */ |
| 45 | 48 | ||
| 46 | #ifdef CONFIG_PCI | ||
| 47 | extern unsigned int pcibios_assign_all_busses(void); | 49 | extern unsigned int pcibios_assign_all_busses(void); |
| 48 | extern int pci_legacy_init(void); | 50 | extern int pci_legacy_init(void); |
| 49 | # ifdef CONFIG_ACPI | 51 | # ifdef CONFIG_ACPI |
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index cd28f9ad910d..f899e01a8ac9 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
| @@ -47,6 +47,20 @@ | |||
| 47 | #ifdef CONFIG_SMP | 47 | #ifdef CONFIG_SMP |
| 48 | #define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x | 48 | #define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x |
| 49 | #define __my_cpu_offset percpu_read(this_cpu_off) | 49 | #define __my_cpu_offset percpu_read(this_cpu_off) |
| 50 | |||
| 51 | /* | ||
| 52 | * Compared to the generic __my_cpu_offset version, the following | ||
| 53 | * saves one instruction and avoids clobbering a temp register. | ||
| 54 | */ | ||
| 55 | #define __this_cpu_ptr(ptr) \ | ||
| 56 | ({ \ | ||
| 57 | unsigned long tcp_ptr__; \ | ||
| 58 | __verify_pcpu_ptr(ptr); \ | ||
| 59 | asm volatile("add " __percpu_arg(1) ", %0" \ | ||
| 60 | : "=r" (tcp_ptr__) \ | ||
| 61 | : "m" (this_cpu_off), "0" (ptr)); \ | ||
| 62 | (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \ | ||
| 63 | }) | ||
| 50 | #else | 64 | #else |
| 51 | #define __percpu_arg(x) "%P" #x | 65 | #define __percpu_arg(x) "%P" #x |
| 52 | #endif | 66 | #endif |
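A short sketch of what the __this_cpu_ptr() override buys (the stats struct is illustrative): the per-CPU pointer computation folds into a single add against the %gs-based this_cpu_off, with no temporary register clobbered. The caller must keep preemption disabled for the pointer to stay meaningful:

        #include <linux/percpu.h>

        struct hit_stats { unsigned long hits; };       /* illustrative */
        static DEFINE_PER_CPU(struct hit_stats, hit_stats);

        static void bump_hits(void)
        {
                /* One 'add %gs:this_cpu_off, reg' instead of a separate load;
                 * call with preemption disabled. */
                struct hit_stats *s = __this_cpu_ptr(&hit_stats);
                s->hits++;
        }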
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index def500776b16..a70cd216be5d 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h | |||
| @@ -36,19 +36,6 @@ | |||
| 36 | #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) | 36 | #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) |
| 37 | #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) | 37 | #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) |
| 38 | 38 | ||
| 39 | /* Non HT mask */ | ||
| 40 | #define P4_ESCR_MASK \ | ||
| 41 | (P4_ESCR_EVENT_MASK | \ | ||
| 42 | P4_ESCR_EVENTMASK_MASK | \ | ||
| 43 | P4_ESCR_TAG_MASK | \ | ||
| 44 | P4_ESCR_TAG_ENABLE | \ | ||
| 45 | P4_ESCR_T0_OS | \ | ||
| 46 | P4_ESCR_T0_USR) | ||
| 47 | |||
| 48 | /* HT mask */ | ||
| 49 | #define P4_ESCR_MASK_HT \ | ||
| 50 | (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR) | ||
| 51 | |||
| 52 | #define P4_CCCR_OVF 0x80000000U | 39 | #define P4_CCCR_OVF 0x80000000U |
| 53 | #define P4_CCCR_CASCADE 0x40000000U | 40 | #define P4_CCCR_CASCADE 0x40000000U |
| 54 | #define P4_CCCR_OVF_PMI_T0 0x04000000U | 41 | #define P4_CCCR_OVF_PMI_T0 0x04000000U |
| @@ -70,23 +57,6 @@ | |||
| 70 | #define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) | 57 | #define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) |
| 71 | #define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) | 58 | #define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) |
| 72 | 59 | ||
| 73 | /* Non HT mask */ | ||
| 74 | #define P4_CCCR_MASK \ | ||
| 75 | (P4_CCCR_OVF | \ | ||
| 76 | P4_CCCR_CASCADE | \ | ||
| 77 | P4_CCCR_OVF_PMI_T0 | \ | ||
| 78 | P4_CCCR_FORCE_OVF | \ | ||
| 79 | P4_CCCR_EDGE | \ | ||
| 80 | P4_CCCR_THRESHOLD_MASK | \ | ||
| 81 | P4_CCCR_COMPLEMENT | \ | ||
| 82 | P4_CCCR_COMPARE | \ | ||
| 83 | P4_CCCR_ESCR_SELECT_MASK | \ | ||
| 84 | P4_CCCR_ENABLE) | ||
| 85 | |||
| 86 | /* HT mask */ | ||
| 87 | #define P4_CCCR_MASK_HT \ | ||
| 88 | (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY) | ||
| 89 | |||
| 90 | #define P4_GEN_ESCR_EMASK(class, name, bit) \ | 60 | #define P4_GEN_ESCR_EMASK(class, name, bit) \ |
| 91 | class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) | 61 | class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) |
| 92 | #define P4_ESCR_EMASK_BIT(class, name) class##__##name | 62 | #define P4_ESCR_EMASK_BIT(class, name) class##__##name |
| @@ -127,6 +97,28 @@ | |||
| 127 | #define P4_CONFIG_HT_SHIFT 63 | 97 | #define P4_CONFIG_HT_SHIFT 63 |
| 128 | #define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT) | 98 | #define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT) |
| 129 | 99 | ||
| 100 | /* | ||
| 101 | * The bits we allow to pass for RAW events | ||
| 102 | */ | ||
| 103 | #define P4_CONFIG_MASK_ESCR \ | ||
| 104 | (P4_ESCR_EVENT_MASK | \ | ||
| 105 | P4_ESCR_EVENTMASK_MASK | \ | ||
| 106 | P4_ESCR_TAG_MASK | \ | ||
| 107 | P4_ESCR_TAG_ENABLE) | ||
| 108 | |||
| 109 | #define P4_CONFIG_MASK_CCCR \ | ||
| 110 | (P4_CCCR_EDGE | \ | ||
| 111 | P4_CCCR_THRESHOLD_MASK | \ | ||
| 112 | P4_CCCR_COMPLEMENT | \ | ||
| 113 | P4_CCCR_COMPARE | \ | ||
| 114 | P4_CCCR_THREAD_ANY | \ | ||
| 115 | P4_CCCR_RESERVED) | ||
| 116 | |||
| 117 | /* some dangerous bits are reserved for kernel internals */ | ||
| 118 | #define P4_CONFIG_MASK \ | ||
| 119 | (p4_config_pack_escr(P4_CONFIG_MASK_ESCR) | \ | ||
| 120 | p4_config_pack_cccr(P4_CONFIG_MASK_CCCR)) | ||
| 121 | |||
| 130 | static inline bool p4_is_event_cascaded(u64 config) | 122 | static inline bool p4_is_event_cascaded(u64 config) |
| 131 | { | 123 | { |
| 132 | u32 cccr = p4_config_unpack_cccr(config); | 124 | u32 cccr = p4_config_unpack_cccr(config); |
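For context on P4_CONFIG_MASK above: when a RAW event arrives from userspace, the driver is expected to keep only the bits the mask allows, so reserved and kernel-owned ESCR/CCCR bits cannot be smuggled in. A hedged one-liner sketch (the function name is illustrative):

        #include <linux/types.h>
        #include <asm/perf_event_p4.h>

        /* Keep only user-settable ESCR/CCCR bits of a RAW event config. */
        static u64 p4_sanitize_raw_config(u64 raw)
        {
                return raw & P4_CONFIG_MASK;
        }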
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a34c785c5a63..ada823a13c7c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
| @@ -28,6 +28,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
| 28 | extern spinlock_t pgd_lock; | 28 | extern spinlock_t pgd_lock; |
| 29 | extern struct list_head pgd_list; | 29 | extern struct list_head pgd_list; |
| 30 | 30 | ||
| 31 | extern struct mm_struct *pgd_page_get_mm(struct page *page); | ||
| 32 | |||
| 31 | #ifdef CONFIG_PARAVIRT | 33 | #ifdef CONFIG_PARAVIRT |
| 32 | #include <asm/paravirt.h> | 34 | #include <asm/paravirt.h> |
| 33 | #else /* !CONFIG_PARAVIRT */ | 35 | #else /* !CONFIG_PARAVIRT */ |
| @@ -603,6 +605,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, | |||
| 603 | pte_update(mm, addr, ptep); | 605 | pte_update(mm, addr, ptep); |
| 604 | } | 606 | } |
| 605 | 607 | ||
| 608 | #define flush_tlb_fix_spurious_fault(vma, address) | ||
| 609 | |||
| 606 | /* | 610 | /* |
| 607 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | 611 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); |
| 608 | * | 612 | * |
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index f686f49e8b7b..8abde9ec90bf 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
| @@ -26,7 +26,7 @@ struct mm_struct; | |||
| 26 | struct vm_area_struct; | 26 | struct vm_area_struct; |
| 27 | 27 | ||
| 28 | extern pgd_t swapper_pg_dir[1024]; | 28 | extern pgd_t swapper_pg_dir[1024]; |
| 29 | extern pgd_t trampoline_pg_dir[1024]; | 29 | extern pgd_t initial_page_table[1024]; |
| 30 | 30 | ||
| 31 | static inline void pgtable_cache_init(void) { } | 31 | static inline void pgtable_cache_init(void) { } |
| 32 | static inline void check_pgt_cache(void) { } | 32 | static inline void check_pgt_cache(void) { } |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 076052cd62be..f96ac9bedf75 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
| @@ -102,6 +102,8 @@ static inline void native_pgd_clear(pgd_t *pgd) | |||
| 102 | native_set_pgd(pgd, native_make_pgd(0)); | 102 | native_set_pgd(pgd, native_make_pgd(0)); |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | extern void sync_global_pgds(unsigned long start, unsigned long end); | ||
| 106 | |||
| 105 | /* | 107 | /* |
| 106 | * Conversion functions: convert a page and protection to a page entry, | 108 | * Conversion functions: convert a page and protection to a page entry, |
| 107 | * and a page entry and page directory to the page they refer to. | 109 | * and a page entry and page directory to the page they refer to. |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 325b7bdbebaa..cae9c3cb95cf 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
| @@ -110,6 +110,8 @@ struct cpuinfo_x86 { | |||
| 110 | u16 phys_proc_id; | 110 | u16 phys_proc_id; |
| 111 | /* Core id: */ | 111 | /* Core id: */ |
| 112 | u16 cpu_core_id; | 112 | u16 cpu_core_id; |
| 113 | /* Compute unit id */ | ||
| 114 | u8 compute_unit_id; | ||
| 113 | /* Index into per_cpu list: */ | 115 | /* Index into per_cpu list: */ |
| 114 | u16 cpu_index; | 116 | u16 cpu_index; |
| 115 | #endif | 117 | #endif |
| @@ -602,7 +604,7 @@ extern unsigned long mmu_cr4_features; | |||
| 602 | 604 | ||
| 603 | static inline void set_in_cr4(unsigned long mask) | 605 | static inline void set_in_cr4(unsigned long mask) |
| 604 | { | 606 | { |
| 605 | unsigned cr4; | 607 | unsigned long cr4; |
| 606 | 608 | ||
| 607 | mmu_cr4_features |= mask; | 609 | mmu_cr4_features |= mask; |
| 608 | cr4 = read_cr4(); | 610 | cr4 = read_cr4(); |
| @@ -612,7 +614,7 @@ static inline void set_in_cr4(unsigned long mask) | |||
| 612 | 614 | ||
| 613 | static inline void clear_in_cr4(unsigned long mask) | 615 | static inline void clear_in_cr4(unsigned long mask) |
| 614 | { | 616 | { |
| 615 | unsigned cr4; | 617 | unsigned long cr4; |
| 616 | 618 | ||
| 617 | mmu_cr4_features &= ~mask; | 619 | mmu_cr4_features &= ~mask; |
| 618 | cr4 = read_cr4(); | 620 | cr4 = read_cr4(); |
| @@ -764,29 +766,6 @@ extern unsigned long idle_halt; | |||
| 764 | extern unsigned long idle_nomwait; | 766 | extern unsigned long idle_nomwait; |
| 765 | extern bool c1e_detected; | 767 | extern bool c1e_detected; |
| 766 | 768 | ||
| 767 | /* | ||
| 768 | * on systems with caches, caches must be flashed as the absolute | ||
| 769 | * last instruction before going into a suspended halt. Otherwise, | ||
| 770 | * dirty data can linger in the cache and become stale on resume, | ||
| 771 | * leading to strange errors. | ||
| 772 | * | ||
| 773 | * perform a variety of operations to guarantee that the compiler | ||
| 774 | * will not reorder instructions. wbinvd itself is serializing | ||
| 775 | * so the processor will not reorder. | ||
| 776 | * | ||
| 777 | * Systems without cache can just go into halt. | ||
| 778 | */ | ||
| 779 | static inline void wbinvd_halt(void) | ||
| 780 | { | ||
| 781 | mb(); | ||
| 782 | /* check for clflush to determine if wbinvd is legal */ | ||
| 783 | if (cpu_has_clflush) | ||
| 784 | asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); | ||
| 785 | else | ||
| 786 | while (1) | ||
| 787 | halt(); | ||
| 788 | } | ||
| 789 | |||
| 790 | extern void enable_sep_cpu(void); | 769 | extern void enable_sep_cpu(void); |
| 791 | extern int sysenter_setup(void); | 770 | extern int sysenter_setup(void); |
| 792 | 771 | ||
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ef292c792d74..d6763b139a84 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
| @@ -93,6 +93,11 @@ void *extend_brk(size_t size, size_t align); | |||
| 93 | : : "i" (sz)); \ | 93 | : : "i" (sz)); \ |
| 94 | } | 94 | } |
| 95 | 95 | ||
| 96 | /* Helper for reserving space for arrays of things */ | ||
| 97 | #define RESERVE_BRK_ARRAY(type, name, entries) \ | ||
| 98 | type *name; \ | ||
| 99 | RESERVE_BRK(name, sizeof(type) * entries) | ||
| 100 | |||
| 96 | #ifdef __i386__ | 101 | #ifdef __i386__ |
| 97 | 102 | ||
| 98 | void __init i386_start_kernel(void); | 103 | void __init i386_start_kernel(void); |
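The RESERVE_BRK_ARRAY() helper above pairs a pointer declaration with a matching reservation in the .brk section; the pointer itself is filled in later via extend_brk(). A hedged sketch of the intended use (array name and size are illustrative):

        #include <linux/init.h>
        #include <asm/setup.h>

        /* Declares 'unsigned long *early_map' and reserves 64 slots in .brk. */
        static RESERVE_BRK_ARRAY(unsigned long, early_map, 64);

        static void __init early_map_init(void)
        {
                /* Carve the reserved space out of the brk area at early boot. */
                early_map = extend_brk(sizeof(unsigned long) * 64,
                                       sizeof(unsigned long));
        }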
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index 8085277e1b8b..977f1761a25d 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h | |||
| @@ -5,17 +5,26 @@ | |||
| 5 | 5 | ||
| 6 | #ifdef CONFIG_SWIOTLB | 6 | #ifdef CONFIG_SWIOTLB |
| 7 | extern int swiotlb; | 7 | extern int swiotlb; |
| 8 | extern int __init pci_swiotlb_detect(void); | 8 | extern int __init pci_swiotlb_detect_override(void); |
| 9 | extern int __init pci_swiotlb_detect_4gb(void); | ||
| 9 | extern void __init pci_swiotlb_init(void); | 10 | extern void __init pci_swiotlb_init(void); |
| 11 | extern void __init pci_swiotlb_late_init(void); | ||
| 10 | #else | 12 | #else |
| 11 | #define swiotlb 0 | 13 | #define swiotlb 0 |
| 12 | static inline int pci_swiotlb_detect(void) | 14 | static inline int pci_swiotlb_detect_override(void) |
| 15 | { | ||
| 16 | return 0; | ||
| 17 | } | ||
| 18 | static inline int pci_swiotlb_detect_4gb(void) | ||
| 13 | { | 19 | { |
| 14 | return 0; | 20 | return 0; |
| 15 | } | 21 | } |
| 16 | static inline void pci_swiotlb_init(void) | 22 | static inline void pci_swiotlb_init(void) |
| 17 | { | 23 | { |
| 18 | } | 24 | } |
| 25 | static inline void pci_swiotlb_late_init(void) | ||
| 26 | { | ||
| 27 | } | ||
| 19 | #endif | 28 | #endif |
| 20 | 29 | ||
| 21 | static inline void dma_mark_clean(void *addr, size_t size) {} | 30 | static inline void dma_mark_clean(void *addr, size_t size) {} |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 7f3eba08e7de..169be8938b96 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
| @@ -172,6 +172,4 @@ static inline void flush_tlb_kernel_range(unsigned long start, | |||
| 172 | flush_tlb_all(); | 172 | flush_tlb_all(); |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | extern void zap_low_mappings(bool early); | ||
| 176 | |||
| 177 | #endif /* _ASM_X86_TLBFLUSH_H */ | 175 | #endif /* _ASM_X86_TLBFLUSH_H */ |
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index 4dde797c0578..f4500fb3b485 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h | |||
| @@ -13,16 +13,13 @@ extern unsigned char *trampoline_base; | |||
| 13 | 13 | ||
| 14 | extern unsigned long init_rsp; | 14 | extern unsigned long init_rsp; |
| 15 | extern unsigned long initial_code; | 15 | extern unsigned long initial_code; |
| 16 | extern unsigned long initial_page_table; | ||
| 17 | extern unsigned long initial_gs; | 16 | extern unsigned long initial_gs; |
| 18 | 17 | ||
| 19 | #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) | 18 | #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) |
| 20 | 19 | ||
| 21 | extern unsigned long setup_trampoline(void); | 20 | extern unsigned long setup_trampoline(void); |
| 22 | extern void __init setup_trampoline_page_table(void); | ||
| 23 | extern void __init reserve_trampoline_memory(void); | 21 | extern void __init reserve_trampoline_memory(void); |
| 24 | #else | 22 | #else |
| 25 | static inline void setup_trampoline_page_table(void) {} | ||
| 26 | static inline void reserve_trampoline_memory(void) {} | 23 | static inline void reserve_trampoline_memory(void) {} |
| 27 | #endif /* CONFIG_X86_TRAMPOLINE */ | 24 | #endif /* CONFIG_X86_TRAMPOLINE */ |
| 28 | 25 | ||
diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h deleted file mode 100644 index 61e08c0a2907..000000000000 --- a/arch/x86/include/asm/vmi.h +++ /dev/null | |||
| @@ -1,269 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * VMI interface definition | ||
| 3 | * | ||
| 4 | * Copyright (C) 2005, VMware, Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, but | ||
| 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
| 14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
| 15 | * details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 20 | * | ||
| 21 | * Maintained by: Zachary Amsden zach@vmware.com | ||
| 22 | * | ||
| 23 | */ | ||
| 24 | #include <linux/types.h> | ||
| 25 | |||
| 26 | /* | ||
| 27 | *--------------------------------------------------------------------- | ||
| 28 | * | ||
| 29 | * VMI Option ROM API | ||
| 30 | * | ||
| 31 | *--------------------------------------------------------------------- | ||
| 32 | */ | ||
| 33 | #define VMI_SIGNATURE 0x696d5663 /* "cVmi" */ | ||
| 34 | |||
| 35 | #define PCI_VENDOR_ID_VMWARE 0x15AD | ||
| 36 | #define PCI_DEVICE_ID_VMWARE_VMI 0x0801 | ||
| 37 | |||
| 38 | /* | ||
| 39 | * We use two version numbers for compatibility, with the major | ||
| 40 | * number signifying interface breakages, and the minor number | ||
| 41 | * interface extensions. | ||
| 42 | */ | ||
| 43 | #define VMI_API_REV_MAJOR 3 | ||
| 44 | #define VMI_API_REV_MINOR 0 | ||
| 45 | |||
| 46 | #define VMI_CALL_CPUID 0 | ||
| 47 | #define VMI_CALL_WRMSR 1 | ||
| 48 | #define VMI_CALL_RDMSR 2 | ||
| 49 | #define VMI_CALL_SetGDT 3 | ||
| 50 | #define VMI_CALL_SetLDT 4 | ||
| 51 | #define VMI_CALL_SetIDT 5 | ||
| 52 | #define VMI_CALL_SetTR 6 | ||
| 53 | #define VMI_CALL_GetGDT 7 | ||
| 54 | #define VMI_CALL_GetLDT 8 | ||
| 55 | #define VMI_CALL_GetIDT 9 | ||
| 56 | #define VMI_CALL_GetTR 10 | ||
| 57 | #define VMI_CALL_WriteGDTEntry 11 | ||
| 58 | #define VMI_CALL_WriteLDTEntry 12 | ||
| 59 | #define VMI_CALL_WriteIDTEntry 13 | ||
| 60 | #define VMI_CALL_UpdateKernelStack 14 | ||
| 61 | #define VMI_CALL_SetCR0 15 | ||
| 62 | #define VMI_CALL_SetCR2 16 | ||
| 63 | #define VMI_CALL_SetCR3 17 | ||
| 64 | #define VMI_CALL_SetCR4 18 | ||
| 65 | #define VMI_CALL_GetCR0 19 | ||
| 66 | #define VMI_CALL_GetCR2 20 | ||
| 67 | #define VMI_CALL_GetCR3 21 | ||
| 68 | #define VMI_CALL_GetCR4 22 | ||
| 69 | #define VMI_CALL_WBINVD 23 | ||
| 70 | #define VMI_CALL_SetDR 24 | ||
| 71 | #define VMI_CALL_GetDR 25 | ||
| 72 | #define VMI_CALL_RDPMC 26 | ||
| 73 | #define VMI_CALL_RDTSC 27 | ||
| 74 | #define VMI_CALL_CLTS 28 | ||
| 75 | #define VMI_CALL_EnableInterrupts 29 | ||
| 76 | #define VMI_CALL_DisableInterrupts 30 | ||
| 77 | #define VMI_CALL_GetInterruptMask 31 | ||
| 78 | #define VMI_CALL_SetInterruptMask 32 | ||
| 79 | #define VMI_CALL_IRET 33 | ||
| 80 | #define VMI_CALL_SYSEXIT 34 | ||
| 81 | #define VMI_CALL_Halt 35 | ||
| 82 | #define VMI_CALL_Reboot 36 | ||
| 83 | #define VMI_CALL_Shutdown 37 | ||
| 84 | #define VMI_CALL_SetPxE 38 | ||
| 85 | #define VMI_CALL_SetPxELong 39 | ||
| 86 | #define VMI_CALL_UpdatePxE 40 | ||
| 87 | #define VMI_CALL_UpdatePxELong 41 | ||
| 88 | #define VMI_CALL_MachineToPhysical 42 | ||
| 89 | #define VMI_CALL_PhysicalToMachine 43 | ||
| 90 | #define VMI_CALL_AllocatePage 44 | ||
| 91 | #define VMI_CALL_ReleasePage 45 | ||
| 92 | #define VMI_CALL_InvalPage 46 | ||
| 93 | #define VMI_CALL_FlushTLB 47 | ||
| 94 | #define VMI_CALL_SetLinearMapping 48 | ||
| 95 | |||
| 96 | #define VMI_CALL_SetIOPLMask 61 | ||
| 97 | #define VMI_CALL_SetInitialAPState 62 | ||
| 98 | #define VMI_CALL_APICWrite 63 | ||
| 99 | #define VMI_CALL_APICRead 64 | ||
| 100 | #define VMI_CALL_IODelay 65 | ||
| 101 | #define VMI_CALL_SetLazyMode 73 | ||
| 102 | |||
| 103 | /* | ||
| 104 | *--------------------------------------------------------------------- | ||
| 105 | * | ||
| 106 | * MMU operation flags | ||
| 107 | * | ||
| 108 | *--------------------------------------------------------------------- | ||
| 109 | */ | ||
| 110 | |||
| 111 | /* Flags used by VMI_{Allocate|Release}Page call */ | ||
| 112 | #define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */ | ||
| 113 | #define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */ | ||
| 114 | #define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */ | ||
| 115 | |||
| 116 | |||
| 117 | /* Flags shared by Allocate|Release Page and PTE updates */ | ||
| 118 | #define VMI_PAGE_PT 0x01 | ||
| 119 | #define VMI_PAGE_PD 0x02 | ||
| 120 | #define VMI_PAGE_PDP 0x04 | ||
| 121 | #define VMI_PAGE_PML4 0x08 | ||
| 122 | |||
| 123 | #define VMI_PAGE_NORMAL 0x00 /* for debugging */ | ||
| 124 | |||
| 125 | /* Flags used by PTE updates */ | ||
| 126 | #define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */ | ||
| 127 | #define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */ | ||
| 128 | #define VMI_PAGE_VA_MASK 0xfffff000 | ||
| 129 | |||
| 130 | #ifdef CONFIG_X86_PAE | ||
| 131 | #define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED) | ||
| 132 | #define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED) | ||
| 133 | #else | ||
| 134 | #define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED) | ||
| 135 | #define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED) | ||
| 136 | #endif | ||
| 137 | |||
| 138 | /* Flags used by VMI_FlushTLB call */ | ||
| 139 | #define VMI_FLUSH_TLB 0x01 | ||
| 140 | #define VMI_FLUSH_GLOBAL 0x02 | ||
| 141 | |||
| 142 | /* | ||
| 143 | *--------------------------------------------------------------------- | ||
| 144 | * | ||
| 145 | * VMI relocation definitions for ROM call get_reloc | ||
| 146 | * | ||
| 147 | *--------------------------------------------------------------------- | ||
| 148 | */ | ||
| 149 | |||
| 150 | /* VMI Relocation types */ | ||
| 151 | #define VMI_RELOCATION_NONE 0 | ||
| 152 | #define VMI_RELOCATION_CALL_REL 1 | ||
| 153 | #define VMI_RELOCATION_JUMP_REL 2 | ||
| 154 | #define VMI_RELOCATION_NOP 3 | ||
| 155 | |||
| 156 | #ifndef __ASSEMBLY__ | ||
| 157 | struct vmi_relocation_info { | ||
| 158 | unsigned char *eip; | ||
| 159 | unsigned char type; | ||
| 160 | unsigned char reserved[3]; | ||
| 161 | }; | ||
| 162 | #endif | ||
| 163 | |||
| 164 | |||
| 165 | /* | ||
| 166 | *--------------------------------------------------------------------- | ||
| 167 | * | ||
| 168 | * Generic ROM structures and definitions | ||
| 169 | * | ||
| 170 | *--------------------------------------------------------------------- | ||
| 171 | */ | ||
| 172 | |||
| 173 | #ifndef __ASSEMBLY__ | ||
| 174 | |||
| 175 | struct vrom_header { | ||
| 176 | u16 rom_signature; /* option ROM signature */ | ||
| 177 | u8 rom_length; /* ROM length in 512 byte chunks */ | ||
| 178 | u8 rom_entry[4]; /* 16-bit code entry point */ | ||
| 179 | u8 rom_pad0; /* 4-byte align pad */ | ||
| 180 | u32 vrom_signature; /* VROM identification signature */ | ||
| 181 | u8 api_version_min;/* Minor version of API */ | ||
| 182 | u8 api_version_maj;/* Major version of API */ | ||
| 183 | u8 jump_slots; /* Number of jump slots */ | ||
| 184 | u8 reserved1; /* Reserved for expansion */ | ||
| 185 | u32 virtual_top; /* Hypervisor virtual address start */ | ||
| 186 | u16 reserved2; /* Reserved for expansion */ | ||
| 187 | u16 license_offs; /* Offset to License string */ | ||
| 188 | u16 pci_header_offs;/* Offset to PCI OPROM header */ | ||
| 189 | u16 pnp_header_offs;/* Offset to PnP OPROM header */ | ||
| 190 | u32 rom_pad3; /* PnP reserverd / VMI reserved */ | ||
| 191 | u8 reserved[96]; /* Reserved for headers */ | ||
| 192 | char vmi_init[8]; /* VMI_Init jump point */ | ||
| 193 | char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ | ||
| 194 | } __attribute__((packed)); | ||
| 195 | |||
| 196 | struct pnp_header { | ||
| 197 | char sig[4]; | ||
| 198 | char rev; | ||
| 199 | char size; | ||
| 200 | short next; | ||
| 201 | short res; | ||
| 202 | long devID; | ||
| 203 | unsigned short manufacturer_offset; | ||
| 204 | unsigned short product_offset; | ||
| 205 | } __attribute__((packed)); | ||
| 206 | |||
| 207 | struct pci_header { | ||
| 208 | char sig[4]; | ||
| 209 | short vendorID; | ||
| 210 | short deviceID; | ||
| 211 | short vpdData; | ||
| 212 | short size; | ||
| 213 | char rev; | ||
| 214 | char class; | ||
| 215 | char subclass; | ||
| 216 | char interface; | ||
| 217 | short chunks; | ||
| 218 | char rom_version_min; | ||
| 219 | char rom_version_maj; | ||
| 220 | char codetype; | ||
| 221 | char lastRom; | ||
| 222 | short reserved; | ||
| 223 | } __attribute__((packed)); | ||
| 224 | |||
| 225 | /* Function prototypes for bootstrapping */ | ||
| 226 | #ifdef CONFIG_VMI | ||
| 227 | extern void vmi_init(void); | ||
| 228 | extern void vmi_activate(void); | ||
| 229 | extern void vmi_bringup(void); | ||
| 230 | #else | ||
| 231 | static inline void vmi_init(void) {} | ||
| 232 | static inline void vmi_activate(void) {} | ||
| 233 | static inline void vmi_bringup(void) {} | ||
| 234 | #endif | ||
| 235 | |||
| 236 | /* State needed to start an application processor in an SMP system. */ | ||
| 237 | struct vmi_ap_state { | ||
| 238 | u32 cr0; | ||
| 239 | u32 cr2; | ||
| 240 | u32 cr3; | ||
| 241 | u32 cr4; | ||
| 242 | |||
| 243 | u64 efer; | ||
| 244 | |||
| 245 | u32 eip; | ||
| 246 | u32 eflags; | ||
| 247 | u32 eax; | ||
| 248 | u32 ebx; | ||
| 249 | u32 ecx; | ||
| 250 | u32 edx; | ||
| 251 | u32 esp; | ||
| 252 | u32 ebp; | ||
| 253 | u32 esi; | ||
| 254 | u32 edi; | ||
| 255 | u16 cs; | ||
| 256 | u16 ss; | ||
| 257 | u16 ds; | ||
| 258 | u16 es; | ||
| 259 | u16 fs; | ||
| 260 | u16 gs; | ||
| 261 | u16 ldtr; | ||
| 262 | |||
| 263 | u16 gdtr_limit; | ||
| 264 | u32 gdtr_base; | ||
| 265 | u32 idtr_base; | ||
| 266 | u16 idtr_limit; | ||
| 267 | }; | ||
| 268 | |||
| 269 | #endif | ||
diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h deleted file mode 100644 index c6e0bee93e3c..000000000000 --- a/arch/x86/include/asm/vmi_time.h +++ /dev/null | |||
| @@ -1,98 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * VMI Time wrappers | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006, VMware, Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, but | ||
| 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
| 14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
| 15 | * details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 20 | * | ||
| 21 | * Send feedback to dhecht@vmware.com | ||
| 22 | * | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef _ASM_X86_VMI_TIME_H | ||
| 26 | #define _ASM_X86_VMI_TIME_H | ||
| 27 | |||
| 28 | /* | ||
| 29 | * Raw VMI call indices for timer functions | ||
| 30 | */ | ||
| 31 | #define VMI_CALL_GetCycleFrequency 66 | ||
| 32 | #define VMI_CALL_GetCycleCounter 67 | ||
| 33 | #define VMI_CALL_SetAlarm 68 | ||
| 34 | #define VMI_CALL_CancelAlarm 69 | ||
| 35 | #define VMI_CALL_GetWallclockTime 70 | ||
| 36 | #define VMI_CALL_WallclockUpdated 71 | ||
| 37 | |||
| 38 | /* Cached VMI timer operations */ | ||
| 39 | extern struct vmi_timer_ops { | ||
| 40 | u64 (*get_cycle_frequency)(void); | ||
| 41 | u64 (*get_cycle_counter)(int); | ||
| 42 | u64 (*get_wallclock)(void); | ||
| 43 | int (*wallclock_updated)(void); | ||
| 44 | void (*set_alarm)(u32 flags, u64 expiry, u64 period); | ||
| 45 | void (*cancel_alarm)(u32 flags); | ||
| 46 | } vmi_timer_ops; | ||
| 47 | |||
| 48 | /* Prototypes */ | ||
| 49 | extern void __init vmi_time_init(void); | ||
| 50 | extern unsigned long vmi_get_wallclock(void); | ||
| 51 | extern int vmi_set_wallclock(unsigned long now); | ||
| 52 | extern unsigned long long vmi_sched_clock(void); | ||
| 53 | extern unsigned long vmi_tsc_khz(void); | ||
| 54 | |||
| 55 | #ifdef CONFIG_X86_LOCAL_APIC | ||
| 56 | extern void __devinit vmi_time_bsp_init(void); | ||
| 57 | extern void __devinit vmi_time_ap_init(void); | ||
| 58 | #endif | ||
| 59 | |||
| 60 | /* | ||
| 61 | * When run under a hypervisor, a vcpu is always in one of three states: | ||
| 62 | * running, halted, or ready. The vcpu is in the 'running' state if it | ||
| 63 | * is executing. When the vcpu executes the halt interface, the vcpu | ||
| 64 | * enters the 'halted' state and remains halted until there is some work | ||
| 65 | * pending for the vcpu (e.g. an alarm expires, host I/O completes on | ||
| 66 | * behalf of virtual I/O). At this point, the vcpu enters the 'ready' | ||
| 67 | * state (waiting for the hypervisor to reschedule it). Finally, at any | ||
| 68 | * time when the vcpu is not in the 'running' state nor the 'halted' | ||
| 69 | * state, it is in the 'ready' state. | ||
| 70 | * | ||
| 71 | * Real time is advances while the vcpu is 'running', 'ready', or | ||
| 72 | * 'halted'. Stolen time is the time in which the vcpu is in the | ||
| 73 | * 'ready' state. Available time is the remaining time -- the vcpu is | ||
| 74 | * either 'running' or 'halted'. | ||
| 75 | * | ||
| 76 | * All three views of time are accessible through the VMI cycle | ||
| 77 | * counters. | ||
| 78 | */ | ||
| 79 | |||
| 80 | /* The cycle counters. */ | ||
| 81 | #define VMI_CYCLES_REAL 0 | ||
| 82 | #define VMI_CYCLES_AVAILABLE 1 | ||
| 83 | #define VMI_CYCLES_STOLEN 2 | ||
| 84 | |||
| 85 | /* The alarm interface 'flags' bits */ | ||
| 86 | #define VMI_ALARM_COUNTERS 2 | ||
| 87 | |||
| 88 | #define VMI_ALARM_COUNTER_MASK 0x000000ff | ||
| 89 | |||
| 90 | #define VMI_ALARM_WIRED_IRQ0 0x00000000 | ||
| 91 | #define VMI_ALARM_WIRED_LVTT 0x00010000 | ||
| 92 | |||
| 93 | #define VMI_ALARM_IS_ONESHOT 0x00000000 | ||
| 94 | #define VMI_ALARM_IS_PERIODIC 0x00000100 | ||
| 95 | |||
| 96 | #define CONFIG_VMI_ALARM_HZ 100 | ||
| 97 | |||
| 98 | #endif /* _ASM_X86_VMI_TIME_H */ | ||
