Diffstat (limited to 'arch/x86/include')
30 files changed, 589 insertions, 222 deletions
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h index 0d9ec770f2f8..e6a92455740e 100644 --- a/arch/x86/include/asm/archrandom.h +++ b/arch/x86/include/asm/archrandom.h | |||
@@ -39,6 +39,20 @@ | |||
39 | 39 | ||
40 | #ifdef CONFIG_ARCH_RANDOM | 40 | #ifdef CONFIG_ARCH_RANDOM |
41 | 41 | ||
42 | /* Use this instead of arch_get_random_long() when alternatives haven't run yet. */ | ||
43 | static inline int rdrand_long(unsigned long *v) | ||
44 | { | ||
45 | int ok; | ||
46 | asm volatile("1: " RDRAND_LONG "\n\t" | ||
47 | "jc 2f\n\t" | ||
48 | "decl %0\n\t" | ||
49 | "jnz 1b\n\t" | ||
50 | "2:" | ||
51 | : "=r" (ok), "=a" (*v) | ||
52 | : "0" (RDRAND_RETRY_LOOPS)); | ||
53 | return ok; | ||
54 | } | ||
55 | |||
42 | #define GET_RANDOM(name, type, rdrand, nop) \ | 56 | #define GET_RANDOM(name, type, rdrand, nop) \ |
43 | static inline int name(type *v) \ | 57 | static inline int name(type *v) \ |
44 | { \ | 58 | { \ |
@@ -68,6 +82,13 @@ GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3); | |||
68 | 82 | ||
69 | #endif /* CONFIG_X86_64 */ | 83 | #endif /* CONFIG_X86_64 */ |
70 | 84 | ||
85 | #else | ||
86 | |||
87 | static inline int rdrand_long(unsigned long *v) | ||
88 | { | ||
89 | return 0; | ||
90 | } | ||
91 | |||
71 | #endif /* CONFIG_ARCH_RANDOM */ | 92 | #endif /* CONFIG_ARCH_RANDOM */ |
72 | 93 | ||
73 | extern void x86_init_rdrand(struct cpuinfo_x86 *c); | 94 | extern void x86_init_rdrand(struct cpuinfo_x86 *c); |
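The new rdrand_long() retries RDRAND a bounded number of times because the instruction can transiently fail (carry flag clear) while the DRNG refills. A standalone C sketch of the same retry pattern, runnable on a CPU with RDRAND support; RETRY_LOOPS here is a stand-in for the kernel's RDRAND_RETRY_LOOPS:

#include <stdio.h>

#define RETRY_LOOPS 10	/* stand-in for the kernel's RDRAND_RETRY_LOOPS */

/* Returns nonzero on success (CF set), 0 once the retries are exhausted. */
static int rdrand_long_demo(unsigned long *v)
{
	int ok;

	asm volatile("1: rdrand %1\n\t"
		     "jc 2f\n\t"
		     "decl %0\n\t"
		     "jnz 1b\n\t"
		     "2:"
		     : "=r" (ok), "=r" (*v)
		     : "0" (RETRY_LOOPS));
	return ok;
}

int main(void)
{
	unsigned long val;

	if (rdrand_long_demo(&val))
		printf("rdrand: %#lx\n", val);
	else
		printf("DRNG not ready after %d tries\n", RETRY_LOOPS);
	return 0;
}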
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index c6cd358a1eec..04a48903b2eb 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h | |||
@@ -92,12 +92,53 @@ | |||
92 | #endif | 92 | #endif |
93 | #define smp_read_barrier_depends() read_barrier_depends() | 93 | #define smp_read_barrier_depends() read_barrier_depends() |
94 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | 94 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
95 | #else | 95 | #else /* !SMP */ |
96 | #define smp_mb() barrier() | 96 | #define smp_mb() barrier() |
97 | #define smp_rmb() barrier() | 97 | #define smp_rmb() barrier() |
98 | #define smp_wmb() barrier() | 98 | #define smp_wmb() barrier() |
99 | #define smp_read_barrier_depends() do { } while (0) | 99 | #define smp_read_barrier_depends() do { } while (0) |
100 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | 100 | #define set_mb(var, value) do { var = value; barrier(); } while (0) |
101 | #endif /* SMP */ | ||
102 | |||
103 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | ||
104 | |||
105 | /* | ||
106 | * For either of these options x86 doesn't have a strong TSO memory | ||
107 | * model and we should fall back to full barriers. | ||
108 | */ | ||
109 | |||
110 | #define smp_store_release(p, v) \ | ||
111 | do { \ | ||
112 | compiletime_assert_atomic_type(*p); \ | ||
113 | smp_mb(); \ | ||
114 | ACCESS_ONCE(*p) = (v); \ | ||
115 | } while (0) | ||
116 | |||
117 | #define smp_load_acquire(p) \ | ||
118 | ({ \ | ||
119 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
120 | compiletime_assert_atomic_type(*p); \ | ||
121 | smp_mb(); \ | ||
122 | ___p1; \ | ||
123 | }) | ||
124 | |||
125 | #else /* regular x86 TSO memory ordering */ | ||
126 | |||
127 | #define smp_store_release(p, v) \ | ||
128 | do { \ | ||
129 | compiletime_assert_atomic_type(*p); \ | ||
130 | barrier(); \ | ||
131 | ACCESS_ONCE(*p) = (v); \ | ||
132 | } while (0) | ||
133 | |||
134 | #define smp_load_acquire(p) \ | ||
135 | ({ \ | ||
136 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
137 | compiletime_assert_atomic_type(*p); \ | ||
138 | barrier(); \ | ||
139 | ___p1; \ | ||
140 | }) | ||
141 | |||
101 | #endif | 142 | #endif |
102 | 143 | ||
103 | /* | 144 | /* |
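The new smp_store_release()/smp_load_acquire() pair gives one-way ordering: the release store makes all prior writes visible before the flag, and the acquire load orders subsequent reads after the flag read. On regular TSO x86 a compiler barrier() suffices, which is what the second branch exploits. A userspace C11 analogue of the same publish/consume contract (a sketch of the idiom, not the kernel macros themselves):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;	/* plain, non-atomic data */
static atomic_int flag;

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;				/* 1: write the data */
	atomic_store_explicit(&flag, 1,		/* 2: then publish it */
			      memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (!atomic_load_explicit(&flag, memory_order_acquire))
		;				/* spin until published */
	printf("%d\n", payload);		/* guaranteed to print 42 */
	pthread_join(t, NULL);
	return 0;
}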
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 89270b4318db..e099f9502ace 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -216,6 +216,7 @@ | |||
216 | #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ | 216 | #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ |
217 | #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ | 217 | #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ |
218 | #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ | 218 | #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ |
219 | #define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */ | ||
219 | #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ | 220 | #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ |
220 | #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ | 221 | #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ |
221 | #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ | 222 | #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 65c6e6e3a552..3b978c472d08 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -1,6 +1,24 @@ | |||
1 | #ifndef _ASM_X86_EFI_H | 1 | #ifndef _ASM_X86_EFI_H |
2 | #define _ASM_X86_EFI_H | 2 | #define _ASM_X86_EFI_H |
3 | 3 | ||
4 | /* | ||
5 | * We map the EFI regions needed for runtime services non-contiguously, | ||
6 | * with preserved alignment on virtual addresses starting from -4G down | ||
7 | * for a total max space of 64G. This way, we provide for stable runtime | ||
8 | * services addresses across kernels so that a kexec'd kernel can still | ||
9 | * use them. | ||
10 | * | ||
11 | * This is the main reason why we're doing stable VA mappings for RT | ||
12 | * services. | ||
13 | * | ||
14 | * This flag is used in conjunction with a chicken bit called | ||
15 | * "efi=old_map" which can be used as a fallback to the old runtime | ||
16 | * services mapping method in case there's some b0rkage with a | ||
17 | * particular EFI implementation (haha, it is hard to hold up the | ||
18 | * sarcasm here...). | ||
19 | */ | ||
20 | #define EFI_OLD_MEMMAP EFI_ARCH_1 | ||
21 | |||
4 | #ifdef CONFIG_X86_32 | 22 | #ifdef CONFIG_X86_32 |
5 | 23 | ||
6 | #define EFI_LOADER_SIGNATURE "EL32" | 24 | #define EFI_LOADER_SIGNATURE "EL32" |
@@ -69,24 +87,31 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, | |||
69 | efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \ | 87 | efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \ |
70 | (u64)(a4), (u64)(a5), (u64)(a6)) | 88 | (u64)(a4), (u64)(a5), (u64)(a6)) |
71 | 89 | ||
90 | #define _efi_call_virtX(x, f, ...) \ | ||
91 | ({ \ | ||
92 | efi_status_t __s; \ | ||
93 | \ | ||
94 | efi_sync_low_kernel_mappings(); \ | ||
95 | preempt_disable(); \ | ||
96 | __s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__); \ | ||
97 | preempt_enable(); \ | ||
98 | __s; \ | ||
99 | }) | ||
100 | |||
72 | #define efi_call_virt0(f) \ | 101 | #define efi_call_virt0(f) \ |
73 | efi_call0((efi.systab->runtime->f)) | 102 | _efi_call_virtX(0, f) |
74 | #define efi_call_virt1(f, a1) \ | 103 | #define efi_call_virt1(f, a1) \ |
75 | efi_call1((efi.systab->runtime->f), (u64)(a1)) | 104 | _efi_call_virtX(1, f, (u64)(a1)) |
76 | #define efi_call_virt2(f, a1, a2) \ | 105 | #define efi_call_virt2(f, a1, a2) \ |
77 | efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2)) | 106 | _efi_call_virtX(2, f, (u64)(a1), (u64)(a2)) |
78 | #define efi_call_virt3(f, a1, a2, a3) \ | 107 | #define efi_call_virt3(f, a1, a2, a3) \ |
79 | efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | 108 | _efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3)) |
80 | (u64)(a3)) | 109 | #define efi_call_virt4(f, a1, a2, a3, a4) \ |
81 | #define efi_call_virt4(f, a1, a2, a3, a4) \ | 110 | _efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4)) |
82 | efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | 111 | #define efi_call_virt5(f, a1, a2, a3, a4, a5) \ |
83 | (u64)(a3), (u64)(a4)) | 112 | _efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5)) |
84 | #define efi_call_virt5(f, a1, a2, a3, a4, a5) \ | 113 | #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ |
85 | efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | 114 | _efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) |
86 | (u64)(a3), (u64)(a4), (u64)(a5)) | ||
87 | #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ | ||
88 | efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | ||
89 | (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) | ||
90 | 115 | ||
91 | extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, | 116 | extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, |
92 | u32 type, u64 attribute); | 117 | u32 type, u64 attribute); |
@@ -95,12 +120,28 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, | |||
95 | 120 | ||
96 | extern int add_efi_memmap; | 121 | extern int add_efi_memmap; |
97 | extern unsigned long x86_efi_facility; | 122 | extern unsigned long x86_efi_facility; |
123 | extern struct efi_scratch efi_scratch; | ||
98 | extern void efi_set_executable(efi_memory_desc_t *md, bool executable); | 124 | extern void efi_set_executable(efi_memory_desc_t *md, bool executable); |
99 | extern int efi_memblock_x86_reserve_range(void); | 125 | extern int efi_memblock_x86_reserve_range(void); |
100 | extern void efi_call_phys_prelog(void); | 126 | extern void efi_call_phys_prelog(void); |
101 | extern void efi_call_phys_epilog(void); | 127 | extern void efi_call_phys_epilog(void); |
102 | extern void efi_unmap_memmap(void); | 128 | extern void efi_unmap_memmap(void); |
103 | extern void efi_memory_uc(u64 addr, unsigned long size); | 129 | extern void efi_memory_uc(u64 addr, unsigned long size); |
130 | extern void __init efi_map_region(efi_memory_desc_t *md); | ||
131 | extern void __init efi_map_region_fixed(efi_memory_desc_t *md); | ||
132 | extern void efi_sync_low_kernel_mappings(void); | ||
133 | extern void efi_setup_page_tables(void); | ||
134 | extern void __init old_map_region(efi_memory_desc_t *md); | ||
135 | |||
136 | struct efi_setup_data { | ||
137 | u64 fw_vendor; | ||
138 | u64 runtime; | ||
139 | u64 tables; | ||
140 | u64 smbios; | ||
141 | u64 reserved[8]; | ||
142 | }; | ||
143 | |||
144 | extern u64 efi_setup; | ||
104 | 145 | ||
105 | #ifdef CONFIG_EFI | 146 | #ifdef CONFIG_EFI |
106 | 147 | ||
@@ -110,7 +151,7 @@ static inline bool efi_is_native(void) | |||
110 | } | 151 | } |
111 | 152 | ||
112 | extern struct console early_efi_console; | 153 | extern struct console early_efi_console; |
113 | 154 | extern void parse_efi_setup(u64 phys_addr, u32 data_len); | |
114 | #else | 155 | #else |
115 | /* | 156 | /* |
116 | * IF EFI is not configured, have the EFI calls return -ENOSYS. | 157 | * IF EFI is not configured, have the EFI calls return -ENOSYS. |
@@ -122,6 +163,7 @@ extern struct console early_efi_console; | |||
122 | #define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS) | 163 | #define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS) |
123 | #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) | 164 | #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) |
124 | #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) | 165 | #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) |
166 | static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} | ||
125 | #endif /* CONFIG_EFI */ | 167 | #endif /* CONFIG_EFI */ |
126 | 168 | ||
127 | #endif /* _ASM_X86_EFI_H */ | 169 | #endif /* _ASM_X86_EFI_H */ |
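Each efi_call_virtN wrapper now funnels through _efi_call_virtX, which syncs the low kernel mappings and holds off preemption around the firmware call. For example, efi_call_virt2(get_time, tm, tc) now expands to roughly the following (get_time being the runtime-services member being invoked):

({
	efi_status_t __s;

	efi_sync_low_kernel_mappings();
	preempt_disable();
	__s = efi_call2((void *)efi.systab->runtime->get_time,
			(u64)(tm), (u64)(tc));
	preempt_enable();
	__s;
})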
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index be27ba1e947a..b4c1f5453436 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h | |||
@@ -110,26 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |||
110 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | 110 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
111 | u32 oldval, u32 newval) | 111 | u32 oldval, u32 newval) |
112 | { | 112 | { |
113 | int ret = 0; | 113 | return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval); |
114 | |||
115 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | ||
116 | return -EFAULT; | ||
117 | |||
118 | asm volatile("\t" ASM_STAC "\n" | ||
119 | "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" | ||
120 | "2:\t" ASM_CLAC "\n" | ||
121 | "\t.section .fixup, \"ax\"\n" | ||
122 | "3:\tmov %3, %0\n" | ||
123 | "\tjmp 2b\n" | ||
124 | "\t.previous\n" | ||
125 | _ASM_EXTABLE(1b, 3b) | ||
126 | : "+r" (ret), "=a" (oldval), "+m" (*uaddr) | ||
127 | : "i" (-EFAULT), "r" (newval), "1" (oldval) | ||
128 | : "memory" | ||
129 | ); | ||
130 | |||
131 | *uval = oldval; | ||
132 | return ret; | ||
133 | } | 114 | } |
134 | 115 | ||
135 | #endif | 116 | #endif |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index cba45d99ac1a..67d69b8e2d20 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -191,6 +191,9 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); | |||
191 | #define trace_interrupt interrupt | 191 | #define trace_interrupt interrupt |
192 | #endif | 192 | #endif |
193 | 193 | ||
194 | #define VECTOR_UNDEFINED -1 | ||
195 | #define VECTOR_RETRIGGERED -2 | ||
196 | |||
194 | typedef int vector_irq_t[NR_VECTORS]; | 197 | typedef int vector_irq_t[NR_VECTORS]; |
195 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | 198 | DECLARE_PER_CPU(vector_irq_t, vector_irq); |
196 | extern void setup_vector_irq(int cpu); | 199 | extern void setup_vector_irq(int cpu); |
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 459769d39263..e34e097b6f9d 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h | |||
@@ -51,10 +51,41 @@ struct devs_id { | |||
51 | enum intel_mid_cpu_type { | 51 | enum intel_mid_cpu_type { |
52 | /* 1 was Moorestown */ | 52 | /* 1 was Moorestown */ |
53 | INTEL_MID_CPU_CHIP_PENWELL = 2, | 53 | INTEL_MID_CPU_CHIP_PENWELL = 2, |
54 | INTEL_MID_CPU_CHIP_CLOVERVIEW, | ||
55 | INTEL_MID_CPU_CHIP_TANGIER, | ||
54 | }; | 56 | }; |
55 | 57 | ||
56 | extern enum intel_mid_cpu_type __intel_mid_cpu_chip; | 58 | extern enum intel_mid_cpu_type __intel_mid_cpu_chip; |
57 | 59 | ||
60 | /** | ||
61 | * struct intel_mid_ops - Interface between intel-mid & sub archs | ||
62 | * @arch_setup: arch_setup function to re-initialize platform | ||
63 | * structures (x86_init, x86_platform_init) | ||
64 | * | ||
65 | * This structure can be extended if any new interface is required | ||
66 | * between intel-mid & its sub arch files. | ||
67 | */ | ||
68 | struct intel_mid_ops { | ||
69 | void (*arch_setup)(void); | ||
70 | }; | ||
71 | |||
72 | /* Helper APIs for INTEL_MID_OPS_INIT */ | ||
73 | #define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \ | ||
74 | [cpuid] = get_##cpuname##_ops | ||
75 | |||
76 | /* Maximum number of CPU ops */ | ||
77 | #define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *)) | ||
78 | |||
79 | /* | ||
80 | * For every new cpu addition, a weak get_<cpuname>_ops() function needs to be | ||
81 | * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h. | ||
82 | */ | ||
83 | #define INTEL_MID_OPS_INIT {\ | ||
84 | DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \ | ||
85 | DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \ | ||
86 | DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \ | ||
87 | }; | ||
88 | |||
58 | #ifdef CONFIG_X86_INTEL_MID | 89 | #ifdef CONFIG_X86_INTEL_MID |
59 | 90 | ||
60 | static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) | 91 | static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) |
@@ -86,8 +117,21 @@ extern enum intel_mid_timer_options intel_mid_timer_options; | |||
86 | * Penwell uses spread spectrum clock, so the freq number is not exactly | 117 | * Penwell uses spread spectrum clock, so the freq number is not exactly |
87 | * the same as reported by MSR based on SDM. | 118 | * the same as reported by MSR based on SDM. |
88 | */ | 119 | */ |
89 | #define PENWELL_FSB_FREQ_83SKU 83200 | 120 | #define FSB_FREQ_83SKU 83200 |
90 | #define PENWELL_FSB_FREQ_100SKU 99840 | 121 | #define FSB_FREQ_100SKU 99840 |
122 | #define FSB_FREQ_133SKU 133000 | ||
123 | |||
124 | #define FSB_FREQ_167SKU 167000 | ||
125 | #define FSB_FREQ_200SKU 200000 | ||
126 | #define FSB_FREQ_267SKU 267000 | ||
127 | #define FSB_FREQ_333SKU 333000 | ||
128 | #define FSB_FREQ_400SKU 400000 | ||
129 | |||
130 | /* Bus Select SoC Fuse value */ | ||
131 | #define BSEL_SOC_FUSE_MASK 0x7 | ||
132 | #define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */ | ||
133 | #define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */ | ||
134 | #define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */ | ||
91 | 135 | ||
92 | #define SFI_MTMR_MAX_NUM 8 | 136 | #define SFI_MTMR_MAX_NUM 8 |
93 | #define SFI_MRTC_MAX 8 | 137 | #define SFI_MRTC_MAX 8 |
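A sketch of how platform setup code might consume the ops table above (kernel context; the get_*_ops() symbols are the weak per-SoC hooks mentioned in the comment, and INTEL_MID_OPS_INIT supplies the array's closing "};" itself):

static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT

static void intel_mid_arch_setup_demo(void)
{
	enum intel_mid_cpu_type id = intel_mid_identify_cpu();
	struct intel_mid_ops *ops;

	if (id >= MAX_CPU_OPS(get_intel_mid_ops) || !get_intel_mid_ops[id])
		return;

	ops = get_intel_mid_ops[id]();
	if (ops && ops->arch_setup)
		ops->arch_setup();
}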
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h new file mode 100644 index 000000000000..8e71c7941767 --- /dev/null +++ b/arch/x86/include/asm/iosf_mbi.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * iosf_mbi.h: Intel OnChip System Fabric MailBox access support | ||
3 | */ | ||
4 | |||
5 | #ifndef IOSF_MBI_SYMS_H | ||
6 | #define IOSF_MBI_SYMS_H | ||
7 | |||
8 | #define MBI_MCR_OFFSET 0xD0 | ||
9 | #define MBI_MDR_OFFSET 0xD4 | ||
10 | #define MBI_MCRX_OFFSET 0xD8 | ||
11 | |||
12 | #define MBI_RD_MASK 0xFEFFFFFF | ||
13 | #define MBI_WR_MASK 0x01000000 | ||
14 | |||
15 | #define MBI_MASK_HI 0xFFFFFF00 | ||
16 | #define MBI_MASK_LO 0x000000FF | ||
17 | #define MBI_ENABLE 0xF0 | ||
18 | |||
19 | /* Baytrail available units */ | ||
20 | #define BT_MBI_UNIT_AUNIT 0x00 | ||
21 | #define BT_MBI_UNIT_SMC 0x01 | ||
22 | #define BT_MBI_UNIT_CPU 0x02 | ||
23 | #define BT_MBI_UNIT_BUNIT 0x03 | ||
24 | #define BT_MBI_UNIT_PMC 0x04 | ||
25 | #define BT_MBI_UNIT_GFX 0x06 | ||
26 | #define BT_MBI_UNIT_SMI 0x0C | ||
27 | #define BT_MBI_UNIT_USB 0x43 | ||
28 | #define BT_MBI_UNIT_SATA 0xA3 | ||
29 | #define BT_MBI_UNIT_PCIE 0xA6 | ||
30 | |||
31 | /* Baytrail read/write opcodes */ | ||
32 | #define BT_MBI_AUNIT_READ 0x10 | ||
33 | #define BT_MBI_AUNIT_WRITE 0x11 | ||
34 | #define BT_MBI_SMC_READ 0x10 | ||
35 | #define BT_MBI_SMC_WRITE 0x11 | ||
36 | #define BT_MBI_CPU_READ 0x10 | ||
37 | #define BT_MBI_CPU_WRITE 0x11 | ||
38 | #define BT_MBI_BUNIT_READ 0x10 | ||
39 | #define BT_MBI_BUNIT_WRITE 0x11 | ||
40 | #define BT_MBI_PMC_READ 0x06 | ||
41 | #define BT_MBI_PMC_WRITE 0x07 | ||
42 | #define BT_MBI_GFX_READ 0x00 | ||
43 | #define BT_MBI_GFX_WRITE 0x01 | ||
44 | #define BT_MBI_SMIO_READ 0x06 | ||
45 | #define BT_MBI_SMIO_WRITE 0x07 | ||
46 | #define BT_MBI_USB_READ 0x06 | ||
47 | #define BT_MBI_USB_WRITE 0x07 | ||
48 | #define BT_MBI_SATA_READ 0x00 | ||
49 | #define BT_MBI_SATA_WRITE 0x01 | ||
50 | #define BT_MBI_PCIE_READ 0x00 | ||
51 | #define BT_MBI_PCIE_WRITE 0x01 | ||
52 | |||
53 | /** | ||
54 | * iosf_mbi_read() - MailBox Interface read command | ||
55 | * @port: port indicating subunit being accessed | ||
56 | * @opcode: port specific read or write opcode | ||
57 | * @offset: register address offset | ||
58 | * @mdr: register data to be read | ||
59 | * | ||
60 | * Locking is handled by spinlock - cannot sleep. | ||
61 | * Return: Nonzero on error | ||
62 | */ | ||
63 | int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr); | ||
64 | |||
65 | /** | ||
66 | * iosf_mbi_write() - MailBox unmasked write command | ||
67 | * @port: port indicating subunit being accessed | ||
68 | * @opcode: port specific read or write opcode | ||
69 | * @offset: register address offset | ||
70 | * @mdr: register data to be written | ||
71 | * | ||
72 | * Locking is handled by spinlock - cannot sleep. | ||
73 | * Return: Nonzero on error | ||
74 | */ | ||
75 | int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr); | ||
76 | |||
77 | /** | ||
78 | * iosf_mbi_modify() - MailBox masked write command | ||
79 | * @port: port indicating subunit being accessed | ||
80 | * @opcode: port specific read or write opcode | ||
81 | * @offset: register address offset | ||
82 | * @mdr: register data being modified | ||
83 | * @mask: mask indicating bits in mdr to be modified | ||
84 | * | ||
85 | * Locking is handled by spinlock - cannot sleep. | ||
86 | * Return: Nonzero on error | ||
87 | */ | ||
88 | int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask); | ||
89 | |||
90 | #endif /* IOSF_MBI_SYMS_H */ | ||
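A hedged kernel-context sketch of the read/masked-write API on Baytrail; the 0x3 register offset here is a made-up example, not a documented BUNIT register:

/* Kernel-context sketch; 0x3 is a made-up BUNIT register offset. */
static int bunit_set_bit0_demo(void)
{
	u32 val;
	int ret;

	ret = iosf_mbi_read(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_READ, 0x3, &val);
	if (ret)
		return ret;

	/* masked write: only bit 0 of mdr is applied */
	return iosf_mbi_modify(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_WRITE,
			       0x3, 0x1, 0x1);
}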
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index c696a8687567..6e4ce2df87cf 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -118,7 +118,6 @@ extern void mce_register_decode_chain(struct notifier_block *nb); | |||
118 | extern void mce_unregister_decode_chain(struct notifier_block *nb); | 118 | extern void mce_unregister_decode_chain(struct notifier_block *nb); |
119 | 119 | ||
120 | #include <linux/percpu.h> | 120 | #include <linux/percpu.h> |
121 | #include <linux/init.h> | ||
122 | #include <linux/atomic.h> | 121 | #include <linux/atomic.h> |
123 | 122 | ||
124 | extern int mce_p5_enabled; | 123 | extern int mce_p5_enabled; |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index f98bd6625318..b59827e76529 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -1,6 +1,21 @@ | |||
1 | #ifndef _ASM_X86_MICROCODE_H | 1 | #ifndef _ASM_X86_MICROCODE_H |
2 | #define _ASM_X86_MICROCODE_H | 2 | #define _ASM_X86_MICROCODE_H |
3 | 3 | ||
4 | #define native_rdmsr(msr, val1, val2) \ | ||
5 | do { \ | ||
6 | u64 __val = native_read_msr((msr)); \ | ||
7 | (void)((val1) = (u32)__val); \ | ||
8 | (void)((val2) = (u32)(__val >> 32)); \ | ||
9 | } while (0) | ||
10 | |||
11 | #define native_wrmsr(msr, low, high) \ | ||
12 | native_write_msr(msr, low, high) | ||
13 | |||
14 | #define native_wrmsrl(msr, val) \ | ||
15 | native_write_msr((msr), \ | ||
16 | (u32)((u64)(val)), \ | ||
17 | (u32)((u64)(val) >> 32)) | ||
18 | |||
4 | struct cpu_signature { | 19 | struct cpu_signature { |
5 | unsigned int sig; | 20 | unsigned int sig; |
6 | unsigned int pf; | 21 | unsigned int pf; |
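The native_* MSR helpers mirror the rdmsr()/wrmsr() interfaces but call native_read_msr()/native_write_msr() directly, for use in early paths before paravirt alternatives are set up. A kernel-context sketch reading MSR_IA32_TSC (0x10 per the SDM) with the new macro:

/* Kernel-context sketch: read MSR_IA32_TSC (0x10) via the new helper. */
static u64 read_tsc_msr_demo(void)
{
	u32 low, high;

	native_rdmsr(0x10, low, high);
	return ((u64)high << 32) | low;
}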
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 4c019179a57d..b7b10b82d3e5 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h | |||
@@ -61,11 +61,10 @@ extern int __apply_microcode_amd(struct microcode_amd *mc_amd); | |||
61 | extern int apply_microcode_amd(int cpu); | 61 | extern int apply_microcode_amd(int cpu); |
62 | extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); | 62 | extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); |
63 | 63 | ||
64 | #define PATCH_MAX_SIZE PAGE_SIZE | ||
65 | extern u8 amd_ucode_patch[PATCH_MAX_SIZE]; | ||
66 | |||
64 | #ifdef CONFIG_MICROCODE_AMD_EARLY | 67 | #ifdef CONFIG_MICROCODE_AMD_EARLY |
65 | #ifdef CONFIG_X86_32 | ||
66 | #define MPB_MAX_SIZE PAGE_SIZE | ||
67 | extern u8 amd_bsp_mpb[MPB_MAX_SIZE]; | ||
68 | #endif | ||
69 | extern void __init load_ucode_amd_bsp(void); | 68 | extern void __init load_ucode_amd_bsp(void); |
70 | extern void load_ucode_amd_ap(void); | 69 | extern void load_ucode_amd_ap(void); |
71 | extern int __init save_microcode_in_initrd_amd(void); | 70 | extern int __init save_microcode_in_initrd_amd(void); |
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 3142a94c7b4b..3e6b4920ef5d 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef _ASM_X86_MPSPEC_H | 1 | #ifndef _ASM_X86_MPSPEC_H |
2 | #define _ASM_X86_MPSPEC_H | 2 | #define _ASM_X86_MPSPEC_H |
3 | 3 | ||
4 | #include <linux/init.h> | ||
5 | 4 | ||
6 | #include <asm/mpspec_def.h> | 5 | #include <asm/mpspec_def.h> |
7 | #include <asm/x86_init.h> | 6 | #include <asm/x86_init.h> |
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 2f366d0ac6b4..1da25a5f96f9 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_X86_MWAIT_H | 1 | #ifndef _ASM_X86_MWAIT_H |
2 | #define _ASM_X86_MWAIT_H | 2 | #define _ASM_X86_MWAIT_H |
3 | 3 | ||
4 | #include <linux/sched.h> | ||
5 | |||
4 | #define MWAIT_SUBSTATE_MASK 0xf | 6 | #define MWAIT_SUBSTATE_MASK 0xf |
5 | #define MWAIT_CSTATE_MASK 0xf | 7 | #define MWAIT_CSTATE_MASK 0xf |
6 | #define MWAIT_SUBSTATE_SIZE 4 | 8 | #define MWAIT_SUBSTATE_SIZE 4 |
@@ -13,4 +15,45 @@ | |||
13 | 15 | ||
14 | #define MWAIT_ECX_INTERRUPT_BREAK 0x1 | 16 | #define MWAIT_ECX_INTERRUPT_BREAK 0x1 |
15 | 17 | ||
18 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
19 | unsigned long edx) | ||
20 | { | ||
21 | /* "monitor %eax, %ecx, %edx;" */ | ||
22 | asm volatile(".byte 0x0f, 0x01, 0xc8;" | ||
23 | :: "a" (eax), "c" (ecx), "d"(edx)); | ||
24 | } | ||
25 | |||
26 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
27 | { | ||
28 | /* "mwait %eax, %ecx;" */ | ||
29 | asm volatile(".byte 0x0f, 0x01, 0xc9;" | ||
30 | :: "a" (eax), "c" (ecx)); | ||
31 | } | ||
32 | |||
33 | /* | ||
34 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | ||
35 | * which can obviate the IPI normally needed to trigger a need_resched | ||
36 | * check. We execute MONITOR against need_resched and enter an optimized | ||
37 | * wait state through MWAIT. Whenever someone changes need_resched, we are | ||
38 | * woken up from MWAIT (without an IPI). | ||
39 | * | ||
40 | * New with Core Duo processors, MWAIT can take some hints based on CPU | ||
41 | * capability. | ||
42 | */ | ||
43 | static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) | ||
44 | { | ||
45 | if (!current_set_polling_and_test()) { | ||
46 | if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) { | ||
47 | mb(); | ||
48 | clflush((void *)¤t_thread_info()->flags); | ||
49 | mb(); | ||
50 | } | ||
51 | |||
52 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | ||
53 | if (!need_resched()) | ||
54 | __mwait(eax, ecx); | ||
55 | } | ||
56 | current_clr_polling(); | ||
57 | } | ||
58 | |||
16 | #endif /* _ASM_X86_MWAIT_H */ | 59 | #endif /* _ASM_X86_MWAIT_H */ |
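The MWAIT eax hint encodes the target state in its upper nibble and the sub-state in the lower nibble, which is what the mask/size constants above describe. A kernel-context sketch building a hint and entering the wait (hypothetical helpers; cpuidle drivers do the equivalent):

/* Build an MWAIT eax hint: target state in bits 7:4, sub-state in bits 3:0. */
static unsigned long mwait_hint(unsigned int cstate, unsigned int substate)
{
	return ((cstate & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE) |
	       (substate & MWAIT_SUBSTATE_MASK);
}

static void idle_demo(void)
{
	/* hint 0 selects the shallowest state; wake on a pending interrupt */
	mwait_idle_with_hints(mwait_hint(0, 0), MWAIT_ECX_INTERRUPT_BREAK);
}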
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index c87892442e53..775873d3be55 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h | |||
@@ -71,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr); | |||
71 | #include <asm-generic/getorder.h> | 71 | #include <asm-generic/getorder.h> |
72 | 72 | ||
73 | #define __HAVE_ARCH_GATE_AREA 1 | 73 | #define __HAVE_ARCH_GATE_AREA 1 |
74 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
74 | 75 | ||
75 | #endif /* __KERNEL__ */ | 76 | #endif /* __KERNEL__ */ |
76 | #endif /* _ASM_X86_PAGE_H */ | 77 | #endif /* _ASM_X86_PAGE_H */ |
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h index 4d550d04b609..904f528cc8e8 100644 --- a/arch/x86/include/asm/page_32.h +++ b/arch/x86/include/asm/page_32.h | |||
@@ -5,10 +5,6 @@ | |||
5 | 5 | ||
6 | #ifndef __ASSEMBLY__ | 6 | #ifndef __ASSEMBLY__ |
7 | 7 | ||
8 | #ifdef CONFIG_HUGETLB_PAGE | ||
9 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
10 | #endif | ||
11 | |||
12 | #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) | 8 | #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) |
13 | #ifdef CONFIG_DEBUG_VIRTUAL | 9 | #ifdef CONFIG_DEBUG_VIRTUAL |
14 | extern unsigned long __phys_addr(unsigned long); | 10 | extern unsigned long __phys_addr(unsigned long); |
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 43dcd804ebd5..8de6d9cf3b95 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h | |||
@@ -39,9 +39,18 @@ | |||
39 | #define __VIRTUAL_MASK_SHIFT 47 | 39 | #define __VIRTUAL_MASK_SHIFT 47 |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Kernel image size is limited to 512 MB (see level2_kernel_pgt in | 42 | * Kernel image size is limited to 1GiB due to the fixmap living in the |
43 | * arch/x86/kernel/head_64.S), and it is mapped here: | 43 | * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use |
44 | * 512MiB by default, leaving 1.5GiB for modules once the page tables | ||
45 | * are fully set up. If kernel ASLR is configured, it can extend the | ||
46 | * kernel page table mapping, reducing the size of the modules area. | ||
44 | */ | 47 | */ |
45 | #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) | 48 | #define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024) |
49 | #if defined(CONFIG_RANDOMIZE_BASE) && \ | ||
50 | CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT | ||
51 | #define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET | ||
52 | #else | ||
53 | #define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT | ||
54 | #endif | ||
46 | 55 | ||
47 | #endif /* _ASM_X86_PAGE_64_DEFS_H */ | 56 | #endif /* _ASM_X86_PAGE_64_DEFS_H */ |
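For example, with CONFIG_RANDOMIZE_BASE_MAX_OFFSET set to 1GiB (0x40000000), KERNEL_IMAGE_SIZE becomes 1GiB and, per the comment above, the modules area shrinks by the extra 512MiB.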
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 3bf2dd0cf61f..0d193e234647 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h | |||
@@ -55,6 +55,13 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) | 55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | /* Bit manipulation helper on pte/pgoff entry */ | ||
59 | static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift, | ||
60 | unsigned long mask, unsigned int leftshift) | ||
61 | { | ||
62 | return ((value >> rightshift) & mask) << leftshift; | ||
63 | } | ||
64 | |||
58 | #ifdef CONFIG_MEM_SOFT_DIRTY | 65 | #ifdef CONFIG_MEM_SOFT_DIRTY |
59 | 66 | ||
60 | /* | 67 | /* |
@@ -71,31 +78,34 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
71 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) | 78 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) |
72 | #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) | 79 | #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) |
73 | 80 | ||
74 | #define pte_to_pgoff(pte) \ | 81 | #define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) |
75 | ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ | 82 | #define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) |
76 | & ((1U << PTE_FILE_BITS1) - 1))) \ | 83 | #define PTE_FILE_MASK3 ((1U << PTE_FILE_BITS3) - 1) |
77 | + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ | 84 | |
78 | & ((1U << PTE_FILE_BITS2) - 1)) \ | 85 | #define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) |
79 | << (PTE_FILE_BITS1)) \ | 86 | #define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) |
80 | + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ | 87 | #define PTE_FILE_LSHIFT4 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3) |
81 | & ((1U << PTE_FILE_BITS3) - 1)) \ | 88 | |
82 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | 89 | static __always_inline pgoff_t pte_to_pgoff(pte_t pte) |
83 | + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ | 90 | { |
84 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) | 91 | return (pgoff_t) |
85 | 92 | (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + | |
86 | #define pgoff_to_pte(off) \ | 93 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + |
87 | ((pte_t) { .pte_low = \ | 94 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, PTE_FILE_MASK3, PTE_FILE_LSHIFT3) + |
88 | ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ | 95 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT4, -1UL, PTE_FILE_LSHIFT4)); |
89 | + ((((off) >> PTE_FILE_BITS1) \ | 96 | } |
90 | & ((1U << PTE_FILE_BITS2) - 1)) \ | 97 | |
91 | << PTE_FILE_SHIFT2) \ | 98 | static __always_inline pte_t pgoff_to_pte(pgoff_t off) |
92 | + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | 99 | { |
93 | & ((1U << PTE_FILE_BITS3) - 1)) \ | 100 | return (pte_t){ |
94 | << PTE_FILE_SHIFT3) \ | 101 | .pte_low = |
95 | + ((((off) >> \ | 102 | pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + |
96 | (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ | 103 | pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + |
97 | << PTE_FILE_SHIFT4) \ | 104 | pte_bitop(off, PTE_FILE_LSHIFT3, PTE_FILE_MASK3, PTE_FILE_SHIFT3) + |
98 | + _PAGE_FILE }) | 105 | pte_bitop(off, PTE_FILE_LSHIFT4, -1UL, PTE_FILE_SHIFT4) + |
106 | _PAGE_FILE, | ||
107 | }; | ||
108 | } | ||
99 | 109 | ||
100 | #else /* CONFIG_MEM_SOFT_DIRTY */ | 110 | #else /* CONFIG_MEM_SOFT_DIRTY */ |
101 | 111 | ||
@@ -115,22 +125,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
115 | #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) | 125 | #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) |
116 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) | 126 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) |
117 | 127 | ||
118 | #define pte_to_pgoff(pte) \ | 128 | #define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) |
119 | ((((pte).pte_low >> PTE_FILE_SHIFT1) \ | 129 | #define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) |
120 | & ((1U << PTE_FILE_BITS1) - 1)) \ | 130 | |
121 | + ((((pte).pte_low >> PTE_FILE_SHIFT2) \ | 131 | #define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) |
122 | & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \ | 132 | #define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) |
123 | + (((pte).pte_low >> PTE_FILE_SHIFT3) \ | 133 | |
124 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2))) | 134 | static __always_inline pgoff_t pte_to_pgoff(pte_t pte) |
125 | 135 | { | |
126 | #define pgoff_to_pte(off) \ | 136 | return (pgoff_t) |
127 | ((pte_t) { .pte_low = \ | 137 | (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + |
128 | (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ | 138 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + |
129 | + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \ | 139 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3)); |
130 | << PTE_FILE_SHIFT2) \ | 140 | } |
131 | + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | 141 | |
132 | << PTE_FILE_SHIFT3) \ | 142 | static __always_inline pte_t pgoff_to_pte(pgoff_t off) |
133 | + _PAGE_FILE }) | 143 | { |
144 | return (pte_t){ | ||
145 | .pte_low = | ||
146 | pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + | ||
147 | pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + | ||
148 | pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) + | ||
149 | _PAGE_FILE, | ||
150 | }; | ||
151 | } | ||
134 | 152 | ||
135 | #endif /* CONFIG_MEM_SOFT_DIRTY */ | 153 | #endif /* CONFIG_MEM_SOFT_DIRTY */ |
136 | 154 | ||
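The pte_bitop() helper is pure extract-and-reposition: pull a field out of 'value' and move it to another bit position. A standalone demo of the round-trip property pte_to_pgoff(pgoff_to_pte(off)) == off, using a toy two-field layout (the shifts and masks here are made up, not the real PTE_FILE_* values):

#include <assert.h>
#include <stdio.h>

/* Same helper as the patch: extract a field and move it elsewhere. */
static unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
			       unsigned long mask, unsigned int leftshift)
{
	return ((value >> rightshift) & mask) << leftshift;
}

int main(void)
{
	/* Toy layout: scatter an 8-bit offset into bits 1-4 and 9-12. */
	unsigned long off = 0xA5, pte, back;

	pte = pte_bitop(off, 0, 0xf, 1) + pte_bitop(off, 4, 0xf, 9);
	back = pte_bitop(pte, 1, 0xf, 0) + pte_bitop(pte, 9, 0xf, 4);
	assert(back == off);
	printf("off=%#lx pte=%#lx back=%#lx\n", off, pte, back);
	return 0;
}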
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 2d883440cb9a..c883bf726398 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h | |||
@@ -58,7 +58,7 @@ typedef struct { pteval_t pte; } pte_t; | |||
58 | #define VMALLOC_START _AC(0xffffc90000000000, UL) | 58 | #define VMALLOC_START _AC(0xffffc90000000000, UL) |
59 | #define VMALLOC_END _AC(0xffffe8ffffffffff, UL) | 59 | #define VMALLOC_END _AC(0xffffe8ffffffffff, UL) |
60 | #define VMEMMAP_START _AC(0xffffea0000000000, UL) | 60 | #define VMEMMAP_START _AC(0xffffea0000000000, UL) |
61 | #define MODULES_VADDR _AC(0xffffffffa0000000, UL) | 61 | #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) |
62 | #define MODULES_END _AC(0xffffffffff000000, UL) | 62 | #define MODULES_END _AC(0xffffffffff000000, UL) |
63 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) | 63 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) |
64 | 64 | ||
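Worked out for the default 512MiB image, the new expression reproduces the old constant: __START_KERNEL_map (0xffffffff80000000) + 0x20000000 = 0xffffffffa0000000, leaving MODULES_LEN = 0xffffffffff000000 - 0xffffffffa0000000 = 0x5f000000, roughly 1.5GiB.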
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 0ecac257fb26..a83aa44bb1fb 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -382,7 +382,8 @@ static inline void update_page_count(int level, unsigned long pages) { } | |||
382 | */ | 382 | */ |
383 | extern pte_t *lookup_address(unsigned long address, unsigned int *level); | 383 | extern pte_t *lookup_address(unsigned long address, unsigned int *level); |
384 | extern phys_addr_t slow_virt_to_phys(void *__address); | 384 | extern phys_addr_t slow_virt_to_phys(void *__address); |
385 | 385 | extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, | |
386 | unsigned numpages, unsigned long page_flags); | ||
386 | #endif /* !__ASSEMBLY__ */ | 387 | #endif /* !__ASSEMBLY__ */ |
387 | 388 | ||
388 | #endif /* _ASM_X86_PGTABLE_DEFS_H */ | 389 | #endif /* _ASM_X86_PGTABLE_DEFS_H */ |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7b034a4057f9..fdedd38fd0fc 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -27,7 +27,6 @@ struct mm_struct; | |||
27 | #include <linux/cache.h> | 27 | #include <linux/cache.h> |
28 | #include <linux/threads.h> | 28 | #include <linux/threads.h> |
29 | #include <linux/math64.h> | 29 | #include <linux/math64.h> |
30 | #include <linux/init.h> | ||
31 | #include <linux/err.h> | 30 | #include <linux/err.h> |
32 | #include <linux/irqflags.h> | 31 | #include <linux/irqflags.h> |
33 | 32 | ||
@@ -72,6 +71,7 @@ extern u16 __read_mostly tlb_lli_4m[NR_INFO]; | |||
72 | extern u16 __read_mostly tlb_lld_4k[NR_INFO]; | 71 | extern u16 __read_mostly tlb_lld_4k[NR_INFO]; |
73 | extern u16 __read_mostly tlb_lld_2m[NR_INFO]; | 72 | extern u16 __read_mostly tlb_lld_2m[NR_INFO]; |
74 | extern u16 __read_mostly tlb_lld_4m[NR_INFO]; | 73 | extern u16 __read_mostly tlb_lld_4m[NR_INFO]; |
74 | extern u16 __read_mostly tlb_lld_1g[NR_INFO]; | ||
75 | extern s8 __read_mostly tlb_flushall_shift; | 75 | extern s8 __read_mostly tlb_flushall_shift; |
76 | 76 | ||
77 | /* | 77 | /* |
@@ -370,6 +370,20 @@ struct ymmh_struct { | |||
370 | u32 ymmh_space[64]; | 370 | u32 ymmh_space[64]; |
371 | }; | 371 | }; |
372 | 372 | ||
373 | /* We don't support LWP yet: */ | ||
374 | struct lwp_struct { | ||
375 | u8 reserved[128]; | ||
376 | }; | ||
377 | |||
378 | struct bndregs_struct { | ||
379 | u64 bndregs[8]; | ||
380 | } __packed; | ||
381 | |||
382 | struct bndcsr_struct { | ||
383 | u64 cfg_reg_u; | ||
384 | u64 status_reg; | ||
385 | } __packed; | ||
386 | |||
373 | struct xsave_hdr_struct { | 387 | struct xsave_hdr_struct { |
374 | u64 xstate_bv; | 388 | u64 xstate_bv; |
375 | u64 reserved1[2]; | 389 | u64 reserved1[2]; |
@@ -380,6 +394,9 @@ struct xsave_struct { | |||
380 | struct i387_fxsave_struct i387; | 394 | struct i387_fxsave_struct i387; |
381 | struct xsave_hdr_struct xsave_hdr; | 395 | struct xsave_hdr_struct xsave_hdr; |
382 | struct ymmh_struct ymmh; | 396 | struct ymmh_struct ymmh; |
397 | struct lwp_struct lwp; | ||
398 | struct bndregs_struct bndregs; | ||
399 | struct bndcsr_struct bndcsr; | ||
383 | /* new processor state extensions will go here */ | 400 | /* new processor state extensions will go here */ |
384 | } __attribute__ ((packed, aligned (64))); | 401 | } __attribute__ ((packed, aligned (64))); |
385 | 402 | ||
@@ -700,29 +717,6 @@ static inline void sync_core(void) | |||
700 | #endif | 717 | #endif |
701 | } | 718 | } |
702 | 719 | ||
703 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
704 | unsigned long edx) | ||
705 | { | ||
706 | /* "monitor %eax, %ecx, %edx;" */ | ||
707 | asm volatile(".byte 0x0f, 0x01, 0xc8;" | ||
708 | :: "a" (eax), "c" (ecx), "d"(edx)); | ||
709 | } | ||
710 | |||
711 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
712 | { | ||
713 | /* "mwait %eax, %ecx;" */ | ||
714 | asm volatile(".byte 0x0f, 0x01, 0xc9;" | ||
715 | :: "a" (eax), "c" (ecx)); | ||
716 | } | ||
717 | |||
718 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | ||
719 | { | ||
720 | trace_hardirqs_on(); | ||
721 | /* "mwait %eax, %ecx;" */ | ||
722 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" | ||
723 | :: "a" (eax), "c" (ecx)); | ||
724 | } | ||
725 | |||
726 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 720 | extern void select_idle_routine(const struct cpuinfo_x86 *c); |
727 | extern void init_amd_e400_c1e_mask(void); | 721 | extern void init_amd_e400_c1e_mask(void); |
728 | 722 | ||
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 942a08623a1a..14fd6fd75a19 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h | |||
@@ -60,7 +60,6 @@ struct pt_regs { | |||
60 | 60 | ||
61 | #endif /* !__i386__ */ | 61 | #endif /* !__i386__ */ |
62 | 62 | ||
63 | #include <linux/init.h> | ||
64 | #ifdef CONFIG_PARAVIRT | 63 | #ifdef CONFIG_PARAVIRT |
65 | #include <asm/paravirt_types.h> | 64 | #include <asm/paravirt_types.h> |
66 | #endif | 65 | #endif |
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 59bcf4e22418..d62c9f809bc5 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | #include <uapi/asm/setup.h> | 4 | #include <uapi/asm/setup.h> |
5 | 5 | ||
6 | |||
7 | #define COMMAND_LINE_SIZE 2048 | 6 | #define COMMAND_LINE_SIZE 2048 |
8 | 7 | ||
9 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
@@ -29,6 +28,8 @@ | |||
29 | #include <asm/bootparam.h> | 28 | #include <asm/bootparam.h> |
30 | #include <asm/x86_init.h> | 29 | #include <asm/x86_init.h> |
31 | 30 | ||
31 | extern u64 relocated_ramdisk; | ||
32 | |||
32 | /* Interrupt control for vSMPowered x86_64 systems */ | 33 | /* Interrupt control for vSMPowered x86_64 systems */ |
33 | #ifdef CONFIG_X86_64 | 34 | #ifdef CONFIG_X86_64 |
34 | void vsmp_init(void); | 35 | void vsmp_init(void); |
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 4137890e88e3..8cd27e08e23c 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define _ASM_X86_SMP_H | 2 | #define _ASM_X86_SMP_H |
3 | #ifndef __ASSEMBLY__ | 3 | #ifndef __ASSEMBLY__ |
4 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
5 | #include <linux/init.h> | ||
6 | #include <asm/percpu.h> | 5 | #include <asm/percpu.h> |
7 | 6 | ||
8 | /* | 7 | /* |
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 34baa0eb5d0c..a04eabd43d06 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h | |||
@@ -1,9 +1,9 @@ | |||
1 | #ifndef _ASM_X86_TIMER_H | 1 | #ifndef _ASM_X86_TIMER_H |
2 | #define _ASM_X86_TIMER_H | 2 | #define _ASM_X86_TIMER_H |
3 | #include <linux/init.h> | ||
4 | #include <linux/pm.h> | 3 | #include <linux/pm.h> |
5 | #include <linux/percpu.h> | 4 | #include <linux/percpu.h> |
6 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/math64.h> | ||
7 | 7 | ||
8 | #define TICK_SIZE (tick_nsec / 1000) | 8 | #define TICK_SIZE (tick_nsec / 1000) |
9 | 9 | ||
@@ -12,68 +12,26 @@ extern int recalibrate_cpu_khz(void); | |||
12 | 12 | ||
13 | extern int no_timer_check; | 13 | extern int no_timer_check; |
14 | 14 | ||
15 | /* Accelerators for sched_clock() | 15 | /* |
16 | * convert from cycles(64bits) => nanoseconds (64bits) | 16 | * We use the full linear equation: f(x) = a + b*x, in order to allow |
17 | * basic equation: | 17 | * a continuous function in the face of dynamic freq changes. |
18 | * ns = cycles / (freq / ns_per_sec) | ||
19 | * ns = cycles * (ns_per_sec / freq) | ||
20 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | ||
21 | * ns = cycles * (10^6 / cpu_khz) | ||
22 | * | 18 | * |
23 | * Then we use scaling math (suggested by george@mvista.com) to get: | 19 | * Continuity means that when our frequency (and thus our slope b) changes, |
24 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | 20 | * we want to ensure that f(t) == f'(t); that gives: a + b*t == a' + b'*t. |
25 | * ns = cycles * cyc2ns_scale / SC | ||
26 | * | 21 | * |
27 | * And since SC is a constant power of two, we can convert the div | 22 | * Without an offset (a) the above would not be possible. |
28 | * into a shift. | ||
29 | * | 23 | * |
30 | * We can use khz divisor instead of mhz to keep a better precision, since | 24 | * See the comment near cycles_2_ns() for details on how we compute (b). |
31 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
32 | * (mathieu.desnoyers@polymtl.ca) | ||
33 | * | ||
34 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | ||
35 | * | ||
36 | * In: | ||
37 | * | ||
38 | * ns = cycles * cyc2ns_scale / SC | ||
39 | * | ||
40 | * Although we may still have enough bits to store the value of ns, | ||
41 | * in some cases, we may not have enough bits to store cycles * cyc2ns_scale, | ||
42 | * leading to an incorrect result. | ||
43 | * | ||
44 | * To avoid this, we can decompose 'cycles' into quotient and remainder | ||
45 | * of division by SC. Then, | ||
46 | * | ||
47 | * ns = (quot * SC + rem) * cyc2ns_scale / SC | ||
48 | * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC | ||
49 | * | ||
50 | * - sqazi@google.com | ||
51 | */ | 25 | */ |
52 | 26 | struct cyc2ns_data { | |
53 | DECLARE_PER_CPU(unsigned long, cyc2ns); | 27 | u32 cyc2ns_mul; |
54 | DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); | 28 | u32 cyc2ns_shift; |
55 | 29 | u64 cyc2ns_offset; | |
56 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 30 | u32 __count; |
57 | 31 | /* u32 hole */ | |
58 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) | 32 | }; /* 24 bytes -- do not grow */ |
59 | { | 33 | |
60 | int cpu = smp_processor_id(); | 34 | extern struct cyc2ns_data *cyc2ns_read_begin(void); |
61 | unsigned long long ns = per_cpu(cyc2ns_offset, cpu); | 35 | extern void cyc2ns_read_end(struct cyc2ns_data *); |
62 | ns += mult_frac(cyc, per_cpu(cyc2ns, cpu), | ||
63 | (1UL << CYC2NS_SCALE_FACTOR)); | ||
64 | return ns; | ||
65 | } | ||
66 | |||
67 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | ||
68 | { | ||
69 | unsigned long long ns; | ||
70 | unsigned long flags; | ||
71 | |||
72 | local_irq_save(flags); | ||
73 | ns = __cycles_2_ns(cyc); | ||
74 | local_irq_restore(flags); | ||
75 | |||
76 | return ns; | ||
77 | } | ||
78 | 36 | ||
79 | #endif /* _ASM_X86_TIMER_H */ | 37 | #endif /* _ASM_X86_TIMER_H */ |
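With the new per-cpu cyc2ns data, the sched_clock conversion becomes ns = offset + (cycles * cyc2ns_mul) >> cyc2ns_shift. A standalone sketch of that arithmetic; the mul/shift values here are chosen for a hypothetical 2.5 GHz TSC (the kernel derives its own), and __uint128_t is a GCC extension:

#include <stdint.h>
#include <stdio.h>

/* ns = offset + (cycles * mul) >> shift */
static uint64_t cycles_to_ns(uint64_t cyc, uint32_t mul, uint32_t shift,
			     uint64_t offset)
{
	return offset + (uint64_t)(((__uint128_t)cyc * mul) >> shift);
}

int main(void)
{
	uint32_t shift = 32;	/* demo choice; the kernel picks its own */
	uint32_t mul = (uint32_t)(((uint64_t)1000000 << shift) / 2500000);

	/* 2.5e9 cycles at 2.5 GHz should be ~1e9 ns, i.e. one second */
	printf("%llu ns\n", (unsigned long long)
	       cycles_to_ns(2500000000ULL, mul, shift, 0));
	return 0;
}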
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 235be70d5bb4..57ae63cd6ee2 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h | |||
@@ -65,4 +65,7 @@ extern int notsc_setup(char *); | |||
65 | extern void tsc_save_sched_clock_state(void); | 65 | extern void tsc_save_sched_clock_state(void); |
66 | extern void tsc_restore_sched_clock_state(void); | 66 | extern void tsc_restore_sched_clock_state(void); |
67 | 67 | ||
68 | /* MSR based TSC calibration for Intel Atom SoC platforms */ | ||
69 | int try_msr_calibrate_tsc(unsigned long *fast_calibrate); | ||
70 | |||
68 | #endif /* _ASM_X86_TSC_H */ | 71 | #endif /* _ASM_X86_TSC_H */ |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8ec57c07b125..0d592e0a5b84 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -40,22 +40,30 @@ | |||
40 | /* | 40 | /* |
41 | * Test whether a block of memory is a valid user space address. | 41 | * Test whether a block of memory is a valid user space address. |
42 | * Returns 0 if the range is valid, nonzero otherwise. | 42 | * Returns 0 if the range is valid, nonzero otherwise. |
43 | * | ||
44 | * This is equivalent to the following test: | ||
45 | * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) | ||
46 | * | ||
47 | * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... | ||
48 | */ | 43 | */ |
44 | static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) | ||
45 | { | ||
46 | /* | ||
47 | * If we have used "sizeof()" for the size, | ||
48 | * we know it won't overflow the limit (but | ||
49 | * it might overflow the 'addr', so it's | ||
50 | * important to subtract the size from the | ||
51 | * limit, not add it to the address). | ||
52 | */ | ||
53 | if (__builtin_constant_p(size)) | ||
54 | return addr > limit - size; | ||
55 | |||
56 | /* Arbitrary sizes? Be careful about overflow */ | ||
57 | addr += size; | ||
58 | if (addr < size) | ||
59 | return true; | ||
60 | return addr > limit; | ||
61 | } | ||
49 | 62 | ||
50 | #define __range_not_ok(addr, size, limit) \ | 63 | #define __range_not_ok(addr, size, limit) \ |
51 | ({ \ | 64 | ({ \ |
52 | unsigned long flag, roksum; \ | ||
53 | __chk_user_ptr(addr); \ | 65 | __chk_user_ptr(addr); \ |
54 | asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ | 66 | __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ |
55 | : "=&r" (flag), "=r" (roksum) \ | ||
56 | : "1" (addr), "g" ((long)(size)), \ | ||
57 | "rm" (limit)); \ | ||
58 | flag; \ | ||
59 | }) | 67 | }) |
60 | 68 | ||
61 | /** | 69 | /** |
@@ -78,7 +86,7 @@ | |||
78 | * this function, memory access functions may still return -EFAULT. | 86 | * this function, memory access functions may still return -EFAULT. |
79 | */ | 87 | */ |
80 | #define access_ok(type, addr, size) \ | 88 | #define access_ok(type, addr, size) \ |
81 | (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) | 89 | likely(!__range_not_ok(addr, size, user_addr_max())) |
82 | 90 | ||
83 | /* | 91 | /* |
84 | * The exception table consists of pairs of addresses relative to the | 92 | * The exception table consists of pairs of addresses relative to the |
@@ -525,6 +533,98 @@ extern __must_check long strnlen_user(const char __user *str, long n); | |||
525 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); | 533 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); |
526 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); | 534 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); |
527 | 535 | ||
536 | extern void __cmpxchg_wrong_size(void) | ||
537 | __compiletime_error("Bad argument size for cmpxchg"); | ||
538 | |||
539 | #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \ | ||
540 | ({ \ | ||
541 | int __ret = 0; \ | ||
542 | __typeof__(ptr) __uval = (uval); \ | ||
543 | __typeof__(*(ptr)) __old = (old); \ | ||
544 | __typeof__(*(ptr)) __new = (new); \ | ||
545 | switch (size) { \ | ||
546 | case 1: \ | ||
547 | { \ | ||
548 | asm volatile("\t" ASM_STAC "\n" \ | ||
549 | "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ | ||
550 | "2:\t" ASM_CLAC "\n" \ | ||
551 | "\t.section .fixup, \"ax\"\n" \ | ||
552 | "3:\tmov %3, %0\n" \ | ||
553 | "\tjmp 2b\n" \ | ||
554 | "\t.previous\n" \ | ||
555 | _ASM_EXTABLE(1b, 3b) \ | ||
556 | : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ | ||
557 | : "i" (-EFAULT), "q" (__new), "1" (__old) \ | ||
558 | : "memory" \ | ||
559 | ); \ | ||
560 | break; \ | ||
561 | } \ | ||
562 | case 2: \ | ||
563 | { \ | ||
564 | asm volatile("\t" ASM_STAC "\n" \ | ||
565 | "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \ | ||
566 | "2:\t" ASM_CLAC "\n" \ | ||
567 | "\t.section .fixup, \"ax\"\n" \ | ||
568 | "3:\tmov %3, %0\n" \ | ||
569 | "\tjmp 2b\n" \ | ||
570 | "\t.previous\n" \ | ||
571 | _ASM_EXTABLE(1b, 3b) \ | ||
572 | : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ | ||
573 | : "i" (-EFAULT), "r" (__new), "1" (__old) \ | ||
574 | : "memory" \ | ||
575 | ); \ | ||
576 | break; \ | ||
577 | } \ | ||
578 | case 4: \ | ||
579 | { \ | ||
580 | asm volatile("\t" ASM_STAC "\n" \ | ||
581 | "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ | ||
582 | "2:\t" ASM_CLAC "\n" \ | ||
583 | "\t.section .fixup, \"ax\"\n" \ | ||
584 | "3:\tmov %3, %0\n" \ | ||
585 | "\tjmp 2b\n" \ | ||
586 | "\t.previous\n" \ | ||
587 | _ASM_EXTABLE(1b, 3b) \ | ||
588 | : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ | ||
589 | : "i" (-EFAULT), "r" (__new), "1" (__old) \ | ||
590 | : "memory" \ | ||
591 | ); \ | ||
592 | break; \ | ||
593 | } \ | ||
594 | case 8: \ | ||
595 | { \ | ||
596 | if (!IS_ENABLED(CONFIG_X86_64)) \ | ||
597 | __cmpxchg_wrong_size(); \ | ||
598 | \ | ||
599 | asm volatile("\t" ASM_STAC "\n" \ | ||
600 | "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ | ||
601 | "2:\t" ASM_CLAC "\n" \ | ||
602 | "\t.section .fixup, \"ax\"\n" \ | ||
603 | "3:\tmov %3, %0\n" \ | ||
604 | "\tjmp 2b\n" \ | ||
605 | "\t.previous\n" \ | ||
606 | _ASM_EXTABLE(1b, 3b) \ | ||
607 | : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ | ||
608 | : "i" (-EFAULT), "r" (__new), "1" (__old) \ | ||
609 | : "memory" \ | ||
610 | ); \ | ||
611 | break; \ | ||
612 | } \ | ||
613 | default: \ | ||
614 | __cmpxchg_wrong_size(); \ | ||
615 | } \ | ||
616 | *__uval = __old; \ | ||
617 | __ret; \ | ||
618 | }) | ||
619 | |||
620 | #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ | ||
621 | ({ \ | ||
622 | access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ | ||
623 | __user_atomic_cmpxchg_inatomic((uval), (ptr), \ | ||
624 | (old), (new), sizeof(*(ptr))) : \ | ||
625 | -EFAULT; \ | ||
626 | }) | ||
627 | |||
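user_atomic_cmpxchg_inatomic() returns 0 when the access succeeded (whether or not the compare did) and -EFAULT on a bad pointer, with *uval receiving the value found at ptr, which is exactly what a CAS retry loop needs. A hypothetical kernel-context helper sketching that pattern:

/* Hypothetical sketch: atomically increment a user-space u32. */
static int user_u32_inc_demo(u32 __user *uaddr)
{
	u32 cur, old;
	int ret;

	if (get_user(cur, uaddr))
		return -EFAULT;
	do {
		old = cur;
		ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, old, old + 1);
		if (ret)
			return ret;	/* -EFAULT */
	} while (cur != old);		/* compare failed: retry with cur */
	return 0;
}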
528 | /* | 628 | /* |
529 | * movsl can be slow when source and dest are not both 8-byte aligned | 629 | * movsl can be slow when source and dest are not both 8-byte aligned |
530 | */ | 630 | */ |
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 190413d0de57..12a26b979bf1 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -204,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | |||
204 | static __must_check __always_inline int | 204 | static __must_check __always_inline int |
205 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) | 205 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) |
206 | { | 206 | { |
207 | return __copy_from_user_nocheck(dst, (__force const void *)src, size); | 207 | return __copy_from_user_nocheck(dst, src, size); |
208 | } | 208 | } |
209 | 209 | ||
210 | static __must_check __always_inline int | 210 | static __must_check __always_inline int |
211 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | 211 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) |
212 | { | 212 | { |
213 | return __copy_to_user_nocheck((__force void *)dst, src, size); | 213 | return __copy_to_user_nocheck(dst, src, size); |
214 | } | 214 | } |
215 | 215 | ||
216 | extern long __copy_user_nocache(void *dst, const void __user *src, | 216 | extern long __copy_user_nocache(void *dst, const void __user *src, |
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 0415cdabb5a6..554738963b28 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #define XSTATE_FP 0x1 | 9 | #define XSTATE_FP 0x1 |
10 | #define XSTATE_SSE 0x2 | 10 | #define XSTATE_SSE 0x2 |
11 | #define XSTATE_YMM 0x4 | 11 | #define XSTATE_YMM 0x4 |
12 | #define XSTATE_BNDREGS 0x8 | ||
13 | #define XSTATE_BNDCSR 0x10 | ||
12 | 14 | ||
13 | #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) | 15 | #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) |
14 | 16 | ||
@@ -20,10 +22,14 @@ | |||
20 | #define XSAVE_YMM_SIZE 256 | 22 | #define XSAVE_YMM_SIZE 256 |
21 | #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) | 23 | #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) |
22 | 24 | ||
23 | /* | 25 | /* Supported features which support lazy state saving */ |
24 | * These are the features that the OS can handle currently. | 26 | #define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) |
25 | */ | 27 | |
26 | #define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) | 28 | /* Supported features which require eager state saving */ |
29 | #define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR) | ||
30 | |||
31 | /* All currently supported features */ | ||
32 | #define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER) | ||
27 | 33 | ||
28 | #ifdef CONFIG_X86_64 | 34 | #ifdef CONFIG_X86_64 |
29 | #define REX_PREFIX "0x48, " | 35 | #define REX_PREFIX "0x48, " |
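Numerically: XSTATE_LAZY = 0x1 | 0x2 | 0x4 = 0x07 and XSTATE_EAGER = 0x8 | 0x10 = 0x18, so XCNTXT_MASK grows from the old 0x07 to 0x1f with the MPX bits included.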
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 9c3733c5f8f7..225b0988043a 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #define SETUP_E820_EXT 1 | 6 | #define SETUP_E820_EXT 1 |
7 | #define SETUP_DTB 2 | 7 | #define SETUP_DTB 2 |
8 | #define SETUP_PCI 3 | 8 | #define SETUP_PCI 3 |
9 | #define SETUP_EFI 4 | ||
9 | 10 | ||
10 | /* ram_size flags */ | 11 | /* ram_size flags */ |
11 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 12 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
@@ -23,6 +24,7 @@ | |||
23 | #define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1) | 24 | #define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1) |
24 | #define XLF_EFI_HANDOVER_32 (1<<2) | 25 | #define XLF_EFI_HANDOVER_32 (1<<2) |
25 | #define XLF_EFI_HANDOVER_64 (1<<3) | 26 | #define XLF_EFI_HANDOVER_64 (1<<3) |
27 | #define XLF_EFI_KEXEC (1<<4) | ||
26 | 28 | ||
27 | #ifndef __ASSEMBLY__ | 29 | #ifndef __ASSEMBLY__ |
28 | 30 | ||
diff --git a/arch/x86/include/uapi/asm/stat.h b/arch/x86/include/uapi/asm/stat.h index 7b3ddc348585..bc03eb5d6360 100644 --- a/arch/x86/include/uapi/asm/stat.h +++ b/arch/x86/include/uapi/asm/stat.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_X86_STAT_H | 1 | #ifndef _ASM_X86_STAT_H |
2 | #define _ASM_X86_STAT_H | 2 | #define _ASM_X86_STAT_H |
3 | 3 | ||
4 | #include <asm/posix_types.h> | ||
5 | |||
4 | #define STAT_HAVE_NSEC 1 | 6 | #define STAT_HAVE_NSEC 1 |
5 | 7 | ||
6 | #ifdef __i386__ | 8 | #ifdef __i386__ |
@@ -78,26 +80,26 @@ struct stat64 { | |||
78 | #else /* __i386__ */ | 80 | #else /* __i386__ */ |
79 | 81 | ||
80 | struct stat { | 82 | struct stat { |
81 | unsigned long st_dev; | 83 | __kernel_ulong_t st_dev; |
82 | unsigned long st_ino; | 84 | __kernel_ulong_t st_ino; |
83 | unsigned long st_nlink; | 85 | __kernel_ulong_t st_nlink; |
84 | 86 | ||
85 | unsigned int st_mode; | 87 | unsigned int st_mode; |
86 | unsigned int st_uid; | 88 | unsigned int st_uid; |
87 | unsigned int st_gid; | 89 | unsigned int st_gid; |
88 | unsigned int __pad0; | 90 | unsigned int __pad0; |
89 | unsigned long st_rdev; | 91 | __kernel_ulong_t st_rdev; |
90 | long st_size; | 92 | __kernel_long_t st_size; |
91 | long st_blksize; | 93 | __kernel_long_t st_blksize; |
92 | long st_blocks; /* Number of 512-byte blocks allocated. */ | 94 | __kernel_long_t st_blocks; /* Number of 512-byte blocks allocated. */ |
93 | 95 | ||
94 | unsigned long st_atime; | 96 | __kernel_ulong_t st_atime; |
95 | unsigned long st_atime_nsec; | 97 | __kernel_ulong_t st_atime_nsec; |
96 | unsigned long st_mtime; | 98 | __kernel_ulong_t st_mtime; |
97 | unsigned long st_mtime_nsec; | 99 | __kernel_ulong_t st_mtime_nsec; |
98 | unsigned long st_ctime; | 100 | __kernel_ulong_t st_ctime; |
99 | unsigned long st_ctime_nsec; | 101 | __kernel_ulong_t st_ctime_nsec; |
100 | long __unused[3]; | 102 | __kernel_long_t __unused[3]; |
101 | }; | 103 | }; |
102 | 104 | ||
103 | /* We don't need to memset the whole thing just to initialize the padding */ | 105 | /* We don't need to memset the whole thing just to initialize the padding */ |