Diffstat (limited to 'arch/x86/include')
-rw-r--r--   arch/x86/include/asm/mce.h                2
-rw-r--r--   arch/x86/include/asm/mshyperv.h           2
-rw-r--r--   arch/x86/include/asm/page_64_types.h     12
-rw-r--r--   arch/x86/include/asm/pgtable_64_types.h   4
-rw-r--r--   arch/x86/include/asm/qspinlock.h         13
-rw-r--r--   arch/x86/include/asm/xen/page.h          35
6 files changed, 50 insertions, 18 deletions
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4da9b1c58d28..c1a812bd5a27 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -221,6 +221,8 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
 
 int mce_available(struct cpuinfo_x86 *c);
 bool mce_is_memory_error(struct mce *m);
+bool mce_is_correctable(struct mce *m);
+int mce_usable_address(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 0d6271cce198..1d0a7778e163 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
                                       : "cc");
         }
 #endif
         return hv_status;
 }
 
 /*
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index cd0cf1c568b4..8f657286d599 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -33,12 +33,14 @@
 
 /*
  * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
+ * PGDIR_SIZE*17 (pgd slot 273).
+ *
+ * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
+ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
+ * but it's what Xen requires.
  */
-#define __PAGE_OFFSET_BASE_L5	_AC(0xff10000000000000, UL)
-#define __PAGE_OFFSET_BASE_L4	_AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE_L5	_AC(0xff11000000000000, UL)
+#define __PAGE_OFFSET_BASE_L4	_AC(0xffff888000000000, UL)
 
 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 #define __PAGE_OFFSET		page_offset_base
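The reworked comment pins both bases to "most negative possible address + PGDIR_SIZE*17" (pgd slot 273, one slot above the old slot 272). A quick standalone sanity check of the two new constants, not kernel code, assuming the usual x86-64 values PGDIR_SHIFT = 39 (4-level) / 48 (5-level) and sign-extended kernel space starting at 0xffff800000000000 / 0xff00000000000000:

/*
 * Standalone sanity check (not kernel code): reproduce the new
 * __PAGE_OFFSET_BASE_* constants from "most negative address +
 * PGDIR_SIZE * 17".
 */
#include <stdio.h>

int main(void)
{
        unsigned long l4 = 0xffff800000000000UL + (17UL << 39);
        unsigned long l5 = 0xff00000000000000UL + (17UL << 48);

        /* Expected: 0xffff888000000000 and 0xff11000000000000, as on the + lines above */
        printf("__PAGE_OFFSET_BASE_L4 = 0x%lx\n", l4);
        printf("__PAGE_OFFSET_BASE_L5 = 0x%lx\n", l5);
        return 0;
}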
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 04edd2d58211..84bd9bdc1987 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d;
  */
 #define MAXMEM			(1UL << MAX_PHYSMEM_BITS)
 
-#define LDT_PGD_ENTRY_L4	-3UL
-#define LDT_PGD_ENTRY_L5	-112UL
-#define LDT_PGD_ENTRY		(pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
+#define LDT_PGD_ENTRY		-240UL
 #define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)
 
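A single LDT_PGD_ENTRY of -240 now works for both paging modes: -240 mod 512 is pgd slot 272, i.e. the LDT remap takes over the old __PAGE_OFFSET base from the hunk above, while __PAGE_OFFSET itself moves up to slot 273. A small standalone check, not kernel code, again assuming PGDIR_SHIFT = 39 (4-level) / 48 (5-level):

/*
 * Standalone check (not kernel code) of LDT_BASE_ADDR = -240UL << PGDIR_SHIFT
 * for both paging modes.
 */
#include <stdio.h>

int main(void)
{
        unsigned long base_l4 = -240UL << 39;
        unsigned long base_l5 = -240UL << 48;

        /* Expected: 0xffff880000000000 and 0xff10000000000000, the old __PAGE_OFFSET bases */
        printf("LDT_BASE_ADDR (4-level) = 0x%lx\n", base_l4);
        printf("LDT_BASE_ADDR (5-level) = 0x%lx\n", base_l5);
        return 0;
}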
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 87623c6b13db..bd5ac6cc37db 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -13,12 +13,15 @@
 #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
 {
-	u32 val = 0;
-
-	if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
-			     "I", _Q_PENDING_OFFSET))
-		val |= _Q_PENDING_VAL;
+	u32 val;
 
+	/*
+	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
+	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
+	 * statement expression, which GCC doesn't like.
+	 */
+	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
 	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
 
 	return val;
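The replacement computes val without an if(): GEN_BINARY_RMWcc() evaluates to 0 or 1, so multiplying by _Q_PENDING_VAL yields either 0 or the pending bit, and no asm-goto label ends up inside a statement expression. A minimal userspace illustration of that multiply-instead-of-branch rewrite (plain C stand-ins, not the kernel macros; FLAG is a made-up value standing in for _Q_PENDING_VAL):

/* Illustration only: the branchy and branch-free forms compute the same value. */
#include <stdbool.h>
#include <stdio.h>

#define FLAG 0x100u             /* stand-in for _Q_PENDING_VAL */

static unsigned int with_branch(bool was_set)
{
        unsigned int val = 0;

        if (was_set)
                val |= FLAG;
        return val;
}

static unsigned int without_branch(bool was_set)
{
        /* was_set is exactly 0 or 1, so the product is 0 or FLAG */
        return was_set * FLAG;
}

int main(void)
{
        printf("%u %u\n", with_branch(0), without_branch(0));  /* 0 0 */
        printf("%u %u\n", with_branch(1), without_branch(1));  /* 256 256 */
        return 0;
}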
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 123e669bf363..790ce08e41f2 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -9,7 +9,7 @@
 #include <linux/mm.h>
 #include <linux/device.h>
 
-#include <linux/uaccess.h>
+#include <asm/extable.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
  */
 static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
 {
-	return __put_user(val, (unsigned long __user *)addr);
+	int ret = 0;
+
+	asm volatile("1: mov %[val], %[ptr]\n"
+		     "2:\n"
+		     ".section .fixup, \"ax\"\n"
+		     "3: sub $1, %[ret]\n"
+		     "   jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [ret] "+r" (ret), [ptr] "=m" (*addr)
+		     : [val] "r" (val));
+
+	return ret;
 }
 
-static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+static inline int xen_safe_read_ulong(const unsigned long *addr,
+				      unsigned long *val)
 {
-	return __get_user(*val, (unsigned long __user *)addr);
+	int ret = 0;
+	unsigned long rval = ~0ul;
+
+	asm volatile("1: mov %[ptr], %[rval]\n"
+		     "2:\n"
+		     ".section .fixup, \"ax\"\n"
+		     "3: sub $1, %[ret]\n"
+		     "   jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [ret] "+r" (ret), [rval] "+r" (rval)
+		     : [ptr] "m" (*addr));
+	*val = rval;
+
+	return ret;
 }
 
 #ifdef CONFIG_XEN_PV
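Both helpers keep the old __put_user()/__get_user() convention: they return 0 on success and -1 if the access faults (the fixup code subtracts 1 from ret and resumes at label 2), and the read side starts rval at ~0ul so a faulting read hands the caller all-ones. Below is a userspace sketch of the same structure, assuming x86-64 with GCC or Clang; it reuses the named asm operands from the hunk but drops the kernel-only .fixup/_ASM_EXTABLE machinery, so it must only be given valid pointers.

/*
 * Userspace sketch (x86-64, GCC/Clang) of the helpers' shape: same named
 * asm operands and return convention, but no exception-table fixup, so a
 * bad pointer simply crashes here instead of returning -1.
 */
#include <stdio.h>

static inline int demo_write_ulong(unsigned long *addr, unsigned long val)
{
        int ret = 0;

        asm volatile("mov %[val], %[ptr]"
                     : [ret] "+r" (ret), [ptr] "=m" (*addr)
                     : [val] "r" (val));
        return ret;                     /* 0; the kernel version returns -1 on a fault */
}

static inline int demo_read_ulong(const unsigned long *addr, unsigned long *val)
{
        int ret = 0;
        unsigned long rval = ~0ul;      /* what the caller would see after a fault */

        asm volatile("mov %[ptr], %[rval]"
                     : [ret] "+r" (ret), [rval] "+r" (rval)
                     : [ptr] "m" (*addr));
        *val = rval;
        return ret;
}

int main(void)
{
        unsigned long x = 0, y = 0;

        if (demo_write_ulong(&x, 42) == 0 && demo_read_ulong(&x, &y) == 0)
                printf("read back %lu\n", y);   /* prints 42 */
        return 0;
}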