author	Linus Torvalds <torvalds@linux-foundation.org>	2018-09-09 10:05:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-09-09 10:05:15 -0400
commit	9a5682765a2e5f93cf2fe7b612b8072b18f0c68a (patch)
tree	a5a7667a1089a367677257ad36b3e3f144405b42
parent	3567994a05ba6490f6055650fbb892c926ae7fca (diff)
parent	9bc4f28af75a91aea0ae383f50b0a430c4509303 (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
"A set of fixes for x86:
- Prevent multiplication result truncation on 32bit. Introduced with
the early timestamp rework.
- Ensure that the stored microcode revision stays consistent under all
circumstances
- Prevent write tearing of PTEs
- Prevent confusion of user and kernel registers when dumping fatal
signals verbosely
- Make an error return value in a failure path of the vector
allocation negative. Returning EINVAL might make the caller assume
success and cause further wreckage.
- A trivial kernel doc warning fix"
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: Use WRITE_ONCE() when setting PTEs
x86/apic/vector: Make error return value negative
x86/process: Don't mix user/kernel regs in 64bit __show_regs()
x86/tsc: Prevent result truncation on 32bit
x86: Fix kernel-doc atomic.h warnings
x86/microcode: Update the new microcode revision unconditionally
x86/microcode: Make sure boot_cpu_data.microcode is up-to-date
-rw-r--r--	arch/x86/include/asm/atomic.h	12
-rw-r--r--	arch/x86/include/asm/atomic64_32.h	8
-rw-r--r--	arch/x86/include/asm/atomic64_64.h	12
-rw-r--r--	arch/x86/include/asm/kdebug.h	12
-rw-r--r--	arch/x86/include/asm/pgtable.h	2
-rw-r--r--	arch/x86/include/asm/pgtable_64.h	20
-rw-r--r--	arch/x86/kernel/apic/vector.c	2
-rw-r--r--	arch/x86/kernel/cpu/microcode/amd.c	24
-rw-r--r--	arch/x86/kernel/cpu/microcode/intel.c	17
-rw-r--r--	arch/x86/kernel/dumpstack.c	11
-rw-r--r--	arch/x86/kernel/process_32.c	4
-rw-r--r--	arch/x86/kernel/process_64.c	12
-rw-r--r--	arch/x86/kernel/tsc.c	2
-rw-r--r--	arch/x86/mm/pgtable.c	8
14 files changed, 87 insertions, 59 deletions
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b143717b92b3..ce84388e540c 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 
 /**
  * arch_atomic_inc - increment atomic variable
@@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic_inc arch_atomic_inc
 static __always_inline void arch_atomic_inc(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "incl %0"
 		     : "+m" (v->counter));
 }
+#define arch_atomic_inc arch_atomic_inc
 
 /**
  * arch_atomic_dec - decrement atomic variable
@@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic_dec arch_atomic_dec
 static __always_inline void arch_atomic_dec(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "decl %0"
 		     : "+m" (v->counter));
 }
+#define arch_atomic_dec arch_atomic_dec
 
 /**
  * arch_atomic_dec_and_test - decrement and test
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
 	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 
 /**
  * arch_atomic_inc_and_test - increment and test
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
 	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
 /**
  * arch_atomic_add_negative - add and test if negative
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic_add_negative arch_atomic_add_negative
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic_add_negative arch_atomic_add_negative
 
 /**
  * arch_atomic_add_return - add integer and return
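The atomic.h hunks above are the same mechanical move repeated for each operation. kernel-doc binds a /** ... */ comment to the declaration that immediately follows it, so a #define sitting between the comment and the function makes kernel-doc try to document the macro rather than the function, which is what produced the warnings this patch silences. A minimal sketch of the broken shape, with a hypothetical my_inc standing in for the arch_atomic_*() helpers:

/**
 * my_inc - increment a counter
 * @v: pointer to the counter
 */
#define my_inc my_inc		/* kernel-doc binds the comment here */
static inline void my_inc(int *v)
{
	(*v)++;
}

Moving the #define below the function body, as every hunk in this file (and in the two atomic64 headers that follow) does, keeps it working as a marker that tells the generic atomic machinery "this architecture provides its own my_inc", while letting the comment sit directly on the function it documents.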
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index ef959f02d070..6a5b0ec460da 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static inline void arch_atomic64_inc(atomic64_t *v)
 {
 	__alternative_atomic64(inc, inc_return, /* no output */,
 			       "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static inline void arch_atomic64_dec(atomic64_t *v)
 {
 	__alternative_atomic64(dec, dec_return, /* no output */,
 			       "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_add_unless - add unless the number is a given value
@@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
 	return (int)a;
 }
 
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
 	int r;
@@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 		     "S" (v) : "ecx", "edx", "memory");
 	return r;
 }
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	long long r;
@@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
 		     "S" (v) : "ecx", "memory");
 	return r;
 }
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #undef alternative_atomic64
 #undef __alternative_atomic64
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 4343d9b4f30e..5f851d92eecd 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 
 /**
  * arch_atomic64_inc - increment atomic64 variable
@@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "incq %0"
 		     : "=m" (v->counter)
 		     : "m" (v->counter));
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "decq %0"
 		     : "=m" (v->counter)
 		     : "m" (v->counter));
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_dec_and_test - decrement and test
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
 	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 
 /**
  * arch_atomic64_inc_and_test - increment and test
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
 	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 
 /**
  * arch_atomic64_add_negative - add and test if negative
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic64_add_negative arch_atomic64_add_negative
 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic64_add_negative arch_atomic64_add_negative
 
 /**
  * arch_atomic64_add_return - add and return
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 395c9631e000..75f1e35e7c15 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -22,10 +22,20 @@ enum die_val {
 	DIE_NMIUNKNOWN,
 };
 
+enum show_regs_mode {
+	SHOW_REGS_SHORT,
+	/*
+	 * For when userspace crashed, but we don't think it's our fault, and
+	 * therefore don't print kernel registers.
+	 */
+	SHOW_REGS_USER,
+	SHOW_REGS_ALL
+};
+
 extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
-extern void __show_regs(struct pt_regs *regs, int all);
+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
 extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e4ffa565a69f..690c0307afed 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 		return xchg(pmdp, pmd);
 	} else {
 		pmd_t old = *pmdp;
-		*pmdp = pmd;
+		WRITE_ONCE(*pmdp, pmd);
 		return old;
 	}
 }
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f773d5e6c8cc..ce2b59047cb8 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -55,15 +55,15 @@ struct mm_struct;
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
-				    pte_t *ptep)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
 {
-	*ptep = native_make_pte(0);
+	WRITE_ONCE(*ptep, pte);
 }
 
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep)
 {
-	*ptep = pte;
+	native_set_pte(ptep, native_make_pte(0));
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -73,7 +73,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	*pmdp = pmd;
+	WRITE_ONCE(*pmdp, pmd);
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -109,7 +109,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-	*pudp = pud;
+	WRITE_ONCE(*pudp, pud);
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -137,13 +137,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 	pgd_t pgd;
 
 	if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
-		*p4dp = p4d;
+		WRITE_ONCE(*p4dp, p4d);
 		return;
 	}
 
 	pgd = native_make_pgd(native_p4d_val(p4d));
 	pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
-	*p4dp = native_make_p4d(native_pgd_val(pgd));
+	WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
 }
 
 static inline void native_p4d_clear(p4d_t *p4d)
@@ -153,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	*pgdp = pti_set_user_pgtbl(pgdp, pgd);
+	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
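The pgtable.h and pgtable_64.h hunks above all target store tearing: C does not oblige the compiler to emit a plain 64-bit assignment as one store, and a hardware page walk racing with a torn PTE store can observe a half-written entry. WRITE_ONCE() forces the access through a volatile lvalue, which in practice compels a single full-width store. A hedged, userspace-compilable sketch of the distinction (the kernel's real macro covers more sizes and cases):

#include <stdint.h>

typedef struct { uint64_t pte; } pte_t;

/* Plain assignment: the compiler may legally split this into two
 * 32-bit immediate stores, e.g. when the value is a constant. */
static void set_pte_plain(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

/* Volatile access in the spirit of WRITE_ONCE(): one store, full
 * width, at exactly this point in the code. */
static void set_pte_once(pte_t *ptep, pte_t pte)
{
	*(volatile uint64_t *)&ptep->pte = pte.pte;
}

int main(void)
{
	pte_t p = { 0 }, v = { 0x1234567890abcdefULL };

	set_pte_plain(&p, v);
	set_pte_once(&p, v);
	return 0;
}

On x86-64 an aligned 64-bit volatile store compiles to a single mov, so a concurrent page walk sees either the old entry or the new one, never a mixture.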
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 9f148e3d45b4..7654febd5102 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd)
 	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
 		/* Something in the core code broke! Survive gracefully */
 		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
-		return EINVAL;
+		return -EINVAL;
 	}
 
 	ret = assign_managed_vector(irqd, vector_searchmask);
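The one-character vector.c fix above matters because the kernel reports failure as a negative errno. A caller that checks ret < 0 reads a bare EINVAL (positive 22) as success and carries on with an interrupt that never got a vector. A small standalone illustration of the convention, with a hypothetical activate() standing in for activate_managed():

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for activate_managed(). */
static int activate(int have_cpu)
{
	if (!have_cpu)
		return -EINVAL;		/* negative errno: unambiguous failure */
	return 0;
}

int main(void)
{
	int ret = activate(0);

	if (ret < 0)			/* the usual kernel-style check */
		printf("failed: %d\n", ret);
	else
		printf("ok\n");		/* a bare 'return EINVAL' lands here */
	return 0;
}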
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 0624957aa068..07b5fc00b188 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
 	struct microcode_amd *mc_amd;
 	struct ucode_cpu_info *uci;
 	struct ucode_patch *p;
+	enum ucode_state ret;
 	u32 rev, dummy;
 
 	BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
 
 	/* need to apply patch? */
 	if (rev >= mc_amd->hdr.patch_id) {
-		c->microcode = rev;
-		uci->cpu_sig.rev = rev;
-		return UCODE_OK;
+		ret = UCODE_OK;
+		goto out;
 	}
 
 	if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
 			cpu, mc_amd->hdr.patch_id);
 		return UCODE_ERROR;
 	}
-	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
-		mc_amd->hdr.patch_id);
 
-	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
-	c->microcode = mc_amd->hdr.patch_id;
+	rev = mc_amd->hdr.patch_id;
+	ret = UCODE_UPDATED;
+
+	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
 
-	return UCODE_UPDATED;
+out:
+	uci->cpu_sig.rev = rev;
+	c->microcode = rev;
+
+	/* Update boot_cpu_data's revision too, if we're on the BSP: */
+	if (c->cpu_index == boot_cpu_data.cpu_index)
+		boot_cpu_data.microcode = rev;
+
+	return ret;
 }
 
 static int install_equiv_cpu_table(const u8 *buf)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 97ccf4c3b45b..16936a24795c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct microcode_intel *mc;
+	enum ucode_state ret;
 	static int prev_rev;
 	u32 rev;
 
@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
 	 */
 	rev = intel_get_microcode_revision();
 	if (rev >= mc->hdr.rev) {
-		uci->cpu_sig.rev = rev;
-		c->microcode = rev;
-		return UCODE_OK;
+		ret = UCODE_OK;
+		goto out;
 	}
 
 	/*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
 		prev_rev = rev;
 	}
 
+	ret = UCODE_UPDATED;
+
+out:
 	uci->cpu_sig.rev = rev;
 	c->microcode = rev;
+
+	/* Update boot_cpu_data's revision too, if we're on the BSP: */
+	if (c->cpu_index == boot_cpu_data.cpu_index)
+		boot_cpu_data.microcode = rev;
 
-	return UCODE_UPDATED;
+	return ret;
 }
 
 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
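The amd.c and intel.c hunks apply the same restructuring: rather than updating the cached revision in one exit path and skipping it in another, every successful return now funnels through a single out: label that writes uci->cpu_sig.rev, c->microcode and, on the boot CPU, boot_cpu_data.microcode from the same rev value, so the copies cannot drift apart. A hedged skeleton of the pattern (simplified types and parameters, not the driver code):

enum ucode_state { UCODE_OK, UCODE_UPDATED, UCODE_ERROR };

static enum ucode_state apply_sketch(unsigned int cur_rev,
				     unsigned int new_rev,
				     int on_bsp,
				     unsigned int *cpu_rev,
				     unsigned int *boot_rev)
{
	unsigned int rev = cur_rev;
	enum ucode_state ret;

	if (rev >= new_rev) {
		ret = UCODE_OK;		/* nothing to apply, still sync below */
		goto out;
	}

	/* ... the actual microcode load would happen here ... */
	rev = new_rev;
	ret = UCODE_UPDATED;
out:
	*cpu_rev = rev;			/* uci->cpu_sig.rev and c->microcode */
	if (on_bsp)
		*boot_rev = rev;	/* boot_cpu_data.microcode stays in step */
	return ret;
}

int main(void)
{
	unsigned int cpu_rev = 0, boot_rev = 0;

	apply_sketch(0x10, 0x12, 1, &cpu_rev, &boot_rev);	/* updates both */
	return 0;
}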
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index f56895106ccf..2b5886401e5f 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -146,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
 	 * they can be printed in the right context.
 	 */
 	if (!partial && on_stack(info, regs, sizeof(*regs))) {
-		__show_regs(regs, 0);
+		__show_regs(regs, SHOW_REGS_SHORT);
 
 	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
 				       IRET_FRAME_SIZE)) {
@@ -344,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 	oops_exit();
 
 	/* Executive summary in case the oops scrolled away */
-	__show_regs(&exec_summary_regs, true);
+	__show_regs(&exec_summary_regs, SHOW_REGS_ALL);
 
 	if (!signr)
 		return;
@@ -407,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err)
 
 void show_regs(struct pt_regs *regs)
 {
-	bool all = true;
-
 	show_regs_print_info(KERN_DEFAULT);
 
-	if (IS_ENABLED(CONFIG_X86_32))
-		all = !user_mode(regs);
-
-	__show_regs(regs, all);
+	__show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
 
 	/*
 	 * When in-kernel, we also print out the stack at the time of the fault..
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 2924fd447e61..5046a3c9dec2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -59,7 +59,7 @@
 #include <asm/intel_rdt_sched.h>
 #include <asm/proto.h>
 
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
 	unsigned long d0, d1, d2, d3, d6, d7;
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
 	printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
 	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
 
-	if (!all)
+	if (mode != SHOW_REGS_ALL)
 		return;
 
 	cr0 = read_cr0();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a451bc374b9b..ea5ea850348d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -62,7 +62,7 @@
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 	unsigned long d0, d1, d2, d3, d6, d7;
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
 	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
 	       regs->r13, regs->r14, regs->r15);
 
-	if (!all)
+	if (mode == SHOW_REGS_SHORT)
 		return;
 
+	if (mode == SHOW_REGS_USER) {
+		rdmsrl(MSR_FS_BASE, fs);
+		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+		printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
+		       fs, shadowgs);
+		return;
+	}
+
 	asm("movl %%ds,%0" : "=r" (ds));
 	asm("movl %%cs,%0" : "=r" (cs));
 	asm("movl %%es,%0" : "=r" (es));
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 1463468ba9a0..6490f618e096 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1415,7 +1415,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
 
 static unsigned long __init get_loops_per_jiffy(void)
 {
-	unsigned long lpj = tsc_khz * KHZ;
+	u64 lpj = (u64)tsc_khz * KHZ;
 
 	do_div(lpj, HZ);
 	return lpj;
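The tsc.c change is plain integer-width arithmetic. On 32-bit x86, unsigned long is 32 bits, so tsc_khz * KHZ wraps modulo 2^32 for any TSC above roughly 4.29 GHz, leaving a nonsensical loops-per-jiffy value; widening the product to u64 preserves it for do_div(). Worked numbers as a standalone example, assuming a hypothetical 4.5 GHz part:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tsc_khz = 4500000;	/* hypothetical 4.5 GHz TSC, in kHz */

	uint32_t truncated = tsc_khz * 1000u;		/* wraps modulo 2^32 */
	uint64_t widened = (uint64_t)tsc_khz * 1000u;	/* the fix */

	printf("truncated: %u\n", truncated);	/* 205032704 */
	printf("widened:   %llu\n", (unsigned long long)widened);	/* 4500000000 */
	return 0;
}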
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e848a4811785..ae394552fb94 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -269,7 +269,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
 	if (pgd_val(pgd) != 0) {
 		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
 
-		*pgdp = native_make_pgd(0);
+		pgd_clear(pgdp);
 
 		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
 		pmd_free(mm, pmd);
@@ -494,7 +494,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 	int changed = !pte_same(*ptep, entry);
 
 	if (changed && dirty)
-		*ptep = entry;
+		set_pte(ptep, entry);
 
 	return changed;
 }
@@ -509,7 +509,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
 	if (changed && dirty) {
-		*pmdp = entry;
+		set_pmd(pmdp, entry);
 		/*
 		 * We had a write-protection fault here and changed the pmd
 		 * to to more permissive. No need to flush the TLB for that,
@@ -529,7 +529,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
 
 	if (changed && dirty) {
-		*pudp = entry;
+		set_pud(pudp, entry);
 		/*
 		 * We had a write-protection fault here and changed the pud
 		 * to to more permissive. No need to flush the TLB for that,