Diffstat (limited to 'arch/i386/kernel/alternative.c')
-rw-r--r--  arch/i386/kernel/alternative.c | 102
1 files changed, 44 insertions(+), 58 deletions(-)
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 426f59b0106b..d8cda14fff8b 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -5,6 +5,7 @@
 #include <asm/alternative.h>
 #include <asm/sections.h>
 
+static int noreplace_smp = 0;
 static int smp_alt_once = 0;
 static int debug_alternative = 0;
 
@@ -13,15 +14,33 @@ static int __init bootonly(char *str)
 	smp_alt_once = 1;
 	return 1;
 }
+__setup("smp-alt-boot", bootonly);
+
 static int __init debug_alt(char *str)
 {
 	debug_alternative = 1;
 	return 1;
 }
-
-__setup("smp-alt-boot", bootonly);
 __setup("debug-alternative", debug_alt);
 
+static int __init setup_noreplace_smp(char *str)
+{
+	noreplace_smp = 1;
+	return 1;
+}
+__setup("noreplace-smp", setup_noreplace_smp);
+
+#ifdef CONFIG_PARAVIRT
+static int noreplace_paravirt = 0;
+
+static int __init setup_noreplace_paravirt(char *str)
+{
+	noreplace_paravirt = 1;
+	return 1;
+}
+__setup("noreplace-paravirt", setup_noreplace_paravirt);
+#endif
+
 #define DPRINTK(fmt, args...) if (debug_alternative) \
 	printk(KERN_DEBUG fmt, args)
 
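For context on the hunk above: __setup(str, fn) does not parse anything by itself. It records the string/handler pair in a table that early boot code walks while scanning the kernel command line, calling the handler when a token matches, so booting with e.g. "noreplace-smp noreplace-paravirt" simply sets both flags before any patching runs. A rough sketch of that plumbing, paraphrased from memory of include/linux/init.h of this era (struct and macro details are illustrative, not authoritative):

	/* Sketch, from memory: how __setup() registers a boot-parameter
	 * handler in a linker-collected .init.setup table. */
	struct obs_kernel_param {
		const char *str;		/* e.g. "noreplace-smp" */
		int (*setup_func)(char *);	/* returns 1 if it consumed the arg */
		int early;			/* handled by parse_early_param()? */
	};

	#define __setup_param(str, unique_id, fn, early)		\
		static struct obs_kernel_param __setup_##unique_id	\
			__attribute__((__section__(".init.setup")))	\
			__attribute__((aligned(sizeof(long))))		\
			= { str, fn, early }

	#define __setup(str, fn) __setup_param(str, fn, fn, 0)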
@@ -132,11 +151,8 @@ static void nop_out(void *insns, unsigned int len)
 }
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
 extern u8 *__smp_locks[], *__smp_locks_end[];
 
-extern u8 __smp_alt_begin[], __smp_alt_end[];
-
 /* Replace instructions with better alternatives for this CPU type.
    This runs before SMP is initialized to avoid SMP problems with
    self modifying code. This implies that assymetric systems where
@@ -171,29 +187,6 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 
 #ifdef CONFIG_SMP
 
-static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
-{
-	struct alt_instr *a;
-
-	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
-	for (a = start; a < end; a++) {
-		memcpy(a->replacement + a->replacementlen,
-		       a->instr,
-		       a->instrlen);
-	}
-}
-
-static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
-{
-	struct alt_instr *a;
-
-	for (a = start; a < end; a++) {
-		memcpy(a->instr,
-		       a->replacement + a->replacementlen,
-		       a->instrlen);
-	}
-}
-
 static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
 	u8 **ptr;
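Background for the __smp_locks table that the remaining hunks rely on: the deleted save/apply pair kept memcpy'd UP copies of every alternative so the kernel could re-apply whole instruction sequences when switching between UP and SMP. After this change, switching uses only the recorded lock-prefix addresses. On i386 those records are emitted by the LOCK_PREFIX macro; a sketch from memory of include/asm-i386/alternative.h (the exact assembly text is illustrative):

	/* Sketch, from memory: LOCK_PREFIX stores the address of each
	 * emitted lock prefix in the .smp_locks section, which the
	 * linker bounds with __smp_locks[] / __smp_locks_end[]. */
	#define LOCK_PREFIX				\
		".section .smp_locks,\"a\"\n"		\
		"  .align 4\n"				\
		"  .long 661f\n"			\
		".previous\n"				\
		"661:\n\tlock; "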
@@ -211,6 +204,9 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
 {
 	u8 **ptr;
 
+	if (noreplace_smp)
+		return;
+
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr < text)
 			continue;
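The rendered hunk cuts the unlock loop short; the body that follows the two range checks is a single-byte patch: the 0xf0 lock prefix is overwritten with a one-byte NOP, and alternatives_smp_lock() writes 0xf0 back. A self-contained sketch of the idea (function name mine, not the verbatim kernel code):

	/* Sketch: NOP out x86 lock prefixes recorded in a pointer table.
	 * 0xf0 is the x86 LOCK prefix, 0x90 the one-byte NOP. */
	static void smp_unlock_sketch(unsigned char **start, unsigned char **end,
				      unsigned char *text, unsigned char *text_end)
	{
		unsigned char **ptr;

		for (ptr = start; ptr < end; ptr++) {
			if (*ptr < text || *ptr > text_end)
				continue;	/* not in this module's text */
			**ptr = 0x90;		/* lock prefix -> NOP */
		}
	}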
@@ -245,6 +241,9 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 	struct smp_alt_module *smp;
 	unsigned long flags;
 
+	if (noreplace_smp)
+		return;
+
 	if (smp_alt_once) {
 		if (boot_cpu_has(X86_FEATURE_UP))
 			alternatives_smp_unlock(locks, locks_end,
@@ -279,7 +278,7 @@ void alternatives_smp_module_del(struct module *mod)
 	struct smp_alt_module *item;
 	unsigned long flags;
 
-	if (smp_alt_once)
+	if (smp_alt_once || noreplace_smp)
 		return;
 
 	spin_lock_irqsave(&smp_alt, flags);
@@ -310,7 +309,7 @@ void alternatives_smp_switch(int smp)
 		return;
 #endif
 
-	if (smp_alt_once)
+	if (noreplace_smp || smp_alt_once)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
 
@@ -319,8 +318,6 @@ void alternatives_smp_switch(int smp)
 		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
 		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
 		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
-		alternatives_smp_apply(__smp_alt_instructions,
-				       __smp_alt_instructions_end);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_lock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
@@ -328,8 +325,6 @@ void alternatives_smp_switch(int smp)
 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
 		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
 		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
-		apply_alternatives(__smp_alt_instructions,
-				   __smp_alt_instructions_end);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_unlock(mod->locks, mod->locks_end,
 						mod->text, mod->text_end);
@@ -340,36 +335,31 @@ void alternatives_smp_switch(int smp)
 #endif
 
 #ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+void apply_paravirt(struct paravirt_patch_site *start,
+		    struct paravirt_patch_site *end)
 {
-	struct paravirt_patch *p;
+	struct paravirt_patch_site *p;
+
+	if (noreplace_paravirt)
+		return;
 
 	for (p = start; p < end; p++) {
 		unsigned int used;
 
 		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
 					  p->len);
-#ifdef CONFIG_DEBUG_PARAVIRT
-		{
-			int i;
-			/* Deliberately clobber regs using "not %reg" to find bugs. */
-			for (i = 0; i < 3; i++) {
-				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
-					memcpy(p->instr + used, "\xf7\xd0", 2);
-					p->instr[used+1] |= i;
-					used += 2;
-				}
-			}
-		}
-#endif
+
+		BUG_ON(used > p->len);
+
 		/* Pad the rest with nops */
 		nop_out(p->instr + used, p->len - used);
 	}
 
-	/* Sync to be conservative, in case we patched following instructions */
+	/* Sync to be conservative, in case we patched following
+	 * instructions */
 	sync_core();
 }
-extern struct paravirt_patch __start_parainstructions[],
+extern struct paravirt_patch_site __start_parainstructions[],
 	__stop_parainstructions[];
 #endif /* CONFIG_PARAVIRT */
 
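The rewritten apply_paravirt() above is both stricter and simpler: paravirt_ops.patch() reports how many bytes it emitted, the new BUG_ON(used > p->len) turns a patch that overruns its site into a hard failure instead of silent corruption, and nop_out() pads the unused tail. The kernel's nop_out() (visible in an earlier hunk header) fills the range with NOP sequences; a minimal stand-in using only the one-byte NOP, for illustration:

	#include <string.h>

	/* Minimal stand-in for nop_out() (name mine): pad a patched
	 * range with one-byte NOPs (0x90). The kernel version prefers
	 * longer multi-byte NOP forms where the CPU supports them, but
	 * the effect is the same. */
	static void nop_out_sketch(void *insns, unsigned int len)
	{
		memset(insns, 0x90, len);
	}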
@@ -396,23 +386,19 @@ void __init alternative_instructions(void)
 			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
 			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
 			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
-			apply_alternatives(__smp_alt_instructions,
-					   __smp_alt_instructions_end);
 			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
 						_text, _etext);
 		}
 		free_init_pages("SMP alternatives",
-				(unsigned long)__smp_alt_begin,
-				(unsigned long)__smp_alt_end);
+				(unsigned long)__smp_locks,
+				(unsigned long)__smp_locks_end);
 	} else {
-		alternatives_smp_save(__smp_alt_instructions,
-				      __smp_alt_instructions_end);
 		alternatives_smp_module_add(NULL, "core kernel",
 					    __smp_locks, __smp_locks_end,
 					    _text, _etext);
 		alternatives_smp_switch(0);
 	}
 #endif
-	apply_paravirt(__start_parainstructions, __stop_parainstructions);
+	apply_paravirt(__parainstructions, __parainstructions_end);
 	local_irq_restore(flags);
 }
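Net effect: booting with noreplace-smp leaves every lock prefix in place even on a uniprocessor, which is useful when the SMP-alternatives patching itself is suspected in a crash, and on CONFIG_PARAVIRT kernels noreplace-paravirt skips paravirt patching the same way. The compile-time CONFIG_DEBUG_PARAVIRT register-clobbering hack is gone in favor of these runtime switches, and in the smp_alt_once path the freed init region shrinks from the whole __smp_alt section to just the __smp_locks pointer table, since UP copies of the alternatives are no longer saved.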