commit     9e9abecfc0ff3a9ad2ead954b37bbfcb863c775e
author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-18 11:25:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-18 11:25:51 -0400
tree       0c3ffda953b82750638a06507591ad587b565ff2  /arch/x86/kernel/alternative.c
parent     d7bb545d86825e635cab33a1dd81ca0ad7b92887
parent     77ad386e596c6b0930cc2e09e3cce485e3ee7f72
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (613 commits)
x86: standalone trampoline code
x86: move suspend wakeup code to C
x86: coding style fixes to arch/x86/kernel/acpi/sleep.c
x86: setup_trampoline() - fix section mismatch warning
x86: section mismatch fixes, #1
x86: fix paranoia about using BIOS quickboot mechanism.
x86: print out buggy mptable
x86: use cpu_online()
x86: use cpumask_of_cpu()
x86: remove unnecessary tmp local variable
x86: remove unnecessary memset()
x86: use ioapic_read_entry() and ioapic_write_entry()
x86: avoid redundant loop in io_apic_level_ack_pending()
x86: remove superfluous initialisation in boot code.
x86: merge mpparse_{32,64}.c
x86: unify mp_register_gsi
x86: unify mp_config_acpi_legacy_irqs
x86: unify mp_register_ioapic
x86: unify uniq_io_apic_id
x86: unify smp_scan_config
...
Diffstat (limited to 'arch/x86/kernel/alternative.c')
-rw-r--r--  arch/x86/kernel/alternative.c | 103
 1 file changed, 76 insertions(+), 27 deletions(-)
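
Note on what the alternative.c part of this merge does: text_poke() is split in two. text_poke_early() keeps the plain memcpy for early patching, where the target code is not yet executing and its pages are still writable; it is what apply_alternatives() and apply_paravirt() now call. The reworked text_poke() must also work once CONFIG_DEBUG_RODATA has write-protected the kernel text, so instead of writing through the (possibly read-only) linear mapping it builds a temporary writable alias of the affected page(s) with vmap() and copies through that. The irq-save dance around the smp_alt lock and alternative_instructions() is dropped accordingly; interrupts are now disabled only around the actual copy inside the poke primitives. A condensed sketch of the aliasing pattern follows (poke_via_alias is a hypothetical name; the committed text_poke() additionally disables interrupts around the copy, insists the write fits within one machine word, and runs sync_core() afterwards):

    #include <linux/kernel.h>   /* BUG_ON() */
    #include <linux/mm.h>       /* virt_to_page(), PAGE_MASK, PAGE_KERNEL */
    #include <linux/string.h>   /* memcpy() */
    #include <linux/vmalloc.h>  /* vmap(), vunmap(), VM_MAP */

    /* Hypothetical condensed helper: patch 'len' bytes of possibly
       write-protected kernel text by aliasing the backing page(s)
       through a fresh writable mapping. */
    static void poke_via_alias(void *addr, const void *opcode, size_t len)
    {
            struct page *pages[2] = {
                    virt_to_page(addr),             /* page holding the first byte */
                    virt_to_page(addr + PAGE_SIZE), /* next page, if the write crosses */
            };
            int nr_pages = pages[1] ? 2 : 1;
            char *vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

            BUG_ON(!vaddr);
            /* same physical bytes as 'addr', but via a writable alias */
            memcpy(vaddr + ((unsigned long)addr & ~PAGE_MASK), opcode, len);
            vunmap(vaddr);
    }
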
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5fed98ca0e1f..df4099dc1c68 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -11,6 +11,8 @@
 #include <asm/mce.h>
 #include <asm/nmi.h>
 #include <asm/vsyscall.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
 
 #define MAX_PATCH_LEN (255-1)
 
@@ -177,7 +179,7 @@ static const unsigned char*const * find_nop_table(void)
 #endif /* CONFIG_X86_64 */
 
 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
-static void add_nops(void *insns, unsigned int len)
+void add_nops(void *insns, unsigned int len)
 {
 	const unsigned char *const *noptable = find_nop_table();
 
@@ -190,6 +192,7 @@ static void add_nops(void *insns, unsigned int len)
 		len -= noplen;
 	}
 }
+EXPORT_SYMBOL_GPL(add_nops);
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern u8 *__smp_locks[], *__smp_locks_end[];
@@ -205,7 +208,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 	struct alt_instr *a;
 	char insnbuf[MAX_PATCH_LEN];
 
-	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
+	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
 	for (a = start; a < end; a++) {
 		u8 *instr = a->instr;
 		BUG_ON(a->replacementlen > a->instrlen);
@@ -217,13 +220,13 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
 			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
 			DPRINTK("%s: vsyscall fixup: %p => %p\n",
-				__FUNCTION__, a->instr, instr);
+				__func__, a->instr, instr);
 		}
 #endif
 		memcpy(insnbuf, a->replacement, a->replacementlen);
 		add_nops(insnbuf + a->replacementlen,
 			 a->instrlen - a->replacementlen);
-		text_poke(instr, insnbuf, a->instrlen);
+		text_poke_early(instr, insnbuf, a->instrlen);
 	}
 }
 
@@ -284,7 +287,6 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 				void *text,  void *text_end)
 {
 	struct smp_alt_module *smp;
-	unsigned long flags;
 
 	if (noreplace_smp)
 		return;
@@ -307,42 +309,40 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 	smp->text = text;
 	smp->text_end = text_end;
 	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
-		__FUNCTION__, smp->locks, smp->locks_end,
+		__func__, smp->locks, smp->locks_end,
 		smp->text, smp->text_end, smp->name);
 
-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);
 	list_add_tail(&smp->next, &smp_alt_modules);
 	if (boot_cpu_has(X86_FEATURE_UP))
 		alternatives_smp_unlock(smp->locks, smp->locks_end,
 					smp->text, smp->text_end);
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }
 
 void alternatives_smp_module_del(struct module *mod)
 {
 	struct smp_alt_module *item;
-	unsigned long flags;
 
 	if (smp_alt_once || noreplace_smp)
 		return;
 
-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);
 	list_for_each_entry(item, &smp_alt_modules, next) {
 		if (mod != item->mod)
 			continue;
 		list_del(&item->next);
-		spin_unlock_irqrestore(&smp_alt, flags);
-		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
+		spin_unlock(&smp_alt);
+		DPRINTK("%s: %s\n", __func__, item->name);
 		kfree(item);
 		return;
 	}
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }
 
 void alternatives_smp_switch(int smp)
 {
 	struct smp_alt_module *mod;
-	unsigned long flags;
 
 #ifdef CONFIG_LOCKDEP
 	/*
@@ -359,7 +359,7 @@ void alternatives_smp_switch(int smp)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
 
-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);
 
 	/*
 	 * Avoid unnecessary switches because it forces JIT based VMs to
@@ -383,7 +383,7 @@ void alternatives_smp_switch(int smp)
 					mod->text, mod->text_end);
 	}
 	smp_mode = smp;
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }
 
 #endif
@@ -411,7 +411,7 @@ void apply_paravirt(struct paravirt_patch_site *start,
 
 		/* Pad the rest with nops */
 		add_nops(insnbuf + used, p->len - used);
-		text_poke(p->instr, insnbuf, p->len);
+		text_poke_early(p->instr, insnbuf, p->len);
 	}
 }
 extern struct paravirt_patch_site __start_parainstructions[],
@@ -420,8 +420,6 @@ extern struct paravirt_patch_site __start_parainstructions[],
 
 void __init alternative_instructions(void)
 {
-	unsigned long flags;
-
 	/* The patching is not fully atomic, so try to avoid local interruptions
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
@@ -430,7 +428,6 @@ void __init alternative_instructions(void)
 	stop_mce();
 #endif
 
-	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 	/* switch to patch-once-at-boottime-only mode and free the
@@ -462,7 +459,6 @@ void __init alternative_instructions(void)
 	}
 #endif
 	apply_paravirt(__parainstructions, __parainstructions_end);
-	local_irq_restore(flags);
 
 	if (smp_alt_once)
 		free_init_pages("SMP alternatives",
@@ -475,18 +471,71 @@ void __init alternative_instructions(void)
 #endif
 }
 
-/*
- * Warning:
+/**
+ * text_poke_early - Update instructions on a live kernel at boot time
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
  * When you use this code to patch more than one byte of an instruction
  * you need to make sure that other CPUs cannot execute this code in parallel.
- * Also no thread must be currently preempted in the middle of these instructions.
- * And on the local CPU you need to be protected again NMI or MCE handlers
- * seeing an inconsistent instruction while you patch.
+ * Also no thread must be currently preempted in the middle of these
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
  */
-void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
+void *text_poke_early(void *addr, const void *opcode, size_t len)
 {
+	unsigned long flags;
+	local_irq_save(flags);
 	memcpy(addr, opcode, len);
+	local_irq_restore(flags);
+	sync_core();
+	/* Could also do a CLFLUSH here to speed up CPU recovery; but
+	   that causes hangs on some VIA CPUs. */
+	return addr;
+}
+
+/**
+ * text_poke - Update instructions on a live kernel
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
+ * Only atomic text poke/set should be allowed when not doing early patching.
+ * It means the size must be writable atomically and the address must be aligned
+ * in a way that permits an atomic write. It also makes sure we fit on a single
+ * page.
+ */
+void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+{
+	unsigned long flags;
+	char *vaddr;
+	int nr_pages = 2;
+
+	BUG_ON(len > sizeof(long));
+	BUG_ON((((long)addr + len - 1) & ~(sizeof(long) - 1))
+	       - ((long)addr & ~(sizeof(long) - 1)));
+	if (kernel_text_address((unsigned long)addr)) {
+		struct page *pages[2] = { virt_to_page(addr),
+			virt_to_page(addr + PAGE_SIZE) };
+		if (!pages[1])
+			nr_pages = 1;
+		vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+		BUG_ON(!vaddr);
+		local_irq_save(flags);
+		memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+		local_irq_restore(flags);
+		vunmap(vaddr);
+	} else {
+		/*
+		 * modules are in vmalloc'ed memory, always writable.
+		 */
+		local_irq_save(flags);
+		memcpy(addr, opcode, len);
+		local_irq_restore(flags);
+	}
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
+	return addr;
 }
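
A closing note for readers following the apply_alternatives() loop above: each alt_instr entry overwrites instrlen bytes at the patch site, but the replacement may be shorter, so the tail is padded with the longest NOPs the CPU supports; that is exactly what add_nops(), now non-static and exported, does. A runnable userspace sketch of the same padding scheme, restricted to the classic one- and two-byte x86 NOPs (the kernel selects a per-CPU table via find_nop_table()):

    #include <stdio.h>
    #include <string.h>

    /* Sketch of the kernel's add_nops(): fill 'len' bytes with the
       largest available NOP, using a table indexed by NOP length. */
    static const unsigned char nop1[] = { 0x90 };       /* nop */
    static const unsigned char nop2[] = { 0x66, 0x90 }; /* 66 nop */
    static const unsigned char *const noptable[] = { NULL, nop1, nop2 };
    #define ASM_NOP_MAX 2

    static void add_nops(unsigned char *insns, unsigned int len)
    {
            while (len > 0) {
                    unsigned int noplen = len > ASM_NOP_MAX ? ASM_NOP_MAX : len;
                    memcpy(insns, noptable[noplen], noplen);
                    insns += noplen;
                    len -= noplen;
            }
    }

    int main(void)
    {
            unsigned char buf[5] = { 0 };
            unsigned int i;

            add_nops(buf, sizeof(buf)); /* pads as 2 + 2 + 1 bytes */
            for (i = 0; i < sizeof(buf); i++)
                    printf("%02x ", buf[i]);
            printf("\n"); /* prints: 66 90 66 90 90 */
            return 0;
    }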
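
And on the two BUG_ON()s guarding the new text_poke(): together they encode the "atomic write" rule from its comment block, i.e. the patch must be at most one machine word and must not straddle a word boundary, since only such a store can be observed atomically by CPUs executing the code being patched. A minimal userspace rendering of that predicate (poke_is_atomic is an illustrative name, not a kernel symbol):

    #include <assert.h>
    #include <stddef.h>

    /* Illustrative version of the checks: a poke is atomic iff it fits
       in a machine word and its first and last byte fall in the same
       sizeof(long)-aligned word. */
    static int poke_is_atomic(unsigned long addr, size_t len)
    {
            if (len > sizeof(long))
                    return 0;
            return ((addr + len - 1) & ~(sizeof(long) - 1)) ==
                   (addr & ~(sizeof(long) - 1));
    }

    int main(void)
    {
            assert(poke_is_atomic(0x1000, 1));             /* single byte */
            assert(poke_is_atomic(0x1000, sizeof(long)));  /* aligned word */
            assert(!poke_is_atomic(0x1003, sizeof(long))); /* straddles words */
            return 0;
    }

In the commit itself these conditions are hard BUG_ON()s: a violation is treated as a kernel bug, not an error to report to the caller.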