author     Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 08:08:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 08:08:53 -0400
commit     0200fbdd431519d730b5d399a12840ec832b27cc (patch)
tree       2b58f9e24b61b00e0550f106c95bfabc3b52cfdd /kernel/jump_label.c
parent     de3fbb2aa802a267dee2213ae7d5a1e19eb4294a (diff)
parent     01a14bda11add9dcd4a59200f13834d634559935 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
"Lots of changes in this cycle - in part because locking/core attracted
a number of related x86 low level work which was easier to handle in a
single tree:
- Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
McKenney, Andrea Parri)
- lockdep scalability improvements and micro-optimizations (Waiman
Long)
- rwsem improvements (Waiman Long)
- spinlock micro-optimization (Matthew Wilcox)
- qspinlocks: Provide a liveness guarantee (more fairness) on x86
(Peter Zijlstra)
- Add support for relative references in jump tables on arm64, x86
and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens);
see the layout sketch after this message
- Be a lot less permissive on weird (kernel address) uaccess faults
on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
Horn)
- macrofy x86 asm statements to un-confuse the GCC inliner (Nadav
Amit)
- ... and a handful of other smaller changes as well"
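For context on the relative-references item (the jump_label.c diff below is largely fallout from it): with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, each jump table entry stores 32-bit offsets relative to the entry itself instead of absolute pointers, which shrinks the table on 64-bit kernels and makes it position independent, so it needs no relocation under KASLR. A minimal sketch of the layout and one accessor, modeled on the generic definitions in <linux/jump_label.h> (illustrative, not a verbatim copy of the header):

	/* Every field is an offset from its own address, so the entry
	 * is position independent. */
	struct jump_entry {
		s32 code;	/* site of the patched NOP/branch */
		s32 target;	/* destination of the jump */
		long key;	/* struct static_key; low bits carry flags */
	};

	static inline unsigned long jump_entry_code(const struct jump_entry *entry)
	{
		/* resolve the self-relative offset to an absolute address */
		return (unsigned long)&entry->code + entry->code;
	}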
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
locking/lockdep: Make global debug_locks* variables read-mostly
locking/lockdep: Fix debug_locks off performance problem
locking/pvqspinlock: Extend node size when pvqspinlock is configured
locking/qspinlock_stat: Count instances of nested lock slowpaths
locking/qspinlock, x86: Provide liveness guarantee
x86/asm: 'Simplify' GEN_*_RMWcc() macros
locking/qspinlock: Rework some comments
locking/qspinlock: Re-order code
locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
futex: Replace spin_is_locked() with lockdep
locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
x86/refcount: Work around GCC inlining bug
x86/objtool: Use asm macros to work around GCC inlining bugs
...
Diffstat (limited to 'kernel/jump_label.c')
-rw-r--r--  kernel/jump_label.c | 107
1 file changed, 51 insertions(+), 56 deletions(-)
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 2e62503bea0d..b28028b08d44 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -38,23 +38,43 @@ static int jump_label_cmp(const void *a, const void *b)
 	const struct jump_entry *jea = a;
 	const struct jump_entry *jeb = b;
 
-	if (jea->key < jeb->key)
+	if (jump_entry_key(jea) < jump_entry_key(jeb))
 		return -1;
 
-	if (jea->key > jeb->key)
+	if (jump_entry_key(jea) > jump_entry_key(jeb))
 		return 1;
 
 	return 0;
 }
 
+static void jump_label_swap(void *a, void *b, int size)
+{
+	long delta = (unsigned long)a - (unsigned long)b;
+	struct jump_entry *jea = a;
+	struct jump_entry *jeb = b;
+	struct jump_entry tmp = *jea;
+
+	jea->code = jeb->code - delta;
+	jea->target = jeb->target - delta;
+	jea->key = jeb->key - delta;
+
+	jeb->code = tmp.code + delta;
+	jeb->target = tmp.target + delta;
+	jeb->key = tmp.key + delta;
+}
+
 static void
 jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 {
 	unsigned long size;
+	void *swapfn = NULL;
+
+	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
+		swapfn = jump_label_swap;
 
 	size = (((unsigned long)stop - (unsigned long)start)
 		/ sizeof(struct jump_entry));
-	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
 }
 
 static void jump_label_update(struct static_key *key);
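Why the new jump_label_swap(): a self-relative field is only meaningful at the address where it is stored, so sort() can no longer swap entries as raw bytes. When two entries trade places, every offset has to be rebased by the distance the entry moved, which is exactly the +/- delta arithmetic above. A standalone toy showing the same fixup on one field (hypothetical rel_* names, not kernel code):

	#include <stdio.h>

	/* A value stored relative to its own slot must be rebased
	 * when the slot moves. */
	struct rel_entry { long code; };

	static unsigned long rel_read(struct rel_entry *e)
	{
		return (unsigned long)&e->code + e->code;
	}

	static void rel_write(struct rel_entry *e, unsigned long addr)
	{
		e->code = addr - (unsigned long)&e->code;
	}

	static void rel_swap(struct rel_entry *a, struct rel_entry *b)
	{
		long delta = (unsigned long)a - (unsigned long)b;
		long tmp = a->code;

		/* b's offset was relative to b; shift it by (a - b) so it
		 * resolves to the same absolute address from a's slot. */
		a->code = b->code - delta;
		b->code = tmp + delta;
	}

	int main(void)
	{
		struct rel_entry e[2];

		rel_write(&e[0], 0x1000);
		rel_write(&e[1], 0x2000);
		rel_swap(&e[0], &e[1]);
		printf("%lx %lx\n", rel_read(&e[0]), rel_read(&e[1])); /* 2000 1000 */
		return 0;
	}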
@@ -85,6 +105,7 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
 	int v, v1;
 
 	STATIC_KEY_CHECK_USE(key);
+	lockdep_assert_cpus_held();
 
 	/*
 	 * Careful if we get concurrent static_key_slow_inc() calls;
@@ -130,6 +151,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 void static_key_enable_cpuslocked(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE(key);
+	lockdep_assert_cpus_held();
 
 	if (atomic_read(&key->enabled) > 0) {
 		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -160,6 +182,7 @@ EXPORT_SYMBOL_GPL(static_key_enable);
 void static_key_disable_cpuslocked(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE(key);
+	lockdep_assert_cpus_held();
 
 	if (atomic_read(&key->enabled) != 1) {
 		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
@@ -185,6 +208,8 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
 					   unsigned long rate_limit,
 					   struct delayed_work *work)
 {
+	lockdep_assert_cpus_held();
+
 	/*
 	 * The negative count check is valid even when a negative
 	 * key->enabled is in use by static_key_slow_inc(); a
@@ -261,8 +286,8 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit);
 
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 {
-	if (entry->code <= (unsigned long)end &&
-	    entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+	if (jump_entry_code(entry) <= (unsigned long)end &&
+	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
 		return 1;
 
 	return 0;
@@ -321,16 +346,6 @@ static inline void static_key_set_linked(struct static_key *key)
 	key->type |= JUMP_TYPE_LINKED;
 }
 
-static inline struct static_key *jump_entry_key(struct jump_entry *entry)
-{
-	return (struct static_key *)((unsigned long)entry->key & ~1UL);
-}
-
-static bool jump_entry_branch(struct jump_entry *entry)
-{
-	return (unsigned long)entry->key & 1UL;
-}
-
 /***
  * A 'struct static_key' uses a union such that it either points directly
  * to a table of 'struct jump_entry' or to a linked list of modules which in
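The two helpers removed here are not gone: they move into <linux/jump_label.h> as generic accessors with both absolute and relative implementations, which is why the rest of the file switches to jump_entry_key() and jump_entry_is_branch(). In the absolute form, key is a pointer whose low bits double as flags; roughly (a sketch of the header's approach, assuming the 4.19 flag layout, not a verbatim copy):

	static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
	{
		/* bit 0 = branch polarity, bit 1 = entry is in init text */
		return (struct static_key *)((unsigned long)entry->key & ~3UL);
	}

	static inline bool jump_entry_is_branch(const struct jump_entry *entry)
	{
		return (unsigned long)entry->key & 1UL;
	}

	static inline bool jump_entry_is_init(const struct jump_entry *entry)
	{
		return (unsigned long)entry->key & 2UL;
	}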
@@ -355,7 +370,7 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry)
 {
 	struct static_key *key = jump_entry_key(entry);
 	bool enabled = static_key_enabled(key);
-	bool branch = jump_entry_branch(entry);
+	bool branch = jump_entry_is_branch(entry);
 
 	/* See the comment in linux/jump_label.h */
 	return enabled ^ branch;
@@ -363,19 +378,20 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry)
 
 static void __jump_label_update(struct static_key *key,
 				struct jump_entry *entry,
-				struct jump_entry *stop)
+				struct jump_entry *stop,
+				bool init)
 {
 	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
 		/*
 		 * An entry->code of 0 indicates an entry which has been
 		 * disabled because it was in an init text area.
 		 */
-		if (entry->code) {
-			if (kernel_text_address(entry->code))
+		if (init || !jump_entry_is_init(entry)) {
+			if (kernel_text_address(jump_entry_code(entry)))
 				arch_jump_label_transform(entry, jump_label_type(entry));
 			else
 				WARN_ONCE(1, "can't patch jump_label at %pS",
-					  (void *)(unsigned long)entry->code);
+					  (void *)jump_entry_code(entry));
 		}
 	}
 }
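The new init parameter replaces the old convention of zeroing entry->code for entries whose code lives in freed init text. Such entries now stay in the (sorted) table, tagged with an init flag via jump_entry_set_init(); they are patched only while init text is still mapped and skipped afterwards, rather than destructively invalidated. (The retained comment about an entry->code of 0 is left stale by this change.) Condensed, the decision is (hypothetical helper name, for illustration only):

	static bool jump_entry_should_patch(const struct jump_entry *e, bool init)
	{
		/* init text still present, or entry is in permanent text */
		return init || !jump_entry_is_init(e);
	}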
@@ -410,6 +426,9 @@ void __init jump_label_init(void)
 		if (jump_label_type(iter) == JUMP_LABEL_NOP)
 			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
 
+		if (init_section_contains((void *)jump_entry_code(iter), 1))
+			jump_entry_set_init(iter);
+
 		iterk = jump_entry_key(iter);
 		if (iterk == key)
 			continue;
@@ -422,26 +441,13 @@ void __init jump_label_init(void)
 	cpus_read_unlock();
 }
 
-/* Disable any jump label entries in __init/__exit code */
-void __init jump_label_invalidate_initmem(void)
-{
-	struct jump_entry *iter_start = __start___jump_table;
-	struct jump_entry *iter_stop = __stop___jump_table;
-	struct jump_entry *iter;
-
-	for (iter = iter_start; iter < iter_stop; iter++) {
-		if (init_section_contains((void *)(unsigned long)iter->code, 1))
-			iter->code = 0;
-	}
-}
-
 #ifdef CONFIG_MODULES
 
 static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
 {
 	struct static_key *key = jump_entry_key(entry);
 	bool type = static_key_type(key);
-	bool branch = jump_entry_branch(entry);
+	bool branch = jump_entry_is_branch(entry);
 
 	/* See the comment in linux/jump_label.h */
 	return type ^ branch;
@@ -455,7 +461,7 @@ struct static_key_mod {
 
 static inline struct static_key_mod *static_key_mod(struct static_key *key)
 {
-	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
+	WARN_ON_ONCE(!static_key_linked(key));
 	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
 }
 
@@ -514,7 +520,8 @@ static void __jump_label_mod_update(struct static_key *key)
 			stop = __stop___jump_table;
 		else
 			stop = m->jump_entries + m->num_jump_entries;
-		__jump_label_update(key, mod->entries, stop);
+		__jump_label_update(key, mod->entries, stop,
+				    m && m->state == MODULE_STATE_COMING);
 	}
 }
 
@@ -560,12 +567,15 @@ static int jump_label_add_module(struct module *mod)
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		struct static_key *iterk;
 
+		if (within_module_init(jump_entry_code(iter), mod))
+			jump_entry_set_init(iter);
+
 		iterk = jump_entry_key(iter);
 		if (iterk == key)
 			continue;
 
 		key = iterk;
-		if (within_module(iter->key, mod)) {
+		if (within_module((unsigned long)key, mod)) {
 			static_key_set_entries(key, iter);
 			continue;
 		}
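A subtle fix rides along here: the ownership test changes from within_module(iter->key, mod) to within_module((unsigned long)key, mod). With relative entries, iter->key is an offset rather than an address, and in either format its low bits carry flags, so the raw field can no longer be compared against a module's address range; the resolved, flag-stripped pointer from jump_entry_key() is what must be tested:

	key = jump_entry_key(iter);	/* rebases (if relative) and strips flag bits */
	if (within_module((unsigned long)key, mod)) {
		/* the key belongs to this module */
	}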
@@ -595,7 +605,7 @@ static int jump_label_add_module(struct module *mod)
 
 		/* Only update if we've changed from our initial state */
 		if (jump_label_type(iter) != jump_label_init_type(iter))
-			__jump_label_update(key, iter, iter_stop);
+			__jump_label_update(key, iter, iter_stop, true);
 	}
 
 	return 0;
@@ -615,7 +625,7 @@ static void jump_label_del_module(struct module *mod)
 
 		key = jump_entry_key(iter);
 
-		if (within_module(iter->key, mod))
+		if (within_module((unsigned long)key, mod))
 			continue;
 
 		/* No memory during module load */
@@ -651,19 +661,6 @@ static void jump_label_del_module(struct module *mod)
 	}
 }
 
-/* Disable any jump label entries in module init code */
-static void jump_label_invalidate_module_init(struct module *mod)
-{
-	struct jump_entry *iter_start = mod->jump_entries;
-	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
-	struct jump_entry *iter;
-
-	for (iter = iter_start; iter < iter_stop; iter++) {
-		if (within_module_init(iter->code, mod))
-			iter->code = 0;
-	}
-}
-
 static int
 jump_label_module_notify(struct notifier_block *self, unsigned long val,
 			 void *data)
@@ -685,9 +682,6 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
 	case MODULE_STATE_GOING:
 		jump_label_del_module(mod);
 		break;
-	case MODULE_STATE_LIVE:
-		jump_label_invalidate_module_init(mod);
-		break;
 	}
 
 	jump_label_unlock();
@@ -757,7 +751,8 @@ static void jump_label_update(struct static_key *key)
 	entry = static_key_entries(key);
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, stop);
+		__jump_label_update(key, entry, stop,
+				    system_state < SYSTEM_RUNNING);
 }
 
 #ifdef CONFIG_STATIC_KEYS_SELFTEST
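Taken together, the init argument threads one rule through all three __jump_label_update() call sites:

	/* Effective 'init' argument after this merge (summary of the
	 * hunks above, not new code):
	 *
	 *   jump_label_update():        system_state < SYSTEM_RUNNING
	 *                               (core kernel init text not yet freed)
	 *   jump_label_add_module():    true
	 *                               (module init text present while loading)
	 *   __jump_label_mod_update():  m && m->state == MODULE_STATE_COMING
	 */

This is what makes the MODULE_STATE_LIVE notifier case and both invalidate functions above redundant: entries in init text are simply skipped once that text can no longer be patched, instead of being zeroed when it is freed.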
