author     Linus Torvalds <torvalds@linux-foundation.org>   2018-10-23 08:08:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-10-23 08:08:53 -0400
commit     0200fbdd431519d730b5d399a12840ec832b27cc (patch)
tree       2b58f9e24b61b00e0550f106c95bfabc3b52cfdd /kernel/cpu.c
parent     de3fbb2aa802a267dee2213ae7d5a1e19eb4294a (diff)
parent     01a14bda11add9dcd4a59200f13834d634559935 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
"Lots of changes in this cycle - in part because locking/core attracted
a number of related x86 low level work which was easier to handle in a
single tree:
- Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
McKenney, Andrea Parri)
- lockdep scalability improvements and micro-optimizations (Waiman
Long)
- rwsem improvements (Waiman Long)
- spinlock micro-optimization (Matthew Wilcox)
- qspinlocks: Provide a liveness guarantee (more fairness) on x86
(Peter Zijlstra)
- Add support for relative references in jump tables on arm64, x86
and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)
- Be a lot less permissive on weird (kernel address) uaccess faults
on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
Horn)
- macrofy x86 asm statements to un-confuse the GCC inliner (Nadav
Amit)
- ... and a handful of other smaller changes as well"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
locking/lockdep: Make global debug_locks* variables read-mostly
locking/lockdep: Fix debug_locks off performance problem
locking/pvqspinlock: Extend node size when pvqspinlock is configured
locking/qspinlock_stat: Count instances of nested lock slowpaths
locking/qspinlock, x86: Provide liveness guarantee
x86/asm: 'Simplify' GEN_*_RMWcc() macros
locking/qspinlock: Rework some comments
locking/qspinlock: Re-order code
locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
futex: Replace spin_is_locked() with lockdep
locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
x86/refcount: Work around GCC inlining bug
x86/objtool: Use asm macros to work around GCC inlining bugs
...
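One of the commits listed above, "futex: Replace spin_is_locked() with lockdep", follows a cleanup pattern that appears in several of these changes. The sketch below is illustrative only and does not reproduce the actual futex hunk; struct example_queue and the example_* helpers are made-up names. The point is the semantic difference: spin_is_locked() only reports whether the lock is currently held by anyone, while lockdep_assert_held() asserts that the current context holds it and compiles away entirely when lockdep is not configured.

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/bug.h>

struct example_queue {			/* hypothetical structure, for illustration */
	spinlock_t lock;
};

/* Old style: only verifies that the lock is held by *somebody*. */
static void example_assert_locked_old(struct example_queue *q)
{
	WARN_ON(!spin_is_locked(&q->lock));
}

/* New style: verifies that the *current* context holds the lock and
 * costs nothing when CONFIG_LOCKDEP is disabled. */
static void example_assert_locked_new(struct example_queue *q)
{
	lockdep_assert_held(&q->lock);
}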
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--   kernel/cpu.c   28
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0097acec1c71..be4859f07153 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -315,6 +315,16 @@ void lockdep_assert_cpus_held(void)
 	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
 
+static void lockdep_acquire_cpus_lock(void)
+{
+	rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
+}
+
+static void lockdep_release_cpus_lock(void)
+{
+	rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_);
+}
+
 /*
  * Wait for currently running CPU hotplug operations to complete (if any) and
  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@ -344,6 +354,17 @@ void cpu_hotplug_enable(void)
 	cpu_maps_update_done();
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+
+#else
+
+static void lockdep_acquire_cpus_lock(void)
+{
+}
+
+static void lockdep_release_cpus_lock(void)
+{
+}
+
 #endif	/* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_HOTPLUG_SMT
@@ -616,6 +637,12 @@ static void cpuhp_thread_fun(unsigned int cpu)
 	 */
 	smp_mb();
 
+	/*
+	 * The BP holds the hotplug lock, but we're now running on the AP,
+	 * ensure that anybody asserting the lock is held, will actually find
+	 * it so.
+	 */
+	lockdep_acquire_cpus_lock();
 	cpuhp_lock_acquire(bringup);
 
 	if (st->single) {
@@ -661,6 +688,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
 	}
 
 	cpuhp_lock_release(bringup);
+	lockdep_release_cpus_lock();
 
 	if (!st->should_run)
 		complete_ap_thread(st, bringup);
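For context on why this acquire/release pair matters: hotplug state callbacks run from the per-CPU hotplug thread on the AP while the hotplug lock is held by the CPU driving the operation, so a lockdep_assert_cpus_held() in such a callback previously produced a false warning. The sketch below is a hypothetical consumer, not part of this commit; example_cpu_online, example_init and the "example:online" state name are made-up identifiers.

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/*
 * Hypothetical hotplug callback (illustration only).  It runs from the
 * AP's hotplug thread while the task driving the hotplug operation
 * holds cpu_hotplug_lock; with the lockdep_acquire_cpus_lock()
 * annotation added in cpuhp_thread_fun() above, the assertion is
 * satisfied instead of triggering spuriously.
 */
static int example_cpu_online(unsigned int cpu)
{
	lockdep_assert_cpus_held();
	/* ... per-CPU setup that relies on hotplug exclusion ... */
	return 0;
}

static int __init example_init(void)
{
	/* Dynamically allocate an AP online state; no teardown callback. */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				 example_cpu_online, NULL);
}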