author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-10-23 08:08:53 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-10-23 08:08:53 -0400
commit    | 0200fbdd431519d730b5d399a12840ec832b27cc (patch)
tree      | 2b58f9e24b61b00e0550f106c95bfabc3b52cfdd /kernel/locking/lockdep_internals.h
parent    | de3fbb2aa802a267dee2213ae7d5a1e19eb4294a (diff)
parent    | 01a14bda11add9dcd4a59200f13834d634559935 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
"Lots of changes in this cycle - in part because locking/core attracted
a number of related x86 low level work which was easier to handle in a
single tree:
- Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
McKenney, Andrea Parri)
- lockdep scalability improvements and micro-optimizations (Waiman
Long)
- rwsem improvements (Waiman Long)
- spinlock micro-optimization (Matthew Wilcox)
- qspinlocks: Provide a liveness guarantee (more fairness) on x86
(Peter Zijlstra)
- Add support for relative references in jump tables on arm64, x86
and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)
- Be a lot less permissive on weird (kernel address) uaccess faults
on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
Horn)
- macrofy x86 asm statements to un-confuse the GCC inliner (Nadav
Amit) - a sketch of the trick follows the commit list below
- ... and a handful of other smaller changes as well"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
locking/lockdep: Make global debug_locks* variables read-mostly
locking/lockdep: Fix debug_locks off performance problem
locking/pvqspinlock: Extend node size when pvqspinlock is configured
locking/qspinlock_stat: Count instances of nested lock slowpaths
locking/qspinlock, x86: Provide liveness guarantee
x86/asm: 'Simplify' GEN_*_RMWcc() macros
locking/qspinlock: Rework some comments
locking/qspinlock: Re-order code
locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
futex: Replace spin_is_locked() with lockdep
locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
x86/refcount: Work around GCC inlining bug
x86/objtool: Use asm macros to work around GCC inlining bugs
...
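The several "Macrofy ... to work around GCC inlining bugs" commits above all apply one trick: GCC's inliner estimates the cost of an asm() statement from its line count, so a long asm body makes the containing function look expensive and defeats inlining. Defining the body once as an assembler macro leaves only a one-line asm() at each call site. Below is a minimal self-contained illustration of the mechanism - the ADD_ONE macro and add_one() helper are invented for this sketch and are not the kernel's actual macros:

/* Sketch of the "macrofy" workaround for GCC's line-count-based asm
 * cost estimate. Illustrative names only; build with: gcc -O2 on x86-64. */
#include <stdio.h>

/* Define the multi-line body once as an assembler macro; the assembler
 * expands it, so the C-level asm() below stays a single line. */
asm(".macro ADD_ONE reg\n\t"
    "add $1, \\reg\n\t"
    ".endm");

static inline long add_one(long x)
{
	/* One line, so the inliner sees a cheap statement no matter how
	 * large the expanded macro body really is. */
	asm("ADD_ONE %0" : "+r" (x));
	return x;
}

int main(void)
{
	printf("%ld\n", add_one(41)); /* prints 42 */
	return 0;
}

GCC 9 later gained "asm inline", which tells the compiler to treat an asm statement as minimal-cost and makes this workaround unnecessary on newer compilers.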
Diffstat (limited to 'kernel/locking/lockdep_internals.h')
-rw-r--r-- | kernel/locking/lockdep_internals.h | 27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index d459d624ba2a..88c847a41c8a 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -152,9 +152,15 @@ struct lockdep_stats {
 	int nr_find_usage_forwards_recursions;
 	int nr_find_usage_backwards_checks;
 	int nr_find_usage_backwards_recursions;
+
+	/*
+	 * Per lock class locking operation stat counts
+	 */
+	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
 };
 
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
+extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 #define __debug_atomic_inc(ptr)	\
 	this_cpu_inc(lockdep_stats.ptr);
@@ -179,9 +185,30 @@ DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 	} \
 	__total; \
 })
+
+static inline void debug_class_ops_inc(struct lock_class *class)
+{
+	int idx;
+
+	idx = class - lock_classes;
+	__debug_atomic_inc(lock_class_ops[idx]);
+}
+
+static inline unsigned long debug_class_ops_read(struct lock_class *class)
+{
+	int idx, cpu;
+	unsigned long ops = 0;
+
+	idx = class - lock_classes;
+	for_each_possible_cpu(cpu)
+		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
+	return ops;
+}
+
 #else
 # define __debug_atomic_inc(ptr)	do { } while (0)
 # define debug_atomic_inc(ptr)		do { } while (0)
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
+# define debug_class_ops_inc(ptr)	do { } while (0)
 #endif
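The new debug_class_ops_inc()/debug_class_ops_read() pair is the classic per-CPU statistics pattern: the write side touches only the local CPU's counter slot, and the rare read side sums across all possible CPUs. Here is a rough userspace sketch of the same design - all names below are illustrative stand-ins, not kernel identifiers:

/* Userspace sketch of the per-CPU counter pattern above
 * (illustrative names only; build with: cc -O2 sketch.c). */
#include <stdio.h>

#define MAX_KEYS 4	/* stand-in for MAX_LOCKDEP_KEYS */
#define NR_CPUS  8	/* stand-in for the possible-CPU set */

struct stats {
	unsigned long class_ops[MAX_KEYS];
};

/* One instance per CPU, like DECLARE_PER_CPU(struct lockdep_stats, ...). */
static struct stats per_cpu_stats[NR_CPUS];

/* Hot path: bump only the local CPU's slot, like this_cpu_inc() -
 * no atomics, no cache line shared with other CPUs. */
static void class_ops_inc(int cpu, int idx)
{
	per_cpu_stats[cpu].class_ops[idx]++;
}

/* Slow path: sum every CPU's slot, like the for_each_possible_cpu()
 * loop in the diff. The result is approximate while writers run,
 * which is acceptable for statistics. */
static unsigned long class_ops_read(int idx)
{
	unsigned long ops = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		ops += per_cpu_stats[cpu].class_ops[idx];
	return ops;
}

int main(void)
{
	class_ops_inc(0, 2);
	class_ops_inc(3, 2);
	printf("class 2 ops: %lu\n", class_ops_read(2)); /* prints 2 */
	return 0;
}

The trade-off is visible in the sketch: a single shared counter would bounce one cache line between all CPUs on every lock operation, while the per-CPU layout keeps the common write path contention-free at the cost of a slower, slightly racy read - which, per the commit subjects above, is the point of turning class->ops into a percpu counter.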