diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-10-23 08:08:53 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-10-23 08:08:53 -0400 |
| commit | 0200fbdd431519d730b5d399a12840ec832b27cc | (patch) |
| tree | 2b58f9e24b61b00e0550f106c95bfabc3b52cfdd | /include/linux |
| parent | de3fbb2aa802a267dee2213ae7d5a1e19eb4294a | (diff) |
| parent | 01a14bda11add9dcd4a59200f13834d634559935 | (diff) |
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
"Lots of changes in this cycle - in part because locking/core attracted
a number of related x86 low level work which was easier to handle in a
single tree:
- Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
McKenney, Andrea Parri)
- lockdep scalability improvements and micro-optimizations (Waiman
Long)
- rwsem improvements (Waiman Long)
- spinlock micro-optimization (Matthew Wilcox)
- qspinlocks: Provide a liveness guarantee (more fairness) on x86.
(Peter Zijlstra)
- Add support for relative references in jump tables on arm64, x86
and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)
- Be a lot less permissive on weird (kernel address) uaccess faults
on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
Horn)
- macrofy x86 asm statements to un-confuse the GCC inliner. (Nadav
Amit)
- ... and a handful of other smaller changes as well"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
locking/lockdep: Make global debug_locks* variables read-mostly
locking/lockdep: Fix debug_locks off performance problem
locking/pvqspinlock: Extend node size when pvqspinlock is configured
locking/qspinlock_stat: Count instances of nested lock slowpaths
locking/qspinlock, x86: Provide liveness guarantee
x86/asm: 'Simplify' GEN_*_RMWcc() macros
locking/qspinlock: Rework some comments
locking/qspinlock: Re-order code
locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
futex: Replace spin_is_locked() with lockdep
locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
x86/refcount: Work around GCC inlining bug
x86/objtool: Use asm macros to work around GCC inlining bugs
...
Diffstat (limited to 'include/linux')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | include/linux/compiler.h | 56 |
| -rw-r--r-- | include/linux/debug_locks.h | 4 |
| -rw-r--r-- | include/linux/jump_label.h | 65 |
| -rw-r--r-- | include/linux/lockdep.h | 7 |
| -rw-r--r-- | include/linux/rwsem.h | 4 |
| -rw-r--r-- | include/linux/sched.h | 6 |
6 files changed, 116 insertions, 26 deletions
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 681d866efb1e..1921545c6351 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, | |||
| 99 | * unique, to convince GCC not to merge duplicate inline asm statements. | 99 | * unique, to convince GCC not to merge duplicate inline asm statements. |
| 100 | */ | 100 | */ |
| 101 | #define annotate_reachable() ({ \ | 101 | #define annotate_reachable() ({ \ |
| 102 | asm volatile("%c0:\n\t" \ | 102 | asm volatile("ANNOTATE_REACHABLE counter=%c0" \ |
| 103 | ".pushsection .discard.reachable\n\t" \ | 103 | : : "i" (__COUNTER__)); \ |
| 104 | ".long %c0b - .\n\t" \ | ||
| 105 | ".popsection\n\t" : : "i" (__COUNTER__)); \ | ||
| 106 | }) | 104 | }) |
| 107 | #define annotate_unreachable() ({ \ | 105 | #define annotate_unreachable() ({ \ |
| 108 | asm volatile("%c0:\n\t" \ | 106 | asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \ |
| 109 | ".pushsection .discard.unreachable\n\t" \ | 107 | : : "i" (__COUNTER__)); \ |
| 110 | ".long %c0b - .\n\t" \ | ||
| 111 | ".popsection\n\t" : : "i" (__COUNTER__)); \ | ||
| 112 | }) | 108 | }) |
| 113 | #define ASM_UNREACHABLE \ | ||
| 114 | "999:\n\t" \ | ||
| 115 | ".pushsection .discard.unreachable\n\t" \ | ||
| 116 | ".long 999b - .\n\t" \ | ||
| 117 | ".popsection\n\t" | ||
| 118 | #else | 109 | #else |
| 119 | #define annotate_reachable() | 110 | #define annotate_reachable() |
| 120 | #define annotate_unreachable() | 111 | #define annotate_unreachable() |
| @@ -299,6 +290,45 @@ static inline void *offset_to_ptr(const int *off) | |||
| 299 | return (void *)((unsigned long)off + *off); | 290 | return (void *)((unsigned long)off + *off); |
| 300 | } | 291 | } |
| 301 | 292 | ||
| 293 | #else /* __ASSEMBLY__ */ | ||
| 294 | |||
| 295 | #ifdef __KERNEL__ | ||
| 296 | #ifndef LINKER_SCRIPT | ||
| 297 | |||
| 298 | #ifdef CONFIG_STACK_VALIDATION | ||
| 299 | .macro ANNOTATE_UNREACHABLE counter:req | ||
| 300 | \counter: | ||
| 301 | .pushsection .discard.unreachable | ||
| 302 | .long \counter\()b -. | ||
| 303 | .popsection | ||
| 304 | .endm | ||
| 305 | |||
| 306 | .macro ANNOTATE_REACHABLE counter:req | ||
| 307 | \counter: | ||
| 308 | .pushsection .discard.reachable | ||
| 309 | .long \counter\()b -. | ||
| 310 | .popsection | ||
| 311 | .endm | ||
| 312 | |||
| 313 | .macro ASM_UNREACHABLE | ||
| 314 | 999: | ||
| 315 | .pushsection .discard.unreachable | ||
| 316 | .long 999b - . | ||
| 317 | .popsection | ||
| 318 | .endm | ||
| 319 | #else /* CONFIG_STACK_VALIDATION */ | ||
| 320 | .macro ANNOTATE_UNREACHABLE counter:req | ||
| 321 | .endm | ||
| 322 | |||
| 323 | .macro ANNOTATE_REACHABLE counter:req | ||
| 324 | .endm | ||
| 325 | |||
| 326 | .macro ASM_UNREACHABLE | ||
| 327 | .endm | ||
| 328 | #endif /* CONFIG_STACK_VALIDATION */ | ||
| 329 | |||
| 330 | #endif /* LINKER_SCRIPT */ | ||
| 331 | #endif /* __KERNEL__ */ | ||
| 302 | #endif /* __ASSEMBLY__ */ | 332 | #endif /* __ASSEMBLY__ */ |
| 303 | 333 | ||
| 304 | #ifndef __optimize | 334 | #ifndef __optimize |
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 120225e9a366..257ab3c92cb8 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h | |||
| @@ -8,8 +8,8 @@ | |||
| 8 | 8 | ||
| 9 | struct task_struct; | 9 | struct task_struct; |
| 10 | 10 | ||
| 11 | extern int debug_locks; | 11 | extern int debug_locks __read_mostly; |
| 12 | extern int debug_locks_silent; | 12 | extern int debug_locks_silent __read_mostly; |
| 13 | 13 | ||
| 14 | 14 | ||
| 15 | static inline int __debug_locks_off(void) | 15 | static inline int __debug_locks_off(void) |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 1a0b6f17a5d6..5df6a621e464 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
| @@ -119,6 +119,68 @@ struct static_key { | |||
| 119 | 119 | ||
| 120 | #ifdef HAVE_JUMP_LABEL | 120 | #ifdef HAVE_JUMP_LABEL |
| 121 | #include <asm/jump_label.h> | 121 | #include <asm/jump_label.h> |
| 122 | |||
| 123 | #ifndef __ASSEMBLY__ | ||
| 124 | #ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE | ||
| 125 | |||
| 126 | struct jump_entry { | ||
| 127 | s32 code; | ||
| 128 | s32 target; | ||
| 129 | long key; // key may be far away from the core kernel under KASLR | ||
| 130 | }; | ||
| 131 | |||
| 132 | static inline unsigned long jump_entry_code(const struct jump_entry *entry) | ||
| 133 | { | ||
| 134 | return (unsigned long)&entry->code + entry->code; | ||
| 135 | } | ||
| 136 | |||
| 137 | static inline unsigned long jump_entry_target(const struct jump_entry *entry) | ||
| 138 | { | ||
| 139 | return (unsigned long)&entry->target + entry->target; | ||
| 140 | } | ||
| 141 | |||
| 142 | static inline struct static_key *jump_entry_key(const struct jump_entry *entry) | ||
| 143 | { | ||
| 144 | long offset = entry->key & ~3L; | ||
| 145 | |||
| 146 | return (struct static_key *)((unsigned long)&entry->key + offset); | ||
| 147 | } | ||
| 148 | |||
| 149 | #else | ||
| 150 | |||
| 151 | static inline unsigned long jump_entry_code(const struct jump_entry *entry) | ||
| 152 | { | ||
| 153 | return entry->code; | ||
| 154 | } | ||
| 155 | |||
| 156 | static inline unsigned long jump_entry_target(const struct jump_entry *entry) | ||
| 157 | { | ||
| 158 | return entry->target; | ||
| 159 | } | ||
| 160 | |||
| 161 | static inline struct static_key *jump_entry_key(const struct jump_entry *entry) | ||
| 162 | { | ||
| 163 | return (struct static_key *)((unsigned long)entry->key & ~3UL); | ||
| 164 | } | ||
| 165 | |||
| 166 | #endif | ||
| 167 | |||
| 168 | static inline bool jump_entry_is_branch(const struct jump_entry *entry) | ||
| 169 | { | ||
| 170 | return (unsigned long)entry->key & 1UL; | ||
| 171 | } | ||
| 172 | |||
| 173 | static inline bool jump_entry_is_init(const struct jump_entry *entry) | ||
| 174 | { | ||
| 175 | return (unsigned long)entry->key & 2UL; | ||
| 176 | } | ||
| 177 | |||
| 178 | static inline void jump_entry_set_init(struct jump_entry *entry) | ||
| 179 | { | ||
| 180 | entry->key |= 2; | ||
| 181 | } | ||
| 182 | |||
| 183 | #endif | ||
| 122 | #endif | 184 | #endif |
| 123 | 185 | ||
| 124 | #ifndef __ASSEMBLY__ | 186 | #ifndef __ASSEMBLY__ |
| @@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[]; | |||
| 151 | extern struct jump_entry __stop___jump_table[]; | 213 | extern struct jump_entry __stop___jump_table[]; |
| 152 | 214 | ||
| 153 | extern void jump_label_init(void); | 215 | extern void jump_label_init(void); |
| 154 | extern void jump_label_invalidate_initmem(void); | ||
| 155 | extern void jump_label_lock(void); | 216 | extern void jump_label_lock(void); |
| 156 | extern void jump_label_unlock(void); | 217 | extern void jump_label_unlock(void); |
| 157 | extern void arch_jump_label_transform(struct jump_entry *entry, | 218 | extern void arch_jump_label_transform(struct jump_entry *entry, |
| @@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void) | |||
| 199 | static_key_initialized = true; | 260 | static_key_initialized = true; |
| 200 | } | 261 | } |
| 201 | 262 | ||
| 202 | static inline void jump_label_invalidate_initmem(void) {} | ||
| 203 | |||
| 204 | static __always_inline bool static_key_false(struct static_key *key) | 263 | static __always_inline bool static_key_false(struct static_key *key) |
| 205 | { | 264 | { |
| 206 | if (unlikely(static_key_count(key) > 0)) | 265 | if (unlikely(static_key_count(key) > 0)) |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b0d0b51c4d85..1fd82ff99c65 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -99,13 +99,8 @@ struct lock_class { | |||
| 99 | */ | 99 | */ |
| 100 | unsigned int version; | 100 | unsigned int version; |
| 101 | 101 | ||
| 102 | /* | ||
| 103 | * Statistics counter: | ||
| 104 | */ | ||
| 105 | unsigned long ops; | ||
| 106 | |||
| 107 | const char *name; | ||
| 108 | int name_version; | 102 | int name_version; |
| 103 | const char *name; | ||
| 109 | 104 | ||
| 110 | #ifdef CONFIG_LOCK_STAT | 105 | #ifdef CONFIG_LOCK_STAT |
| 111 | unsigned long contention_point[LOCKSTAT_POINTS]; | 106 | unsigned long contention_point[LOCKSTAT_POINTS]; |
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index ab93b6eae696..67dbb57508b1 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h | |||
| @@ -45,10 +45,10 @@ struct rw_semaphore { | |||
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | /* | 47 | /* |
| 48 | * Setting bit 0 of the owner field with other non-zero bits will indicate | 48 | * Setting bit 1 of the owner field but not bit 0 will indicate |
| 49 | * that the rwsem is writer-owned with an unknown owner. | 49 | * that the rwsem is writer-owned with an unknown owner. |
| 50 | */ | 50 | */ |
| 51 | #define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L) | 51 | #define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L) |
| 52 | 52 | ||
| 53 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | 53 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); |
| 54 | extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); | 54 | extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 004ca21f7e80..58e2af8d064b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -735,6 +735,12 @@ struct task_struct { | |||
| 735 | unsigned use_memdelay:1; | 735 | unsigned use_memdelay:1; |
| 736 | #endif | 736 | #endif |
| 737 | 737 | ||
| 738 | /* | ||
| 739 | * May usercopy functions fault on kernel addresses? | ||
| 740 | * This is not just a single bit because this can potentially nest. | ||
| 741 | */ | ||
| 742 | unsigned int kernel_uaccess_faults_ok; | ||
| 743 | |||
| 738 | unsigned long atomic_flags; /* Flags requiring atomic access. */ | 744 | unsigned long atomic_flags; /* Flags requiring atomic access. */ |
| 739 | 745 | ||
| 740 | struct restart_block restart_block; | 746 | struct restart_block restart_block; |
