author      Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 08:08:53 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 08:08:53 -0400
commit      0200fbdd431519d730b5d399a12840ec832b27cc (patch)
tree        2b58f9e24b61b00e0550f106c95bfabc3b52cfdd /include
parent      de3fbb2aa802a267dee2213ae7d5a1e19eb4294a (diff)
parent      01a14bda11add9dcd4a59200f13834d634559935 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
"Lots of changes in this cycle - in part because locking/core attracted
a number of related x86 low level work which was easier to handle in a
single tree:
- Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
McKenney, Andrea Parri)
- lockdep scalability improvements and micro-optimizations (Waiman
Long)
- rwsem improvements (Waiman Long)
- spinlock micro-optimization (Matthew Wilcox)
- qspinlocks: Provide a liveness guarantee (more fairness) on x86
(Peter Zijlstra)
- Add support for relative references in jump tables on arm64, x86
and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)
- Be a lot less permissive on weird (kernel address) uaccess faults
on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
Horn)
- macrofy x86 asm statements to un-confuse the GCC inliner (Nadav
Amit)
- ... and a handful of other smaller changes as well"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
locking/lockdep: Make global debug_locks* variables read-mostly
locking/lockdep: Fix debug_locks off performance problem
locking/pvqspinlock: Extend node size when pvqspinlock is configured
locking/qspinlock_stat: Count instances of nested lock slowpaths
locking/qspinlock, x86: Provide liveness guarantee
x86/asm: 'Simplify' GEN_*_RMWcc() macros
locking/qspinlock: Rework some comments
locking/qspinlock: Re-order code
locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
futex: Replace spin_is_locked() with lockdep
locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
x86/refcount: Work around GCC inlining bug
x86/objtool: Use asm macros to work around GCC inlining bugs
...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bug.h           8
-rw-r--r--  include/asm-generic/qrwlock.h       7
-rw-r--r--  include/asm-generic/qspinlock.h    16
-rw-r--r--  include/asm-generic/vmlinux.lds.h  11
-rw-r--r--  include/linux/compiler.h           56
-rw-r--r--  include/linux/debug_locks.h         4
-rw-r--r--  include/linux/jump_label.h         65
-rw-r--r--  include/linux/lockdep.h             7
-rw-r--r--  include/linux/rwsem.h               4
-rw-r--r--  include/linux/sched.h               6
10 files changed, 140 insertions, 44 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 20561a60db9c..cdafa5edea49 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -17,10 +17,8 @@
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 
-#ifdef CONFIG_BUG
-
-#ifdef CONFIG_GENERIC_BUG
 struct bug_entry {
+#ifdef CONFIG_GENERIC_BUG
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 	unsigned long	bug_addr;
 #else
@@ -35,8 +33,10 @@ struct bug_entry {
 	unsigned short	line;
 #endif
 	unsigned short	flags;
-};
 #endif	/* CONFIG_GENERIC_BUG */
+};
+
+#ifdef CONFIG_BUG
 
 /*
  * Don't use BUG() or BUG_ON() unless there's really no way out; one
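
The reshuffle above hoists struct bug_entry out of the CONFIG_BUG guard and moves the CONFIG_GENERIC_BUG conditional inside the struct body, so the type name is visible in every configuration and only the members vary. (In the kernel header even flags ends up conditional, leaving an empty struct when CONFIG_GENERIC_BUG is off — legal as a GNU C extension.) A minimal freestanding sketch of the pattern; FEATURE_VERBOSE stands in for the config option and nothing here is kernel API:

#include <stdio.h>

/* The struct tag is always defined; only its members are conditional. */
struct report_entry {
#ifdef FEATURE_VERBOSE
	const char	*file;
	unsigned short	line;
#endif
	unsigned short	flags;
};

/* Code taking the struct compiles whether or not the feature is enabled. */
static void dump_entry(const struct report_entry *e)
{
#ifdef FEATURE_VERBOSE
	printf("%s:%u flags=%u\n", e->file, e->line, e->flags);
#else
	printf("flags=%u\n", e->flags);
#endif
}

int main(void)
{
	struct report_entry e = { .flags = 1 };

	dump_entry(&e);
	return 0;
}
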
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0f7062bd55e5..36254d2da8e0 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg_acquire(&lock->cnts,
-					     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+				_QW_LOCKED));
 }
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
+	u32 cnts = 0;
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
 		return;
 
 	queued_write_lock_slowpath(lock);
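
Both hunks replace the cmpxchg-and-compare idiom with try_cmpxchg, which returns a boolean and, on failure, writes the observed value back through its second argument; on x86 this compiles to a single CMPXCHG whose ZF result is branched on directly, rather than a CMPXCHG followed by a redundant comparison. A rough equivalent of the trylock using C11 atomics — names and the _QW_LOCKED value are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>

#define _QW_LOCKED_SKETCH 0xffU	/* illustrative, not the kernel constant */

struct qrwlock_sketch {
	atomic_uint cnts;
};

static bool write_trylock_sketch(struct qrwlock_sketch *lock)
{
	unsigned int cnts = atomic_load_explicit(&lock->cnts,
						 memory_order_relaxed);

	if (cnts)	/* readers or a writer present: fail fast */
		return false;

	/* On failure, 'cnts' is updated with the value actually seen. */
	return atomic_compare_exchange_strong_explicit(&lock->cnts, &cnts,
						       _QW_LOCKED_SKETCH,
						       memory_order_acquire,
						       memory_order_relaxed);
}
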
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9cc457597ddf..7541fa707f5b 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-	if (!atomic_read(&lock->val) &&
-	    (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
-		return 1;
+	u32 val = atomic_read(&lock->val);
+
+	if (unlikely(val))
 		return 0;
+
+	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
 }
 
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val = 0;
 
-	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
-	if (likely(val == 0))
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 		return;
+
 	queued_spin_lock_slowpath(lock, val);
 }
 
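
The queued_spin_lock() rewrite has a second payoff besides the shorter fast path: when the exchange fails, try_cmpxchg has already loaded the current lock word into val, which is exactly the argument the slowpath wants, so no separate re-read is needed. Sketched with C11 atomics (illustrative names, and a trivial spin loop standing in for the real slowpath):

#include <stdatomic.h>

#define LOCKED_VAL 1U	/* stand-in for _Q_LOCKED_VAL */

struct spinlock_sketch {
	atomic_uint val;
};

/* Trivial stand-in for queued_spin_lock_slowpath(): spin until free. */
static void spin_lock_slowpath_sketch(struct spinlock_sketch *lock,
				      unsigned int val)
{
	do {
		val = 0;
	} while (!atomic_compare_exchange_weak_explicit(&lock->val, &val,
							LOCKED_VAL,
							memory_order_acquire,
							memory_order_relaxed));
}

static inline void spin_lock_sketch(struct spinlock_sketch *lock)
{
	unsigned int val = 0;

	if (atomic_compare_exchange_strong_explicit(&lock->val, &val,
						    LOCKED_VAL,
						    memory_order_acquire,
						    memory_order_relaxed))
		return;

	/* 'val' already holds the value that made the fast path fail. */
	spin_lock_slowpath_sketch(lock, val);
}
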
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index d7701d466b60..d0bcea7c8f84 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -253,10 +253,6 @@
 		STRUCT_ALIGN();						\
 		*(__tracepoints)					\
 		/* implement dynamic printk debug */			\
-		. = ALIGN(8);						\
-		__start___jump_table = .;				\
-		KEEP(*(__jump_table))					\
-		__stop___jump_table = .;				\
 		. = ALIGN(8);						\
 		__start___verbose = .;					\
 		KEEP(*(__verbose))					\
@@ -300,6 +296,12 @@
 	. = __start_init_task + THREAD_SIZE;				\
 	__end_init_task = .;
 
+#define JUMP_TABLE_DATA							\
+	. = ALIGN(8);							\
+	__start___jump_table = .;					\
+	KEEP(*(__jump_table))						\
+	__stop___jump_table = .;
+
 /*
  * Allow architectures to handle ro_after_init data on their
  * own by defining an empty RO_AFTER_INIT_DATA.
@@ -308,6 +310,7 @@
 #define RO_AFTER_INIT_DATA						\
 	__start_ro_after_init = .;					\
 	*(.data..ro_after_init)						\
+	JUMP_TABLE_DATA							\
 	__end_ro_after_init = .;
 #endif
 
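
Wrapping the jump-table placement in a JUMP_TABLE_DATA macro lets RO_AFTER_INIT_DATA pull it into .data..ro_after_init: the entries are patched while jump labels are initialized, then write-protected along with the rest of the ro_after_init data. C code only ever sees the start/stop symbols the block emits; roughly like the compile-only sketch below (the entry layout shown is illustrative — the real one is per-arch — and the symbols resolve only when a linker script emits them):

/* Illustrative layout only; see the jump_label.h hunk further down. */
struct jump_entry_sketch {
	long code, target, key;
};

/* Emitted by the linker script around KEEP(*(__jump_table)). */
extern struct jump_entry_sketch __start___jump_table[];
extern struct jump_entry_sketch __stop___jump_table[];

static void for_each_jump_entry(void (*fn)(struct jump_entry_sketch *))
{
	struct jump_entry_sketch *iter;

	for (iter = __start___jump_table; iter < __stop___jump_table; iter++)
		fn(iter);
}
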
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 681d866efb1e..1921545c6351 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  * unique, to convince GCC not to merge duplicate inline asm statements.
  */
 #define annotate_reachable() ({					\
-	asm volatile("%c0:\n\t"						\
-		     ".pushsection .discard.reachable\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+	asm volatile("ANNOTATE_REACHABLE counter=%c0"			\
+		     : : "i" (__COUNTER__));				\
 })
 #define annotate_unreachable() ({					\
-	asm volatile("%c0:\n\t"						\
-		     ".pushsection .discard.unreachable\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+	asm volatile("ANNOTATE_UNREACHABLE counter=%c0"			\
+		     : : "i" (__COUNTER__));				\
 })
-#define ASM_UNREACHABLE \
-	"999:\n\t" \
-	".pushsection .discard.unreachable\n\t" \
-	".long 999b - .\n\t" \
-	".popsection\n\t"
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
@@ -299,6 +290,45 @@ static inline void *offset_to_ptr(const int *off)
 	return (void *)((unsigned long)off + *off);
 }
 
+#else /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+#ifndef LINKER_SCRIPT
+
+#ifdef CONFIG_STACK_VALIDATION
+.macro ANNOTATE_UNREACHABLE counter:req
+\counter:
+	.pushsection .discard.unreachable
+	.long \counter\()b -.
+	.popsection
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+\counter:
+	.pushsection .discard.reachable
+	.long \counter\()b -.
+	.popsection
+.endm
+
+.macro ASM_UNREACHABLE
+999:
+	.pushsection .discard.unreachable
+	.long 999b - .
+	.popsection
+.endm
+#else /* CONFIG_STACK_VALIDATION */
+.macro ANNOTATE_UNREACHABLE counter:req
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+.endm
+
+.macro ASM_UNREACHABLE
+.endm
+#endif /* CONFIG_STACK_VALIDATION */
+
+#endif /* LINKER_SCRIPT */
+#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
 #ifndef __optimize
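
The rewritten annotate_*() helpers are the "macrofy" pattern from the shortlog: GCC's inliner estimates the cost of an asm statement by counting its lines, so a multi-line .pushsection body makes every caller look expensive and suppresses inlining. Moving the body into an assembler .macro (which the kernel feeds to every compilation unit via kbuild) leaves a one-line asm at each call site. A toy, self-contained illustration of the same trick, for GCC + GNU as — the macro and section names here are invented:

/* In the kernel the .macro lives in a file kbuild assembles into every
 * unit; a top-level asm() is the closest standalone approximation. */
asm(".macro TOY_ANNOTATE counter:req\n"
    "\\counter:\n"
    "\t.pushsection .discard.toy\n"
    "\t.long \\counter\\()b - .\n"
    "\t.popsection\n"
    ".endm\n");

/* One asm line at the call site: cheap in GCC's inlining cost model. */
#define toy_annotate() \
	asm volatile("TOY_ANNOTATE counter=%c0" : : "i" (__COUNTER__))

static inline int incr(int x)
{
	toy_annotate();
	return x + 1;
}

int main(void)
{
	return incr(41) - 42;
}
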
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 120225e9a366..257ab3c92cb8 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -8,8 +8,8 @@
 
 struct task_struct;
 
-extern int debug_locks;
-extern int debug_locks_silent;
+extern int debug_locks __read_mostly;
+extern int debug_locks_silent __read_mostly;
 
 
 static inline int __debug_locks_off(void)
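
__read_mostly places a variable in the .data..read_mostly section, grouping rarely-written globals together so they do not share cache lines with frequently-written data (false sharing); debug_locks is read on many fast paths and written essentially once, when lock debugging trips. The annotation itself is roughly this (a freestanding approximation of the kernel's definition in <linux/cache.h>):

#define my_read_mostly __attribute__((__section__(".data..read_mostly")))

int debug_flag my_read_mostly = 1;	/* hot to read, cold to write */
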
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 1a0b6f17a5d6..5df6a621e464 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -119,6 +119,68 @@ struct static_key {
 
 #ifdef HAVE_JUMP_LABEL
 #include <asm/jump_label.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+
+struct jump_entry {
+	s32 code;
+	s32 target;
+	long key;	// key may be far away from the core kernel under KASLR
+};
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+	return (unsigned long)&entry->code + entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+	return (unsigned long)&entry->target + entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+	long offset = entry->key & ~3L;
+
+	return (struct static_key *)((unsigned long)&entry->key + offset);
+}
+
+#else
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+	return entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+	return entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+	return (struct static_key *)((unsigned long)entry->key & ~3UL);
+}
+
+#endif
+
+static inline bool jump_entry_is_branch(const struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 1UL;
+}
+
+static inline bool jump_entry_is_init(const struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 2UL;
+}
+
+static inline void jump_entry_set_init(struct jump_entry *entry)
+{
+	entry->key |= 2;
+}
+
+#endif
 #endif
 
 #ifndef __ASSEMBLY__
@@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
 extern void jump_label_init(void);
-extern void jump_label_invalidate_initmem(void);
 extern void jump_label_lock(void);
 extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void)
 	static_key_initialized = true;
 }
 
-static inline void jump_label_invalidate_initmem(void) {}
-
 static __always_inline bool static_key_false(struct static_key *key)
 {
 	if (unlikely(static_key_count(key) > 0))
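
The new accessors combine two tricks. First, under CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE each 32-bit field holds an offset relative to its own address, so entries shrink from pointer size to 4 bytes and need no relocation under KASLR. Second, the low two bits of key carry flags (branch polarity, init-section entry), which works because struct static_key is at least 4-byte aligned. A self-contained, userspace-runnable demonstration of the decode — all names here are illustrative:

#include <assert.h>
#include <stdio.h>

struct static_key_sketch {
	int enabled;
};

struct entry_sketch {
	long key;	/* self-relative offset to the key; low 2 bits = flags */
};

static struct static_key_sketch key_obj;
static struct entry_sketch entry;

/* Mirrors jump_entry_key() above: strip the flag bits, then add the
 * remaining offset to the address of the field itself. */
static struct static_key_sketch *entry_key(const struct entry_sketch *e)
{
	long offset = e->key & ~3L;

	return (struct static_key_sketch *)((unsigned long)&e->key + offset);
}

static int entry_is_branch(const struct entry_sketch *e)
{
	return (unsigned long)e->key & 1UL;
}

int main(void)
{
	/* Encode: offset from the field to the key, flag in bit 0. Both
	 * objects are at least 4-byte aligned, so the offset's low two
	 * bits are free to hold flags. */
	entry.key = ((long)(unsigned long)&key_obj -
		     (long)(unsigned long)&entry.key) | 1L;

	assert(entry_key(&entry) == &key_obj);
	assert(entry_is_branch(&entry));
	puts("relative key decode ok");
	return 0;
}
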
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b0d0b51c4d85..1fd82ff99c65 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -99,13 +99,8 @@ struct lock_class {
 	 */
 	unsigned int version;
 
-	/*
-	 * Statistics counter:
-	 */
-	unsigned long ops;
-
-	const char *name;
 	int name_version;
+	const char *name;
 
 #ifdef CONFIG_LOCK_STAT
 	unsigned long contention_point[LOCKSTAT_POINTS];
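
Per the shortlog, the ops statistics counter moved out of struct lock_class into a percpu counter under CONFIG_DEBUG_LOCKDEP. With the unsigned long gone, swapping name and name_version presumably lets the two 32-bit integers (version and name_version) share one 64-bit slot instead of each dragging padding. The effect of such a reorder in isolation, with the field set trimmed down for illustration:

#include <stdio.h>

struct before {		/* LP64: 4 + 4 pad + 8 + 8 + 4 + 4 pad = 32 bytes */
	unsigned int	version;
	unsigned long	ops;
	const char	*name;
	int		name_version;
};

struct after {		/* LP64: 4 + 4 + 8 = 16 bytes */
	unsigned int	version;
	int		name_version;
	const char	*name;
};

int main(void)
{
	printf("before=%zu after=%zu\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}
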
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ab93b6eae696..67dbb57508b1 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,10 +45,10 @@ struct rw_semaphore {
 };
 
 /*
- * Setting bit 0 of the owner field with other non-zero bits will indicate
+ * Setting bit 1 of the owner field but not bit 0 will indicate
  * that the rwsem is writer-owned with an unknown owner.
  */
-#define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-1L)
+#define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-2L)
 
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
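
Why -1L had to become -2L: in the owner-field scheme this cycle introduces, bit 0 marks a reader-owned rwsem and bit 1 an anonymously (writer-) owned one. -1L has every bit set — including bit 0 — so the "unknown owner" sentinel read as reader-owned; -2L (...11110) sets bit 1 but keeps bit 0 clear. A self-checking sketch, with the bit names redefined locally for illustration:

#include <assert.h>

#define OWNED_BY_READER		(1UL << 0)
#define OWNED_ANONYMOUSLY	(1UL << 1)

static int is_reader_owned(unsigned long owner)
{
	return owner & OWNED_BY_READER;
}

int main(void)
{
	unsigned long old_unknown = (unsigned long)-1L;
	unsigned long new_unknown = (unsigned long)-2L;

	assert(is_reader_owned(old_unknown));	/* the flaw: looked reader-owned */
	assert(!is_reader_owned(new_unknown));
	assert(new_unknown & OWNED_ANONYMOUSLY);
	return 0;
}
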
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 004ca21f7e80..58e2af8d064b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -735,6 +735,12 @@ struct task_struct {
 	unsigned			use_memdelay:1;
 #endif
 
+	/*
+	 * May usercopy functions fault on kernel addresses?
+	 * This is not just a single bit because this can potentially nest.
+	 */
+	unsigned int			kernel_uaccess_faults_ok;
+
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
 	struct restart_block		restart_block;
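
This field backs the "BUG() when uaccess helpers fault on kernel addresses" work: it is a counter rather than a flag precisely so that regions which legitimately tolerate such faults (kernel-memory probing, for instance) can nest. The helper pair below is hypothetical — in the kernel, callers bump current->kernel_uaccess_faults_ok directly — but it shows how a nestable enable/disable counter is driven and consulted:

struct task_sketch {
	unsigned int kernel_uaccess_faults_ok;
};

static inline void begin_kernel_fault_ok(struct task_sketch *tsk)
{
	tsk->kernel_uaccess_faults_ok++;	/* nests: ++/-- pairs balance */
}

static inline void end_kernel_fault_ok(struct task_sketch *tsk)
{
	tsk->kernel_uaccess_faults_ok--;
}

/* The fault handler treats a kernel-address uaccess fault as a bug only
 * when no enclosing region has raised the counter. */
static inline int uaccess_fault_is_bug(const struct task_sketch *tsk)
{
	return tsk->kernel_uaccess_faults_ok == 0;
}
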
