diff options
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/futex.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++----------
 -rw-r--r--  kernel/kmod.c      |  2 +-
 -rw-r--r--  kernel/lockdep.c   | 37 ++++++++++++++++++-------------------
 -rw-r--r--  kernel/taskstats.c | 18 ++++++++----------
4 files changed, 69 insertions, 42 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 0a308970c24a..11cbe052b2e8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
| @@ -218,6 +218,8 @@ static void drop_futex_key_refs(union futex_key *key) | |||
| 218 | * @uaddr: virtual address of the futex | 218 | * @uaddr: virtual address of the futex |
| 219 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED | 219 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED |
| 220 | * @key: address where result is stored. | 220 | * @key: address where result is stored. |
| 221 | * @rw: mapping needs to be read/write (values: VERIFY_READ, | ||
| 222 | * VERIFY_WRITE) | ||
| 221 | * | 223 | * |
| 222 | * Returns a negative error code or 0 | 224 | * Returns a negative error code or 0 |
| 223 | * The key words are stored in *key on success. | 225 | * The key words are stored in *key on success. |
| @@ -229,12 +231,12 @@ static void drop_futex_key_refs(union futex_key *key) | |||
| 229 | * lock_page() might sleep, the caller should not hold a spinlock. | 231 | * lock_page() might sleep, the caller should not hold a spinlock. |
| 230 | */ | 232 | */ |
| 231 | static int | 233 | static int |
| 232 | get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) | 234 | get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) |
| 233 | { | 235 | { |
| 234 | unsigned long address = (unsigned long)uaddr; | 236 | unsigned long address = (unsigned long)uaddr; |
| 235 | struct mm_struct *mm = current->mm; | 237 | struct mm_struct *mm = current->mm; |
| 236 | struct page *page, *page_head; | 238 | struct page *page, *page_head; |
| 237 | int err; | 239 | int err, ro = 0; |
| 238 | 240 | ||
| 239 | /* | 241 | /* |
| 240 | * The futex address must be "naturally" aligned. | 242 | * The futex address must be "naturally" aligned. |
| @@ -262,8 +264,18 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) | |||
| 262 | 264 | ||
| 263 | again: | 265 | again: |
| 264 | err = get_user_pages_fast(address, 1, 1, &page); | 266 | err = get_user_pages_fast(address, 1, 1, &page); |
| 267 | /* | ||
| 268 | * If write access is not required (eg. FUTEX_WAIT), try | ||
| 269 | * and get read-only access. | ||
| 270 | */ | ||
| 271 | if (err == -EFAULT && rw == VERIFY_READ) { | ||
| 272 | err = get_user_pages_fast(address, 1, 0, &page); | ||
| 273 | ro = 1; | ||
| 274 | } | ||
| 265 | if (err < 0) | 275 | if (err < 0) |
| 266 | return err; | 276 | return err; |
| 277 | else | ||
| 278 | err = 0; | ||
| 267 | 279 | ||
| 268 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 280 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 269 | page_head = page; | 281 | page_head = page; |
| @@ -305,6 +317,13 @@ again: | |||
| 305 | if (!page_head->mapping) { | 317 | if (!page_head->mapping) { |
| 306 | unlock_page(page_head); | 318 | unlock_page(page_head); |
| 307 | put_page(page_head); | 319 | put_page(page_head); |
| 320 | /* | ||
| 321 | * ZERO_PAGE pages don't have a mapping. Avoid a busy loop | ||
| 322 | * trying to find one. RW mapping would have COW'd (and thus | ||
| 323 | * have a mapping) so this page is RO and won't ever change. | ||
| 324 | */ | ||
| 325 | if ((page_head == ZERO_PAGE(address))) | ||
| 326 | return -EFAULT; | ||
| 308 | goto again; | 327 | goto again; |
| 309 | } | 328 | } |
| 310 | 329 | ||
| @@ -316,6 +335,15 @@ again: | |||
| 316 | * the object not the particular process. | 335 | * the object not the particular process. |
| 317 | */ | 336 | */ |
| 318 | if (PageAnon(page_head)) { | 337 | if (PageAnon(page_head)) { |
| 338 | /* | ||
| 339 | * A RO anonymous page will never change and thus doesn't make | ||
| 340 | * sense for futex operations. | ||
| 341 | */ | ||
| 342 | if (ro) { | ||
| 343 | err = -EFAULT; | ||
| 344 | goto out; | ||
| 345 | } | ||
| 346 | |||
| 319 | key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ | 347 | key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ |
| 320 | key->private.mm = mm; | 348 | key->private.mm = mm; |
| 321 | key->private.address = address; | 349 | key->private.address = address; |
| @@ -327,9 +355,10 @@ again: | |||
| 327 | 355 | ||
| 328 | get_futex_key_refs(key); | 356 | get_futex_key_refs(key); |
| 329 | 357 | ||
| 358 | out: | ||
| 330 | unlock_page(page_head); | 359 | unlock_page(page_head); |
| 331 | put_page(page_head); | 360 | put_page(page_head); |
| 332 | return 0; | 361 | return err; |
| 333 | } | 362 | } |
| 334 | 363 | ||
| 335 | static inline void put_futex_key(union futex_key *key) | 364 | static inline void put_futex_key(union futex_key *key) |
| @@ -940,7 +969,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) | |||
| 940 | if (!bitset) | 969 | if (!bitset) |
| 941 | return -EINVAL; | 970 | return -EINVAL; |
| 942 | 971 | ||
| 943 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); | 972 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ); |
| 944 | if (unlikely(ret != 0)) | 973 | if (unlikely(ret != 0)) |
| 945 | goto out; | 974 | goto out; |
| 946 | 975 | ||
| @@ -986,10 +1015,10 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, | |||
| 986 | int ret, op_ret; | 1015 | int ret, op_ret; |
| 987 | 1016 | ||
| 988 | retry: | 1017 | retry: |
| 989 | ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1); | 1018 | ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); |
| 990 | if (unlikely(ret != 0)) | 1019 | if (unlikely(ret != 0)) |
| 991 | goto out; | 1020 | goto out; |
| 992 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); | 1021 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); |
| 993 | if (unlikely(ret != 0)) | 1022 | if (unlikely(ret != 0)) |
| 994 | goto out_put_key1; | 1023 | goto out_put_key1; |
| 995 | 1024 | ||
| @@ -1243,10 +1272,11 @@ retry: | |||
| 1243 | pi_state = NULL; | 1272 | pi_state = NULL; |
| 1244 | } | 1273 | } |
| 1245 | 1274 | ||
| 1246 | ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1); | 1275 | ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); |
| 1247 | if (unlikely(ret != 0)) | 1276 | if (unlikely(ret != 0)) |
| 1248 | goto out; | 1277 | goto out; |
| 1249 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); | 1278 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, |
| 1279 | requeue_pi ? VERIFY_WRITE : VERIFY_READ); | ||
| 1250 | if (unlikely(ret != 0)) | 1280 | if (unlikely(ret != 0)) |
| 1251 | goto out_put_key1; | 1281 | goto out_put_key1; |
| 1252 | 1282 | ||
| @@ -1790,7 +1820,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, | |||
| 1790 | * while the syscall executes. | 1820 | * while the syscall executes. |
| 1791 | */ | 1821 | */ |
| 1792 | retry: | 1822 | retry: |
| 1793 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key); | 1823 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ); |
| 1794 | if (unlikely(ret != 0)) | 1824 | if (unlikely(ret != 0)) |
| 1795 | return ret; | 1825 | return ret; |
| 1796 | 1826 | ||
| @@ -1941,7 +1971,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect, | |||
| 1941 | } | 1971 | } |
| 1942 | 1972 | ||
| 1943 | retry: | 1973 | retry: |
| 1944 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key); | 1974 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE); |
| 1945 | if (unlikely(ret != 0)) | 1975 | if (unlikely(ret != 0)) |
| 1946 | goto out; | 1976 | goto out; |
| 1947 | 1977 | ||
| @@ -2060,7 +2090,7 @@ retry: | |||
| 2060 | if ((uval & FUTEX_TID_MASK) != vpid) | 2090 | if ((uval & FUTEX_TID_MASK) != vpid) |
| 2061 | return -EPERM; | 2091 | return -EPERM; |
| 2062 | 2092 | ||
| 2063 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); | 2093 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE); |
| 2064 | if (unlikely(ret != 0)) | 2094 | if (unlikely(ret != 0)) |
| 2065 | goto out; | 2095 | goto out; |
| 2066 | 2096 | ||
| @@ -2249,7 +2279,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | |||
| 2249 | debug_rt_mutex_init_waiter(&rt_waiter); | 2279 | debug_rt_mutex_init_waiter(&rt_waiter); |
| 2250 | rt_waiter.task = NULL; | 2280 | rt_waiter.task = NULL; |
| 2251 | 2281 | ||
| 2252 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); | 2282 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); |
| 2253 | if (unlikely(ret != 0)) | 2283 | if (unlikely(ret != 0)) |
| 2254 | goto out; | 2284 | goto out; |
| 2255 | 2285 | ||
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 47613dfb7b28..ddc7644c1305 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
| @@ -274,7 +274,7 @@ static void __call_usermodehelper(struct work_struct *work) | |||
| 274 | * (used for preventing user land processes from being created after the user | 274 | * (used for preventing user land processes from being created after the user |
| 275 | * land has been frozen during a system-wide hibernation or suspend operation). | 275 | * land has been frozen during a system-wide hibernation or suspend operation). |
| 276 | */ | 276 | */ |
| 277 | static int usermodehelper_disabled; | 277 | static int usermodehelper_disabled = 1; |
| 278 | 278 | ||
| 279 | /* Number of helpers running */ | 279 | /* Number of helpers running */ |
| 280 | static atomic_t running_helpers = ATOMIC_INIT(0); | 280 | static atomic_t running_helpers = ATOMIC_INIT(0); |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3956f5149e25..8c24294e477f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
| @@ -2468,7 +2468,7 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark) | |||
| 2468 | 2468 | ||
| 2469 | BUG_ON(usage_bit >= LOCK_USAGE_STATES); | 2469 | BUG_ON(usage_bit >= LOCK_USAGE_STATES); |
| 2470 | 2470 | ||
| 2471 | if (hlock_class(hlock)->key == &__lockdep_no_validate__) | 2471 | if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys) |
| 2472 | continue; | 2472 | continue; |
| 2473 | 2473 | ||
| 2474 | if (!mark_lock(curr, hlock, usage_bit)) | 2474 | if (!mark_lock(curr, hlock, usage_bit)) |
| @@ -2485,23 +2485,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip) | |||
| 2485 | { | 2485 | { |
| 2486 | struct task_struct *curr = current; | 2486 | struct task_struct *curr = current; |
| 2487 | 2487 | ||
| 2488 | if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) | ||
| 2489 | return; | ||
| 2490 | |||
| 2491 | if (unlikely(curr->hardirqs_enabled)) { | ||
| 2492 | /* | ||
| 2493 | * Neither irq nor preemption are disabled here | ||
| 2494 | * so this is racy by nature but losing one hit | ||
| 2495 | * in a stat is not a big deal. | ||
| 2496 | */ | ||
| 2497 | __debug_atomic_inc(redundant_hardirqs_on); | ||
| 2498 | return; | ||
| 2499 | } | ||
| 2500 | /* we'll do an OFF -> ON transition: */ | 2488 | /* we'll do an OFF -> ON transition: */ |
| 2501 | curr->hardirqs_enabled = 1; | 2489 | curr->hardirqs_enabled = 1; |
| 2502 | 2490 | ||
| 2503 | if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) | ||
| 2504 | return; | ||
| 2505 | /* | 2491 | /* |
| 2506 | * We are going to turn hardirqs on, so set the | 2492 | * We are going to turn hardirqs on, so set the |
| 2507 | * usage bit for all held locks: | 2493 | * usage bit for all held locks: |
| @@ -2529,9 +2515,25 @@ void trace_hardirqs_on_caller(unsigned long ip) | |||
| 2529 | if (unlikely(!debug_locks || current->lockdep_recursion)) | 2515 | if (unlikely(!debug_locks || current->lockdep_recursion)) |
| 2530 | return; | 2516 | return; |
| 2531 | 2517 | ||
| 2518 | if (unlikely(current->hardirqs_enabled)) { | ||
| 2519 | /* | ||
| 2520 | * Neither irq nor preemption are disabled here | ||
| 2521 | * so this is racy by nature but losing one hit | ||
| 2522 | * in a stat is not a big deal. | ||
| 2523 | */ | ||
| 2524 | __debug_atomic_inc(redundant_hardirqs_on); | ||
| 2525 | return; | ||
| 2526 | } | ||
| 2527 | |||
| 2532 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 2528 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2533 | return; | 2529 | return; |
| 2534 | 2530 | ||
| 2531 | if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) | ||
| 2532 | return; | ||
| 2533 | |||
| 2534 | if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) | ||
| 2535 | return; | ||
| 2536 | |||
| 2535 | current->lockdep_recursion = 1; | 2537 | current->lockdep_recursion = 1; |
| 2536 | __trace_hardirqs_on_caller(ip); | 2538 | __trace_hardirqs_on_caller(ip); |
| 2537 | current->lockdep_recursion = 0; | 2539 | current->lockdep_recursion = 0; |
| @@ -2872,10 +2874,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
| 2872 | void lockdep_init_map(struct lockdep_map *lock, const char *name, | 2874 | void lockdep_init_map(struct lockdep_map *lock, const char *name, |
| 2873 | struct lock_class_key *key, int subclass) | 2875 | struct lock_class_key *key, int subclass) |
| 2874 | { | 2876 | { |
| 2875 | int i; | 2877 | memset(lock, 0, sizeof(*lock)); |
| 2876 | |||
| 2877 | for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) | ||
| 2878 | lock->class_cache[i] = NULL; | ||
| 2879 | 2878 | ||
| 2880 | #ifdef CONFIG_LOCK_STAT | 2879 | #ifdef CONFIG_LOCK_STAT |
| 2881 | lock->cpu = raw_smp_processor_id(); | 2880 | lock->cpu = raw_smp_processor_id(); |
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index d1db2880d1cf..e19ce1454ee1 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
| @@ -291,30 +291,28 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) | |||
| 291 | if (!cpumask_subset(mask, cpu_possible_mask)) | 291 | if (!cpumask_subset(mask, cpu_possible_mask)) |
| 292 | return -EINVAL; | 292 | return -EINVAL; |
| 293 | 293 | ||
| 294 | s = NULL; | ||
| 295 | if (isadd == REGISTER) { | 294 | if (isadd == REGISTER) { |
| 296 | for_each_cpu(cpu, mask) { | 295 | for_each_cpu(cpu, mask) { |
| 297 | if (!s) | 296 | s = kmalloc_node(sizeof(struct listener), |
| 298 | s = kmalloc_node(sizeof(struct listener), | 297 | GFP_KERNEL, cpu_to_node(cpu)); |
| 299 | GFP_KERNEL, cpu_to_node(cpu)); | ||
| 300 | if (!s) | 298 | if (!s) |
| 301 | goto cleanup; | 299 | goto cleanup; |
| 300 | |||
| 302 | s->pid = pid; | 301 | s->pid = pid; |
| 303 | INIT_LIST_HEAD(&s->list); | ||
| 304 | s->valid = 1; | 302 | s->valid = 1; |
| 305 | 303 | ||
| 306 | listeners = &per_cpu(listener_array, cpu); | 304 | listeners = &per_cpu(listener_array, cpu); |
| 307 | down_write(&listeners->sem); | 305 | down_write(&listeners->sem); |
| 308 | list_for_each_entry_safe(s2, tmp, &listeners->list, list) { | 306 | list_for_each_entry(s2, &listeners->list, list) { |
| 309 | if (s2->pid == pid) | 307 | if (s2->pid == pid && s2->valid) |
| 310 | goto next_cpu; | 308 | goto exists; |
| 311 | } | 309 | } |
| 312 | list_add(&s->list, &listeners->list); | 310 | list_add(&s->list, &listeners->list); |
| 313 | s = NULL; | 311 | s = NULL; |
| 314 | next_cpu: | 312 | exists: |
| 315 | up_write(&listeners->sem); | 313 | up_write(&listeners->sem); |
| 314 | kfree(s); /* nop if NULL */ | ||
| 316 | } | 315 | } |
| 317 | kfree(s); | ||
| 318 | return 0; | 316 | return 0; |
| 319 | } | 317 | } |
| 320 | 318 | ||
