-rw-r--r--  kernel/futex.c    54
-rw-r--r--  kernel/lockdep.c  37
-rw-r--r--  mm/slab.c         92
3 files changed, 131 insertions(+), 52 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 0a308970c24a..11cbe052b2e8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -218,6 +218,8 @@ static void drop_futex_key_refs(union futex_key *key)
  * @uaddr: virtual address of the futex
  * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
  * @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ,
+ *      VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -229,12 +231,12 @@ static void drop_futex_key_refs(union futex_key *key)
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
 static int
-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
         unsigned long address = (unsigned long)uaddr;
         struct mm_struct *mm = current->mm;
         struct page *page, *page_head;
-        int err;
+        int err, ro = 0;
 
         /*
          * The futex address must be "naturally" aligned.
@@ -262,8 +264,18 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 
 again:
         err = get_user_pages_fast(address, 1, 1, &page);
+        /*
+         * If write access is not required (eg. FUTEX_WAIT), try
+         * and get read-only access.
+         */
+        if (err == -EFAULT && rw == VERIFY_READ) {
+                err = get_user_pages_fast(address, 1, 0, &page);
+                ro = 1;
+        }
         if (err < 0)
                 return err;
+        else
+                err = 0;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
         page_head = page;
@@ -305,6 +317,13 @@ again:
         if (!page_head->mapping) {
                 unlock_page(page_head);
                 put_page(page_head);
+                /*
+                 * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
+                 * trying to find one. RW mapping would have COW'd (and thus
+                 * have a mapping) so this page is RO and won't ever change.
+                 */
+                if (page_head == ZERO_PAGE(address))
+                        return -EFAULT;
                 goto again;
         }
 
@@ -316,6 +335,15 @@ again:
          * the object not the particular process.
          */
         if (PageAnon(page_head)) {
+                /*
+                 * A RO anonymous page will never change and thus doesn't make
+                 * sense for futex operations.
+                 */
+                if (ro) {
+                        err = -EFAULT;
+                        goto out;
+                }
+
                 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                 key->private.mm = mm;
                 key->private.address = address;
@@ -327,9 +355,10 @@ again:
 
         get_futex_key_refs(key);
 
+out:
         unlock_page(page_head);
         put_page(page_head);
-        return 0;
+        return err;
 }
 
 static inline void put_futex_key(union futex_key *key)
@@ -940,7 +969,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
         if (!bitset)
                 return -EINVAL;
 
-        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
+        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out;
 
@@ -986,10 +1015,10 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
         int ret, op_ret;
 
 retry:
-        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
+        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out;
-        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
         if (unlikely(ret != 0))
                 goto out_put_key1;
 
@@ -1243,10 +1272,11 @@ retry:
                 pi_state = NULL;
         }
 
-        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
+        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out;
-        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
+                            requeue_pi ? VERIFY_WRITE : VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out_put_key1;
 
@@ -1790,7 +1820,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
          * while the syscall executes.
          */
 retry:
-        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
+        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
         if (unlikely(ret != 0))
                 return ret;
 
@@ -1941,7 +1971,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
         }
 
 retry:
-        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key);
+        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
         if (unlikely(ret != 0))
                 goto out;
 
@@ -2060,7 +2090,7 @@ retry:
         if ((uval & FUTEX_TID_MASK) != vpid)
                 return -EPERM;
 
-        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
+        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
         if (unlikely(ret != 0))
                 goto out;
 
@@ -2249,7 +2279,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
         debug_rt_mutex_init_waiter(&rt_waiter);
         rt_waiter.task = NULL;
 
-        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
         if (unlikely(ret != 0))
                 goto out;
 
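
The futex.c hunks above restore FUTEX_WAIT on read-only shared file mappings: when the writable get_user_pages_fast() fails, get_futex_key() retries read-only and then rejects only read-only anonymous pages. A minimal user-space sketch of the now-working case follows; the file name "ro-file" (assumed to exist with its first four bytes zeroed) and the one-second timeout are illustrative, not part of the patch.

    #include <fcntl.h>
    #include <linux/futex.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("ro-file", O_RDONLY);
            uint32_t *f = mmap(NULL, sizeof(*f), PROT_READ, MAP_SHARED, fd, 0);
            struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

            /*
             * Before the fix, the write=1 get_user_pages_fast() call in
             * get_futex_key() made this fail with -EFAULT; with the
             * read-only retry it blocks until a wake or the timeout.
             */
            syscall(SYS_futex, f, FUTEX_WAIT, 0, &ts, NULL, 0);
            return 0;
    }
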
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3956f5149e25..8c24294e477f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2468,7 +2468,7 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 
                 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
 
-                if (hlock_class(hlock)->key == &__lockdep_no_validate__)
+                if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
                         continue;
 
                 if (!mark_lock(curr, hlock, usage_bit))
@@ -2485,23 +2485,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
-                return;
-
-        if (unlikely(curr->hardirqs_enabled)) {
-                /*
-                 * Neither irq nor preemption are disabled here
-                 * so this is racy by nature but losing one hit
-                 * in a stat is not a big deal.
-                 */
-                __debug_atomic_inc(redundant_hardirqs_on);
-                return;
-        }
         /* we'll do an OFF -> ON transition: */
         curr->hardirqs_enabled = 1;
 
-        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
-                return;
         /*
          * We are going to turn hardirqs on, so set the
          * usage bit for all held locks:
@@ -2529,9 +2515,25 @@ void trace_hardirqs_on_caller(unsigned long ip)
         if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
+        if (unlikely(current->hardirqs_enabled)) {
+                /*
+                 * Neither irq nor preemption are disabled here
+                 * so this is racy by nature but losing one hit
+                 * in a stat is not a big deal.
+                 */
+                __debug_atomic_inc(redundant_hardirqs_on);
+                return;
+        }
+
         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                 return;
 
+        if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+                return;
+
+        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
+                return;
+
         current->lockdep_recursion = 1;
         __trace_hardirqs_on_caller(ip);
         current->lockdep_recursion = 0;
@@ -2872,10 +2874,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                       struct lock_class_key *key, int subclass)
 {
-        int i;
-
-        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
-                lock->class_cache[i] = NULL;
+        memset(lock, 0, sizeof(*lock));
 
 #ifdef CONFIG_LOCK_STAT
         lock->cpu = raw_smp_processor_id();
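
In the lockdep.c hunks, the comparison now uses __lockdep_no_validate__.subkeys because a class's ->key points at a subkeys[] slot of its lock_class_key, not at the key structure itself. The classes this matches are those registered through lockdep_set_novalidate_class() (as the driver core does for dev->mutex). A hedged in-kernel-style sketch of how a lock ends up with that key; the mutex and function names are illustrative:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_dev_lock);      /* illustrative lock */

    static void my_init(void)
    {
            /*
             * Opt this mutex out of full lockdep validation.  This
             * assigns __lockdep_no_validate__ as its class key, which
             * is what the mark_held_locks() check above now matches
             * via ->subkeys.
             */
            lockdep_set_novalidate_class(&my_dev_lock);
    }
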
diff --git a/mm/slab.c b/mm/slab.c
index 95947400702b..6d90a091fdca 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -622,6 +622,51 @@ int slab_is_available(void)
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
+static struct lock_class_key debugobj_l3_key;
+static struct lock_class_key debugobj_alc_key;
+
+static void slab_set_lock_classes(struct kmem_cache *cachep,
+        struct lock_class_key *l3_key, struct lock_class_key *alc_key,
+        int q)
+{
+        struct array_cache **alc;
+        struct kmem_list3 *l3;
+        int r;
+
+        l3 = cachep->nodelists[q];
+        if (!l3)
+                return;
+
+        lockdep_set_class(&l3->list_lock, l3_key);
+        alc = l3->alien;
+        /*
+         * FIXME: This check for BAD_ALIEN_MAGIC
+         * should go away when common slab code is taught to
+         * work even without alien caches.
+         * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+         * for alloc_alien_cache,
+         */
+        if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+                return;
+        for_each_node(r) {
+                if (alc[r])
+                        lockdep_set_class(&alc[r]->lock, alc_key);
+        }
+}
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+        slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+        int node;
+
+        for_each_online_node(node)
+                slab_set_debugobj_lock_classes_node(cachep, node);
+}
+
 static void init_node_lock_keys(int q)
 {
         struct cache_sizes *s = malloc_sizes;
@@ -630,29 +675,14 @@ static void init_node_lock_keys(int q)
                 return;
 
         for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-                struct array_cache **alc;
                 struct kmem_list3 *l3;
-                int r;
 
                 l3 = s->cs_cachep->nodelists[q];
                 if (!l3 || OFF_SLAB(s->cs_cachep))
                         continue;
-                lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-                alc = l3->alien;
-                /*
-                 * FIXME: This check for BAD_ALIEN_MAGIC
-                 * should go away when common slab code is taught to
-                 * work even without alien caches.
-                 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-                 * for alloc_alien_cache,
-                 */
-                if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-                        continue;
-                for_each_node(r) {
-                        if (alc[r])
-                                lockdep_set_class(&alc[r]->lock,
-                                                &on_slab_alc_key);
-                }
+
+                slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+                                &on_slab_alc_key, q);
         }
 }
 
@@ -671,6 +701,14 @@ static void init_node_lock_keys(int q)
 static inline void init_lock_keys(void)
 {
 }
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+}
 #endif
 
 /*
@@ -1264,6 +1302,8 @@ static int __cpuinit cpuup_prepare(long cpu)
                 spin_unlock_irq(&l3->list_lock);
                 kfree(shared);
                 free_alien_cache(alien);
+                if (cachep->flags & SLAB_DEBUG_OBJECTS)
+                        slab_set_debugobj_lock_classes_node(cachep, node);
         }
         init_node_lock_keys(node);
 
@@ -1626,6 +1666,9 @@ void __init kmem_cache_init_late(void)
 {
         struct kmem_cache *cachep;
 
+        /* Annotate slab for lockdep -- annotate the malloc caches */
+        init_lock_keys();
+
         /* 6) resize the head arrays to their final sizes */
         mutex_lock(&cache_chain_mutex);
         list_for_each_entry(cachep, &cache_chain, next)
@@ -1636,9 +1679,6 @@ void __init kmem_cache_init_late(void)
         /* Done! */
         g_cpucache_up = FULL;
 
-        /* Annotate slab for lockdep -- annotate the malloc caches */
-        init_lock_keys();
-
         /*
          * Register a cpu startup notifier callback that initializes
          * cpu_cache_get for all new cpus
@@ -2426,6 +2466,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 goto oops;
         }
 
+        if (flags & SLAB_DEBUG_OBJECTS) {
+                /*
+                 * Would deadlock through slab_destroy()->call_rcu()->
+                 * debug_object_activate()->kmem_cache_alloc().
+                 */
+                WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
+
+                slab_set_debugobj_lock_classes(cachep);
+        }
+
         /* cache setup completed, link it into the list */
         list_add(&cachep->next, &cache_chain);
 oops:
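
The slab.c changes give SLAB_DEBUG_OBJECTS caches their own debugobj_l3_key/debugobj_alc_key lockdep classes, and warn when such a cache is also SLAB_DESTROY_BY_RCU, since slab_destroy()->call_rcu()->debug_object_activate()->kmem_cache_alloc() would recurse into the allocator. A hedged sketch of the two flag combinations; the cache names, sizes, and init function are illustrative only:

    #include <linux/slab.h>

    static struct kmem_cache *good_cachep, *bad_cachep;

    static void example_init(void)
    {
            /*
             * Fine: the cache's l3 and alien-cache locks are switched
             * to the debugobj_* lockdep classes by
             * slab_set_debugobj_lock_classes() above, so debug-objects
             * allocations from within slab paths don't self-deadlock
             * in lockdep's eyes.
             */
            good_cachep = kmem_cache_create("example", 128, 0,
                                            SLAB_DEBUG_OBJECTS, NULL);

            /*
             * Triggers the new WARN_ON_ONCE(): freeing goes through
             * call_rcu(), and debug_object_activate() on the rcu_head
             * can allocate from slab again.
             */
            bad_cachep = kmem_cache_create("example-rcu", 128, 0,
                                           SLAB_DESTROY_BY_RCU |
                                           SLAB_DEBUG_OBJECTS, NULL);
    }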