path: root/mm/slab.c
author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-08-06 19:04:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:14 -0400
commit	367f7f2f45e7f601bcf87aeffb0c81e6d26e53df (patch)
tree	9fd27e53d1e0c82e905ffd07501072d96f54d087	/mm/slab.c
parent	833b706cc8b7b555e18d3426e9616bd066883a7a (diff)
slab: remove a useless lockdep annotation
Now there is no code that holds two locks simultaneously, since we don't call slab_destroy() while holding any lock. So the lockdep annotation is useless now. Remove it.

v2: don't remove BAD_ALIEN_MAGIC in this patch. It will be removed in the following patch.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
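Background, not part of the patch: the annotation being removed relied on lockdep_set_class() to move nested locks into dedicated lock classes, so lockdep would not report taking one kmem_cache_node's list_lock while another is held as recursive locking of a single class. A minimal, hypothetical sketch of that general pattern (the lock and key names here are illustrative, not taken from mm/slab.c):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* One lock_class_key per nesting level (illustrative names). */
static struct lock_class_key outer_key;
static struct lock_class_key nested_key;

static spinlock_t outer_lock;
static spinlock_t nested_lock;

static void annotate_nested_locks(void)
{
	spin_lock_init(&outer_lock);
	spin_lock_init(&nested_lock);

	/*
	 * Give each lock its own class so that taking nested_lock
	 * while holding outer_lock is not flagged by lockdep as
	 * recursive locking within one class.
	 */
	lockdep_set_class(&outer_lock, &outer_key);
	lockdep_set_class(&nested_lock, &nested_key);
}

Since slab_destroy() is no longer called while a node lock is held, no such per-class separation is needed, which is what this patch removes.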
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	153
1 file changed, 0 insertions, 153 deletions
diff --git a/mm/slab.c b/mm/slab.c
index e4dc0896b891..630c85469164 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -472,139 +472,6 @@ static struct kmem_cache kmem_cache_boot = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
-#ifdef CONFIG_LOCKDEP
-
-/*
- * Slab sometimes uses the kmalloc slabs to store the slab headers
- * for other slabs "off slab".
- * The locking for this is tricky in that it nests within the locks
- * of all other slabs in a few places; to deal with this special
- * locking we put on-slab caches into a separate lock-class.
- *
- * We set lock class for alien array caches which are up during init.
- * The lock annotation will be lost if all cpus of a node goes down and
- * then comes back up during hotplug
- */
-static struct lock_class_key on_slab_l3_key;
-static struct lock_class_key on_slab_alc_key;
-
-static struct lock_class_key debugobj_l3_key;
-static struct lock_class_key debugobj_alc_key;
-
-static void slab_set_lock_classes(struct kmem_cache *cachep,
-	struct lock_class_key *l3_key, struct lock_class_key *alc_key,
-	struct kmem_cache_node *n)
-{
-	struct alien_cache **alc;
-	int r;
-
-	lockdep_set_class(&n->list_lock, l3_key);
-	alc = n->alien;
-	/*
-	 * FIXME: This check for BAD_ALIEN_MAGIC
-	 * should go away when common slab code is taught to
-	 * work even without alien caches.
-	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-	 * for alloc_alien_cache,
-	 */
-	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-		return;
-	for_each_node(r) {
-		if (alc[r])
-			lockdep_set_class(&(alc[r]->lock), alc_key);
-	}
-}
-
-static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, n);
-}
-
-static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-{
-	int node;
-	struct kmem_cache_node *n;
-
-	for_each_kmem_cache_node(cachep, node, n)
-		slab_set_debugobj_lock_classes_node(cachep, n);
-}
-
-static void init_node_lock_keys(int q)
-{
-	int i;
-
-	if (slab_state < UP)
-		return;
-
-	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
-		struct kmem_cache_node *n;
-		struct kmem_cache *cache = kmalloc_caches[i];
-
-		if (!cache)
-			continue;
-
-		n = get_node(cache, q);
-		if (!n || OFF_SLAB(cache))
-			continue;
-
-		slab_set_lock_classes(cache, &on_slab_l3_key,
-				&on_slab_alc_key, n);
-	}
-}
-
-static void on_slab_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-	slab_set_lock_classes(cachep, &on_slab_l3_key,
-			&on_slab_alc_key, n);
-}
-
-static inline void on_slab_lock_classes(struct kmem_cache *cachep)
-{
-	int node;
-	struct kmem_cache_node *n;
-
-	VM_BUG_ON(OFF_SLAB(cachep));
-	for_each_kmem_cache_node(cachep, node, n)
-		on_slab_lock_classes_node(cachep, n);
-}
-
-static inline void __init init_lock_keys(void)
-{
-	int node;
-
-	for_each_node(node)
-		init_node_lock_keys(node);
-}
-#else
-static void __init init_node_lock_keys(int q)
-{
-}
-
-static inline void init_lock_keys(void)
-{
-}
-
-static inline void on_slab_lock_classes(struct kmem_cache *cachep)
-{
-}
-
-static inline void on_slab_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-}
-
-static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-}
-
-static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-{
-}
-#endif
 
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1348,13 +1215,7 @@ static int cpuup_prepare(long cpu)
 		spin_unlock_irq(&n->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
-		if (cachep->flags & SLAB_DEBUG_OBJECTS)
-			slab_set_debugobj_lock_classes_node(cachep, n);
-		else if (!OFF_SLAB(cachep) &&
-			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
-			on_slab_lock_classes_node(cachep, n);
 	}
-	init_node_lock_keys(node);
 
 	return 0;
 bad:
@@ -1663,9 +1524,6 @@ void __init kmem_cache_init_late(void)
 		BUG();
 	mutex_unlock(&slab_mutex);
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* Done! */
 	slab_state = FULL;
 
@@ -2446,17 +2304,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		return err;
 	}
 
-	if (flags & SLAB_DEBUG_OBJECTS) {
-		/*
-		 * Would deadlock through slab_destroy()->call_rcu()->
-		 * debug_object_activate()->kmem_cache_alloc().
-		 */
-		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
-
-		slab_set_debugobj_lock_classes(cachep);
-	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
-		on_slab_lock_classes(cachep);
-
 	return 0;
 }
 