author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-08-06 19:04:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:14 -0400
commit		367f7f2f45e7f601bcf87aeffb0c81e6d26e53df
tree		9fd27e53d1e0c82e905ffd07501072d96f54d087 /mm/slab.c
parent		833b706cc8b7b555e18d3426e9616bd066883a7a
slab: remove a useless lockdep annotation
Now there is no code that holds two locks simultaneously, since we don't
call slab_destroy() while holding any lock. So the lockdep annotation is
useless now. Remove it.
v2: don't remove BAD_ALIEN_MAGIC in this patch. It will be removed
in the following patch.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
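Background on the annotation being removed: the kmem_cache_node list_locks all end up in a single lockdep class (they are initialised from one spin_lock_init() site), so nesting two of them — as the old slab_destroy()-under-lock path could — is reported as possible recursive locking even when it is safe. The removed code silenced that false positive by moving certain locks into their own classes with lockdep_set_class(). Below is a minimal, hypothetical sketch of that pattern; the demo_* names are illustrative only and are not part of mm/slab.c.

/* Hypothetical illustration only -- not taken from mm/slab.c. */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_node {
	spinlock_t list_lock;
};

static struct demo_node parent, child;
static struct lock_class_key parent_key;	/* separate class for the outer lock */

static void demo_node_init(struct demo_node *node)
{
	/* One textual spin_lock_init() site => both locks share one lockdep class. */
	spin_lock_init(&node->list_lock);
}

static void demo_init(void)
{
	demo_node_init(&parent);
	demo_node_init(&child);
	/*
	 * Without this, taking child.list_lock while holding parent.list_lock
	 * is flagged as possible recursive locking.  Moving the outer lock
	 * into its own class (the slab_set_lock_classes() trick) silences it.
	 */
	lockdep_set_class(&parent.list_lock, &parent_key);
}

static void demo_nested_use(void)
{
	spin_lock(&parent.list_lock);	/* outer lock, distinct class */
	spin_lock(&child.list_lock);	/* inner lock, no lockdep splat */
	spin_unlock(&child.list_lock);
	spin_unlock(&parent.list_lock);
}

With the slab locking reworked so that slab_destroy() is never called while a list_lock is held, no two locks of that class can nest, and the extra keys become dead weight — which is what this patch removes.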
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	153
1 file changed, 0 insertions, 153 deletions
@@ -472,139 +472,6 @@ static struct kmem_cache kmem_cache_boot = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
-#ifdef CONFIG_LOCKDEP
-
-/*
- * Slab sometimes uses the kmalloc slabs to store the slab headers
- * for other slabs "off slab".
- * The locking for this is tricky in that it nests within the locks
- * of all other slabs in a few places; to deal with this special
- * locking we put on-slab caches into a separate lock-class.
- *
- * We set lock class for alien array caches which are up during init.
- * The lock annotation will be lost if all cpus of a node goes down and
- * then comes back up during hotplug
- */
-static struct lock_class_key on_slab_l3_key;
-static struct lock_class_key on_slab_alc_key;
-
-static struct lock_class_key debugobj_l3_key;
-static struct lock_class_key debugobj_alc_key;
-
-static void slab_set_lock_classes(struct kmem_cache *cachep,
-	struct lock_class_key *l3_key, struct lock_class_key *alc_key,
-	struct kmem_cache_node *n)
-{
-	struct alien_cache **alc;
-	int r;
-
-	lockdep_set_class(&n->list_lock, l3_key);
-	alc = n->alien;
-	/*
-	 * FIXME: This check for BAD_ALIEN_MAGIC
-	 * should go away when common slab code is taught to
-	 * work even without alien caches.
-	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-	 * for alloc_alien_cache,
-	 */
-	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-		return;
-	for_each_node(r) {
-		if (alc[r])
-			lockdep_set_class(&(alc[r]->lock), alc_key);
-	}
-}
-
-static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, n);
-}
-
-static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-{
-	int node;
-	struct kmem_cache_node *n;
-
-	for_each_kmem_cache_node(cachep, node, n)
-		slab_set_debugobj_lock_classes_node(cachep, n);
-}
-
-static void init_node_lock_keys(int q)
-{
-	int i;
-
-	if (slab_state < UP)
-		return;
-
-	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
-		struct kmem_cache_node *n;
-		struct kmem_cache *cache = kmalloc_caches[i];
-
-		if (!cache)
-			continue;
-
-		n = get_node(cache, q);
-		if (!n || OFF_SLAB(cache))
-			continue;
-
-		slab_set_lock_classes(cache, &on_slab_l3_key,
-				&on_slab_alc_key, n);
-	}
-}
-
-static void on_slab_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-	slab_set_lock_classes(cachep, &on_slab_l3_key,
-			&on_slab_alc_key, n);
-}
-
-static inline void on_slab_lock_classes(struct kmem_cache *cachep)
-{
-	int node;
-	struct kmem_cache_node *n;
-
-	VM_BUG_ON(OFF_SLAB(cachep));
-	for_each_kmem_cache_node(cachep, node, n)
-		on_slab_lock_classes_node(cachep, n);
-}
-
-static inline void __init init_lock_keys(void)
-{
-	int node;
-
-	for_each_node(node)
-		init_node_lock_keys(node);
-}
-#else
-static void __init init_node_lock_keys(int q)
-{
-}
-
-static inline void init_lock_keys(void)
-{
-}
-
-static inline void on_slab_lock_classes(struct kmem_cache *cachep)
-{
-}
-
-static inline void on_slab_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-}
-
-static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-}
-
-static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-{
-}
-#endif
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1348,13 +1215,7 @@ static int cpuup_prepare(long cpu)
 		spin_unlock_irq(&n->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
-		if (cachep->flags & SLAB_DEBUG_OBJECTS)
-			slab_set_debugobj_lock_classes_node(cachep, n);
-		else if (!OFF_SLAB(cachep) &&
-			!(cachep->flags & SLAB_DESTROY_BY_RCU))
-			on_slab_lock_classes_node(cachep, n);
 	}
-	init_node_lock_keys(node);
 
 	return 0;
 bad:
@@ -1663,9 +1524,6 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&slab_mutex);
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* Done! */
 	slab_state = FULL;
 
@@ -2446,17 +2304,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		return err;
 	}
 
-	if (flags & SLAB_DEBUG_OBJECTS) {
-		/*
-		 * Would deadlock through slab_destroy()->call_rcu()->
-		 * debug_object_activate()->kmem_cache_alloc().
-		 */
-		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
-
-		slab_set_debugobj_lock_classes(cachep);
-	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
-		on_slab_lock_classes(cachep);
-
 	return 0;
 }
 