author		Pekka Enberg <penberg@kernel.org>	2011-09-19 10:46:07 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-09-19 10:46:07 -0400
commit		d20bbfab01802e195a50435940f7e4aa747c217c
tree		82b0007e33c083050a4e60a49dbb2f5477b4c99d	/mm/slab.c
parent		a37933c37c14b64e81c7c9cc44a5d3f5e0c91412
parent		136333d104bd3a62d783b0ac3d0f32ac0108c5d0
Merge branch 'slab/urgent' into slab/next
Diffstat (limited to 'mm/slab.c')
 -rw-r--r--	mm/slab.c	99
 1 file changed, 75 insertions(+), 24 deletions(-)
@@ -622,6 +622,51 @@ int slab_is_available(void)
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
+static struct lock_class_key debugobj_l3_key;
+static struct lock_class_key debugobj_alc_key;
+
+static void slab_set_lock_classes(struct kmem_cache *cachep,
+		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
+		int q)
+{
+	struct array_cache **alc;
+	struct kmem_list3 *l3;
+	int r;
+
+	l3 = cachep->nodelists[q];
+	if (!l3)
+		return;
+
+	lockdep_set_class(&l3->list_lock, l3_key);
+	alc = l3->alien;
+	/*
+	 * FIXME: This check for BAD_ALIEN_MAGIC
+	 * should go away when common slab code is taught to
+	 * work even without alien caches.
+	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+	 * for alloc_alien_cache,
+	 */
+	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+		return;
+	for_each_node(r) {
+		if (alc[r])
+			lockdep_set_class(&alc[r]->lock, alc_key);
+	}
+}
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+	int node;
+
+	for_each_online_node(node)
+		slab_set_debugobj_lock_classes_node(cachep, node);
+}
+
 static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
@@ -630,29 +675,14 @@ static void init_node_lock_keys(int q)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-		struct array_cache **alc;
 		struct kmem_list3 *l3;
-		int r;
 
 		l3 = s->cs_cachep->nodelists[q];
 		if (!l3 || OFF_SLAB(s->cs_cachep))
 			continue;
-		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-		alc = l3->alien;
-		/*
-		 * FIXME: This check for BAD_ALIEN_MAGIC
-		 * should go away when common slab code is taught to
-		 * work even without alien caches.
-		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-		 * for alloc_alien_cache,
-		 */
-		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-			continue;
-		for_each_node(r) {
-			if (alc[r])
-				lockdep_set_class(&alc[r]->lock,
-						&on_slab_alc_key);
-		}
+
+		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+				&on_slab_alc_key, q);
 	}
 }
 
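The two hunks above pull the duplicated lockdep-annotation loop out of init_node_lock_keys() into the new slab_set_lock_classes() helper, parameterized by an (l3_key, alc_key) pair so the on-slab and debug-objects paths share one implementation. As a minimal sketch of the underlying lockdep idiom (illustrative names, not from this patch): a static lock_class_key is the identity of a lock class, and every lock handed to lockdep_set_class() with that key's address is validated as one class.

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	/* One static key per logical class; its address names the class. */
	static struct lock_class_key example_list_lock_key;

	static void example_annotate(spinlock_t *lock)
	{
		/* Move this lock into the class named by the key above. */
		lockdep_set_class(lock, &example_list_lock_key);
	}
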
@@ -671,6 +701,14 @@ static void init_node_lock_keys(int q)
 static inline void init_lock_keys(void)
 {
 }
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+}
 #endif
 
 /*
@@ -1264,6 +1302,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		spin_unlock_irq(&l3->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
+		if (cachep->flags & SLAB_DEBUG_OBJECTS)
+			slab_set_debugobj_lock_classes_node(cachep, node);
 	}
 	init_node_lock_keys(node);
 
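With the helpers in place, the hunk above makes the CPU-hotplug path re-annotate what cpuup_prepare() has just allocated: for caches created with SLAB_DEBUG_OBJECTS, the fresh per-node list and alien locks are moved into the debugobj lock classes before first use.
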
@@ -1626,6 +1666,9 @@ void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(cachep, &cache_chain, next)
@@ -1636,9 +1679,6 @@ void __init kmem_cache_init_late(void)
 	/* Done! */
 	g_cpucache_up = FULL;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
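These two kmem_cache_init_late() hunks move the init_lock_keys() call from after the head-array resize to before it, presumably so the malloc caches carry their lockdep annotations before the resize path below takes their l3 list_locks.
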
@@ -2426,6 +2466,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		goto oops;
 	}
 
+	if (flags & SLAB_DEBUG_OBJECTS) {
+		/*
+		 * Would deadlock through slab_destroy()->call_rcu()->
+		 * debug_object_activate()->kmem_cache_alloc().
+		 */
+		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
+
+		slab_set_debugobj_lock_classes(cachep);
+	}
+
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
 oops:
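The new block in kmem_cache_create() warns when SLAB_DEBUG_OBJECTS is combined with SLAB_DESTROY_BY_RCU, since freeing such a slab would re-enter the allocator through slab_destroy()->call_rcu()->debug_object_activate()->kmem_cache_alloc(); it also gives debug-object caches their dedicated lock classes. A hedged usage sketch, with a hypothetical object type and cache name:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	struct my_obj {			/* hypothetical object type */
		struct rcu_head rcu;
		int payload;
	};

	static struct kmem_cache *my_cache;

	static int __init my_cache_init(void)
	{
		/*
		 * SLAB_DEBUG_OBJECTS alone is fine; also passing
		 * SLAB_DESTROY_BY_RCU would now trip the WARN_ON_ONCE()
		 * above.
		 */
		my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
					     0, SLAB_DEBUG_OBJECTS, NULL);
		return my_cache ? 0 : -ENOMEM;
	}
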
@@ -3398,7 +3448,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (nodeid == -1)
+	if (nodeid == NUMA_NO_NODE)
 		nodeid = slab_node;
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
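The allocation path now tests nodeid against the named sentinel rather than a bare -1. NUMA_NO_NODE comes from include/linux/numa.h and expands to the same value, so behavior is unchanged; only readability improves:

	/* include/linux/numa.h */
	#define NUMA_NO_NODE	(-1)
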
@@ -3929,7 +3979,7 @@ fail:
 
 struct ccupdate_struct {
 	struct kmem_cache *cachep;
-	struct array_cache *new[NR_CPUS];
+	struct array_cache *new[0];
 };
 
 static void do_ccupdate_local(void *info)
@@ -3951,7 +4001,8 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	struct ccupdate_struct *new;
 	int i;
 
-	new = kzalloc(sizeof(*new), gfp);
+	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
+		      gfp);
 	if (!new)
 		return -ENOMEM;
 
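The final two hunks shrink ccupdate_struct: the fixed NR_CPUS-sized array becomes a zero-length trailing array, and the allocation is sized by nr_cpu_ids, so only the CPUs actually possible on the running system cost memory. A self-contained userspace sketch of the same pattern (illustrative names; new[0] is the GCC zero-length-array extension the kernel uses):

	#include <stdio.h>
	#include <stdlib.h>

	struct ccupdate_like {
		void *cachep;
		void *new[0];	/* trailing array, sized at allocation time */
	};

	int main(void)
	{
		int nr_ids = 4;	/* stands in for nr_cpu_ids */
		struct ccupdate_like *p =
			calloc(1, sizeof(*p) + nr_ids * sizeof(void *));

		if (!p)
			return 1;
		printf("allocated %zu bytes for %d pointer slots\n",
		       sizeof(*p) + nr_ids * sizeof(void *), nr_ids);
		free(p);
		return 0;
	}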