 mm/slab.c | 108 ++++++++++++++++++++++++++++++++------------------------
 1 file changed, 61 insertions(+), 47 deletions(-)
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+        NONE,
+        PARTIAL_AC,
+        PARTIAL_L3,
+        EARLY,
+        FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+        return g_cpucache_up >= EARLY;
+}
+
 #ifdef CONFIG_LOCKDEP
 
 /*
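The state enum and slab_is_available() are moved here verbatim from further down the file (the old copy is removed in the third hunk below) so that the lockdep helpers that follow can test g_cpucache_up ahead of its previous definition point. slab_is_available() is the gate boot code uses to decide whether slab-backed allocation is safe yet; the following is a minimal sketch of that usage pattern, where boot_table_alloc() is a hypothetical helper name, not part of this patch:

#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>

/* boot_table_alloc() is illustrative only: code that can run both
 * before and after kmem_cache_init() picks its allocator based on
 * slab_is_available(), i.e. on g_cpucache_up >= EARLY. */
static void *boot_table_alloc(unsigned long size, int nid)
{
        if (slab_is_available())
                return kzalloc_node(size, GFP_NOWAIT, nid);
        /* too early for slab: take memory straight from bootmem */
        return alloc_bootmem_node(NODE_DATA(nid), size);
}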
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
 {
-        int q;
         struct cache_sizes *s = malloc_sizes;
 
-        while (s->cs_size != ULONG_MAX) {
-                for_each_node(q) {
-                        struct array_cache **alc;
-                        int r;
-                        struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
-                        if (!l3 || OFF_SLAB(s->cs_cachep))
-                                continue;
-                        lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-                        alc = l3->alien;
-                        /*
-                         * FIXME: This check for BAD_ALIEN_MAGIC
-                         * should go away when common slab code is taught to
-                         * work even without alien caches.
-                         * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-                         * for alloc_alien_cache,
-                         */
-                        if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-                                continue;
-                        for_each_node(r) {
-                                if (alc[r])
-                                        lockdep_set_class(&alc[r]->lock,
-                                                &on_slab_alc_key);
-                        }
+        if (g_cpucache_up != FULL)
+                return;
+
+        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+                struct array_cache **alc;
+                struct kmem_list3 *l3;
+                int r;
+
+                l3 = s->cs_cachep->nodelists[q];
+                if (!l3 || OFF_SLAB(s->cs_cachep))
+                        return;
+                lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+                alc = l3->alien;
+                /*
+                 * FIXME: This check for BAD_ALIEN_MAGIC
+                 * should go away when common slab code is taught to
+                 * work even without alien caches.
+                 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+                 * for alloc_alien_cache,
+                 */
+                if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+                        return;
+                for_each_node(r) {
+                        if (alc[r])
+                                lockdep_set_class(&alc[r]->lock,
+                                        &on_slab_alc_key);
                 }
-                s++;
         }
 }
+
+static inline void init_lock_keys(void)
+{
+        int node;
+
+        for_each_node(node)
+                init_node_lock_keys(node);
+}
 #else
+static void init_node_lock_keys(int q)
+{
+}
+
 static inline void init_lock_keys(void)
 {
 }
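The refactoring turns the old loop nest inside out: init_node_lock_keys() now annotates every cache's lists for one node, while the boot-time init_lock_keys() recovers the previous behavior by walking all nodes. The early return when g_cpucache_up != FULL keeps the hotplug path from touching nodelists that are not set up yet. The annotation itself is the standard lockdep idiom of folding a family of dynamically allocated locks into one class; below is a standalone illustration with made-up names, not code from slab.c:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* One static key per lock "role": every lock re-keyed with the same
 * key joins a single lockdep class, so lock-order rules are checked
 * per role (list lock vs. alien lock) rather than per instance. */
static struct lock_class_key demo_list_key;
static struct lock_class_key demo_alien_key;

static void demo_annotate(spinlock_t *list_lock, spinlock_t *alien_lock)
{
        lockdep_set_class(list_lock, &demo_list_key);
        lockdep_set_class(alien_lock, &demo_alien_key);
}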
@@ -665,26 +697,6 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-        NONE,
-        PARTIAL_AC,
-        PARTIAL_L3,
-        EARLY,
-        FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-        return g_cpucache_up >= EARLY;
-}
-
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
                 kfree(shared);
                 free_alien_cache(alien);
         }
+        init_node_lock_keys(node);
+
         return 0;
 bad:
         cpuup_canceled(cpu);
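This hunk is the functional change: when a CPU comes up, cpuup_prepare() may allocate fresh kmem_list3 and alien-cache structures for a node that was offline at boot, and those locks would otherwise keep their default per-instance lockdep class because init_lock_keys() only ran once during init. Re-keying the node here closes that gap. Roughly, the hotplug call path looks like the following condensed sketch (other notifier transitions elided):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        int err = 0;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&cache_chain_mutex);
                err = cpuup_prepare(cpu); /* now ends by re-keying the node */
                mutex_unlock(&cache_chain_mutex);
                break;
        /* ... CPU_ONLINE, CPU_DOWN_* and CPU_DEAD cases elided ... */
        }
        return err ? NOTIFY_BAD : NOTIFY_OK;
}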
