Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	118
-rw-r--r--	mm/slub.c	 20
2 files changed, 87 insertions, 51 deletions
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+	NONE,
+	PARTIAL_AC,
+	PARTIAL_L3,
+	EARLY,
+	FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up >= EARLY;
+}
+
 #ifdef CONFIG_LOCKDEP
 
 /*
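This hunk only moves the bootstrap state enum and slab_is_available() up the file so that the lockdep code below can test g_cpucache_up; the helper itself is unchanged, and the old copy is deleted in a later hunk. As a rough illustration of why such a predicate exists (init_table() is a made-up early-boot caller, not from this patch), setup code typically uses it to decide whether kmalloc() is usable yet:

#include <linux/bootmem.h>
#include <linux/slab.h>

static void *init_table(unsigned long size)	/* hypothetical early-boot caller */
{
	if (slab_is_available())
		return kmalloc(size, GFP_KERNEL);	/* general caches are up */
	return alloc_bootmem(size);			/* still before kmem_cache_init() */
}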
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
 {
-	int q;
 	struct cache_sizes *s = malloc_sizes;
 
-	while (s->cs_size != ULONG_MAX) {
-		for_each_node(q) {
-			struct array_cache **alc;
-			int r;
-			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
-			if (!l3 || OFF_SLAB(s->cs_cachep))
-				continue;
-			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-			alc = l3->alien;
-			/*
-			 * FIXME: This check for BAD_ALIEN_MAGIC
-			 * should go away when common slab code is taught to
-			 * work even without alien caches.
-			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-			 * for alloc_alien_cache,
-			 */
-			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-				continue;
-			for_each_node(r) {
-				if (alc[r])
-					lockdep_set_class(&alc[r]->lock,
-							&on_slab_alc_key);
-			}
+	if (g_cpucache_up != FULL)
+		return;
+
+	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+		struct array_cache **alc;
+		struct kmem_list3 *l3;
+		int r;
+
+		l3 = s->cs_cachep->nodelists[q];
+		if (!l3 || OFF_SLAB(s->cs_cachep))
+			return;
+		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+		alc = l3->alien;
+		/*
+		 * FIXME: This check for BAD_ALIEN_MAGIC
+		 * should go away when common slab code is taught to
+		 * work even without alien caches.
+		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+		 * for alloc_alien_cache,
+		 */
+		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+			return;
+		for_each_node(r) {
+			if (alc[r])
+				lockdep_set_class(&alc[r]->lock,
+						&on_slab_alc_key);
 		}
-		s++;
 	}
 }
+
+static inline void init_lock_keys(void)
+{
+	int node;
+
+	for_each_node(node)
+		init_node_lock_keys(node);
+}
 #else
+static void init_node_lock_keys(int q)
+{
+}
+
 static inline void init_lock_keys(void)
 {
 }
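The old init_lock_keys() walked every node for every cache in one pass; the new init_node_lock_keys() covers a single node, so the same annotation can also be applied later, when a node's per-cache structures are set up from the CPU-hotplug path (see the cpuup_prepare() hunk below). The annotation itself is the standard lockdep re-classing pattern; a minimal sketch with assumed names (demo_key, reclass_lock), not taken from this patch:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key demo_key;	/* one key per logical lock class */

static void reclass_lock(spinlock_t *lock)
{
	/*
	 * Give the lock its own lockdep class so that nesting it with other
	 * locks initialised by the same code does not trigger false-positive
	 * deadlock reports.
	 */
	lockdep_set_class(lock, &demo_key);
}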
@@ -665,26 +697,6 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		kfree(shared);
 		free_alien_cache(alien);
 	}
+	init_node_lock_keys(node);
+
 	return 0;
 bad:
 	cpuup_canceled(cpu);
@@ -3103,13 +3117,19 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
+		/*
+		 * the 'ac' may be updated by cache_alloc_refill(),
+		 * and kmemleak_erase() requires its correct value.
+		 */
+		ac = cpu_cache_get(cachep);
 	}
 	/*
 	 * To avoid a false negative, if an object that is in one of the
 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
 	 * treat the array pointers as a reference to the object.
 	 */
-	kmemleak_erase(&ac->entry[ac->avail]);
+	if (objp)
+		kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
 
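cache_alloc_refill() may grow or replace the per-cpu array_cache, so the ac pointer read before the call can go stale; re-reading it, and skipping the erase when no object was obtained, keeps the kmemleak bookkeeping from writing through an invalid slot. For context, kmemleak_erase() simply clears the recorded pointer so kmemleak stops treating the array entry as a live reference; roughly (a simplified sketch of the helper in <linux/kmemleak.h>):

static inline void kmemleak_erase(void **ptr)
{
	*ptr = NULL;	/* the slot no longer counts as a reference to the object */
}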
@@ -3306,7 +3326,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (unlikely(nodeid == -1))
+	if (nodeid == -1)
 		nodeid = numa_node_id();
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
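Presumably the unlikely() hint is dropped because -1 is not a rare value here: it is the conventional "no preferred node" argument passed by node-agnostic callers, in which case the allocator falls back to the local node. A small hedged illustration of that calling convention (demo_node_alloc() is made up):

#include <linux/slab.h>

static void *demo_node_alloc(struct kmem_cache *cachep)
{
	/* -1 = no node preference; __cache_alloc_node() substitutes numa_node_id() */
	return kmem_cache_alloc_node(cachep, GFP_KERNEL, -1);
}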
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1735,7 +1735,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	}
 	local_irq_restore(flags);
 
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
+	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, objsize);
 
 	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
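Only the parenthesis placement changes: the branch-prediction hint now covers just the __GFP_ZERO test rather than the combined test with a non-NULL object; behaviour is the same. __GFP_ZERO is the flag that makes slab_alloc() perform the memset() above, which is also what kzalloc() relies on; a tiny hedged example (demo_zalloc() is hypothetical):

#include <linux/slab.h>

static void *demo_zalloc(size_t size)
{
	/* equivalent to kzalloc(size, GFP_KERNEL): the allocator zeroes the object */
	return kmalloc(size, GFP_KERNEL | __GFP_ZERO);
}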
@@ -4371,12 +4371,28 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 	return len + sprintf(buf + len, "\n");
 }
 
+static void clear_stat(struct kmem_cache *s, enum stat_item si)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		get_cpu_slab(s, cpu)->stat[si] = 0;
+}
+
 #define STAT_ATTR(si, text) \
 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
 { \
 	return show_stat(s, buf, si); \
 } \
-SLAB_ATTR_RO(text); \
+static ssize_t text##_store(struct kmem_cache *s, \
+				const char *buf, size_t length) \
+{ \
+	if (buf[0] != '0') \
+		return -EINVAL; \
+	clear_stat(s, si); \
+	return length; \
+} \
+SLAB_ATTR(text); \
 
 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
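With the added _store handler each statistics attribute becomes writable: writing a string that starts with '0' clears that counter on every online CPU via clear_stat(), and anything else is rejected with -EINVAL. In practice (assuming the usual SLUB sysfs layout) a counter such as /sys/kernel/slab/<cache>/alloc_fastpath can therefore be reset to zero before a new measurement run, instead of staying read-only as it was with SLAB_ATTR_RO.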
