-rw-r--r--	mm/slab.c	70
-rw-r--r--	mm/slab.h	4
2 files changed, 31 insertions, 43 deletions
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -227,7 +227,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 	INIT_LIST_HEAD(&parent->slabs_full);
 	INIT_LIST_HEAD(&parent->slabs_partial);
 	INIT_LIST_HEAD(&parent->slabs_free);
-	parent->active_slabs = 0;
+	parent->total_slabs = 0;
 	parent->free_slabs = 0;
 	parent->shared = NULL;
 	parent->alien = NULL;
@@ -1381,20 +1381,18 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 		cachep->name, cachep->size, cachep->gfporder);
 
 	for_each_kmem_cache_node(cachep, node, n) {
-		unsigned long active_objs = 0, free_objs = 0;
-		unsigned long active_slabs, num_slabs;
+		unsigned long total_slabs, free_slabs, free_objs;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		active_slabs = n->active_slabs;
-		num_slabs = active_slabs + n->free_slabs;
-
-		active_objs += (num_slabs * cachep->num) - n->free_objects;
-		free_objs += n->free_objects;
+		total_slabs = n->total_slabs;
+		free_slabs = n->free_slabs;
+		free_objs = n->free_objects;
 		spin_unlock_irqrestore(&n->list_lock, flags);
 
-		pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
-			node, active_slabs, num_slabs, active_objs,
-			num_slabs * cachep->num, free_objs);
+		pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
+			node, total_slabs - free_slabs, total_slabs,
+			(total_slabs * cachep->num) - free_objs,
+			total_slabs * cachep->num);
 	}
 #endif
 }
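With active_slabs gone, the out-of-memory report takes one counter snapshot under the lock and derives everything else: active slabs are total_slabs - free_slabs, active objects are total capacity minus free objects. A minimal userspace sketch of that arithmetic (objs_per_slab stands in for cachep->num; the values are invented for illustration):

    /*
     * Userspace sketch (not kernel code) of how the reworked warning
     * derives its numbers from the three per-node counters.
     */
    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long total_slabs = 10, free_slabs = 3, free_objs = 70;
        unsigned long objs_per_slab = 20;   /* cachep->num stand-in */

        /* active figures are derived, no longer tracked directly */
        unsigned long active_slabs = total_slabs - free_slabs;
        unsigned long num_objs = total_slabs * objs_per_slab;
        unsigned long active_objs = num_objs - free_objs;

        printf(" node 0: slabs: %lu/%lu, objs: %lu/%lu\n",
               active_slabs, total_slabs, active_objs, num_objs);
        assert(active_slabs == 7 && active_objs == 130);
        return 0;
    }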
@@ -2307,6 +2305,7 @@ static int drain_freelist(struct kmem_cache *cache,
 		page = list_entry(p, struct page, lru);
 		list_del(&page->lru);
 		n->free_slabs--;
+		n->total_slabs--;
 		/*
 		 * Safe to drop the lock. The slab is no longer linked
 		 * to the cache.
@@ -2741,13 +2740,12 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 	n = get_node(cachep, page_to_nid(page));
 
 	spin_lock(&n->list_lock);
+	n->total_slabs++;
 	if (!page->active) {
 		list_add_tail(&page->lru, &(n->slabs_free));
 		n->free_slabs++;
-	} else {
+	} else
 		fixup_slab_list(cachep, n, page, &list);
-		n->active_slabs++;
-	}
 
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
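cache_grow_end() now bumps total_slabs exactly once per new slab, regardless of which list the slab lands on, while drain_freelist() and free_block() decrement it when a slab is destroyed. A userspace model of that accounting (illustrative names, not kernel code) shows why active slabs stay derivable:

    /*
     * Model of the new bookkeeping: total_slabs changes only on slab
     * creation/destruction, free_slabs only when a slab enters or
     * leaves the free list. Active slabs = total_slabs - free_slabs.
     */
    #include <assert.h>

    struct node_counts {
        unsigned long total_slabs;
        unsigned long free_slabs;
    };

    /* mirrors cache_grow_end(): every new slab bumps total_slabs */
    static void grow(struct node_counts *n, int slab_is_empty)
    {
        n->total_slabs++;
        if (slab_is_empty)
            n->free_slabs++;
    }

    /* mirrors drain_freelist()/free_block() destroying a free slab */
    static void destroy_free_slab(struct node_counts *n)
    {
        n->free_slabs--;
        n->total_slabs--;
    }

    int main(void)
    {
        struct node_counts n = { 0, 0 };

        grow(&n, 1);            /* empty slab onto slabs_free */
        grow(&n, 0);            /* partially used slab */
        destroy_free_slab(&n);  /* reclaim the empty one */
        assert(n.total_slabs == 1 && n.free_slabs == 0);
        return 0;
    }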
@@ -2874,7 +2872,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 
 /* Try to find non-pfmemalloc slab if needed */
 static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
-					struct page *page, bool *page_is_free, bool pfmemalloc)
+					struct page *page, bool pfmemalloc)
 {
 	if (!page)
 		return NULL;
@@ -2893,10 +2891,9 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
 	list_del(&page->lru);
-	if (*page_is_free) {
-		WARN_ON(page->active);
+	if (!page->active) {
 		list_add_tail(&page->lru, &n->slabs_free);
-		*page_is_free = false;
+		n->free_slabs++;
 	} else
 		list_add_tail(&page->lru, &n->slabs_partial);
 
@@ -2908,7 +2905,7 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 	n->free_touched = 1;
 	list_for_each_entry(page, &n->slabs_free, lru) {
 		if (!PageSlabPfmemalloc(page)) {
-			*page_is_free = true;
+			n->free_slabs--;
 			return page;
 		}
 	}
@@ -2919,26 +2916,19 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 {
 	struct page *page;
-	bool page_is_free = false;
 
 	assert_spin_locked(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial,
-			struct page, lru);
+	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
 	if (!page) {
 		n->free_touched = 1;
-		page = list_first_entry_or_null(&n->slabs_free,
-				struct page, lru);
+		page = list_first_entry_or_null(&n->slabs_free, struct page,
+						lru);
 		if (page)
-			page_is_free = true;
+			n->free_slabs--;
 	}
 
 	if (sk_memalloc_socks())
-		page = get_valid_first_slab(n, page, &page_is_free, pfmemalloc);
-
-	if (page && page_is_free) {
-		n->active_slabs++;
-		n->free_slabs--;
-	}
+		page = get_valid_first_slab(n, page, pfmemalloc);
 
 	return page;
 }
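Dropping the page_is_free flag means free_slabs is adjusted at the exact point a page leaves (or re-enters) slabs_free, instead of being reconciled by the caller afterwards. A compilable sketch of the pattern, with hypothetical types standing in for the kernel's:

    /*
     * Sketch of "account where the list changes"; fake_node and
     * fake_page are illustrative only, not kernel structures.
     */
    struct fake_node {
        unsigned long free_slabs;   /* length of the free list */
    };

    struct fake_page {
        int active;                 /* objects allocated from this slab */
    };

    /*
     * Analogue of the slabs_free branch in get_first_slab(): decrement
     * the counter as the page is taken, so no flag travels back out.
     */
    static struct fake_page *take_free_slab(struct fake_node *n,
                                            struct fake_page *page)
    {
        if (page)
            n->free_slabs--;
        return page;
    }

    int main(void)
    {
        struct fake_node n = { .free_slabs = 1 };
        struct fake_page page = { .active = 0 };

        take_free_slab(&n, &page);
        return (int)n.free_slabs;   /* 0: page accounted immediately */
    }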
@@ -3441,7 +3431,6 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		if (page->active == 0) {
 			list_add(&page->lru, &n->slabs_free);
 			n->free_slabs++;
-			n->active_slabs--;
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
@@ -3457,6 +3446,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 			page = list_last_entry(&n->slabs_free, struct page, lru);
 			list_move(&page->lru, list);
 			n->free_slabs--;
+			n->total_slabs--;
 		}
 	}
 
@@ -4109,8 +4099,8 @@ out:
 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 {
 	unsigned long active_objs, num_objs, active_slabs;
-	unsigned long num_slabs = 0, free_objs = 0, shared_avail = 0;
-	unsigned long num_slabs_free = 0;
+	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
+	unsigned long free_slabs = 0;
 	int node;
 	struct kmem_cache_node *n;
 
@@ -4118,9 +4108,8 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		num_slabs += n->active_slabs + n->free_slabs;
-		num_slabs_free += n->free_slabs;
-
+		total_slabs += n->total_slabs;
+		free_slabs += n->free_slabs;
 		free_objs += n->free_objects;
 
 		if (n->shared)
@@ -4128,15 +4117,14 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 
 		spin_unlock_irq(&n->list_lock);
 	}
-	num_objs = num_slabs * cachep->num;
-	active_slabs = num_slabs - num_slabs_free;
-
+	num_objs = total_slabs * cachep->num;
+	active_slabs = total_slabs - free_slabs;
 	active_objs = num_objs - free_objs;
 
 	sinfo->active_objs = active_objs;
 	sinfo->num_objs = num_objs;
 	sinfo->active_slabs = active_slabs;
-	sinfo->num_slabs = num_slabs;
+	sinfo->num_slabs = total_slabs;
 	sinfo->shared_avail = shared_avail;
 	sinfo->limit = cachep->limit;
 	sinfo->batchcount = cachep->batchcount;
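get_slabinfo() now only sums the two counters per node under each node's lock and derives the /proc/slabinfo figures afterwards. A self-contained sketch of that summation and derivation (the node values and objs_per_slab are invented for the example):

    /*
     * Userspace sketch of the per-node aggregation in get_slabinfo();
     * objs_per_slab stands in for cachep->num.
     */
    #include <assert.h>

    struct node_counts {
        unsigned long total_slabs, free_slabs, free_objects;
    };

    int main(void)
    {
        struct node_counts nodes[2] = { { 8, 2, 50 }, { 4, 1, 30 } };
        unsigned long total_slabs = 0, free_slabs = 0, free_objs = 0;
        unsigned long objs_per_slab = 16;

        /* per-node pass: sum counters (under n->list_lock in the kernel) */
        for (int i = 0; i < 2; i++) {
            total_slabs += nodes[i].total_slabs;
            free_slabs += nodes[i].free_slabs;
            free_objs += nodes[i].free_objects;
        }

        /* derivation outside the locks */
        unsigned long num_objs = total_slabs * objs_per_slab;
        unsigned long active_slabs = total_slabs - free_slabs;
        unsigned long active_objs = num_objs - free_objs;

        assert(total_slabs == 12 && active_slabs == 9);
        assert(num_objs == 192 && active_objs == 112);
        return 0;
    }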
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -447,8 +447,8 @@ struct kmem_cache_node {
 	struct list_head slabs_partial;	/* partial list first, better asm code */
 	struct list_head slabs_full;
 	struct list_head slabs_free;
-	unsigned long active_slabs;	/* length of slabs_partial+slabs_full */
-	unsigned long free_slabs;	/* length of slabs_free */
+	unsigned long total_slabs;	/* length of all slab lists */
+	unsigned long free_slabs;	/* length of free slab list only */
 	unsigned long free_objects;
 	unsigned int free_limit;
 	unsigned int colour_next;	/* Per-node cache coloring */