diff options
author | Aruna Ramakrishna <aruna.ramakrishna@oracle.com> | 2016-10-27 20:46:32 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-27 21:43:43 -0400 |
commit | 07a63c41fa1f6533f5668e5b33a295bfd63aa534 (patch) | |
tree | baccc737b4c09ce0abdc4bb0209ab9eccb2f9612 /mm/slab.c | |
parent | 1f84a18fc010d7a62667199c9be35872bbf31526 (diff) |
mm/slab: improve performance of gathering slabinfo stats
On large systems, when some slab caches grow to millions of objects (and
many gigabytes), running 'cat /proc/slabinfo' can take up to 1-2
seconds. During this time, interrupts are disabled while walking the
slab lists (slabs_full, slabs_partial, and slabs_free) for each node,
and this sometimes causes timeouts in other drivers (for instance,
Infiniband).
This patch optimizes 'cat /proc/slabinfo' by maintaining a counter for
total number of allocated slabs per node, per cache. This counter is
updated when a slab is created or destroyed. This enables us to skip
traversing the slabs_full list while gathering slabinfo statistics, and
since slabs_full tends to be the biggest list when the cache is large,
it results in a dramatic performance improvement. Getting slabinfo
statistics now only requires walking the slabs_free and slabs_partial
lists, and those lists are usually much smaller than slabs_full.
We tested this after growing the dentry cache to 70GB, and the
performance improved from 2s to 5ms.
Link: http://lkml.kernel.org/r/1472517876-26814-1-git-send-email-aruna.ramakrishna@oracle.com
Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 43 |
1 file changed, 27 insertions(+), 16 deletions(-)
@@ -233,6 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) | |||
233 | spin_lock_init(&parent->list_lock); | 233 | spin_lock_init(&parent->list_lock); |
234 | parent->free_objects = 0; | 234 | parent->free_objects = 0; |
235 | parent->free_touched = 0; | 235 | parent->free_touched = 0; |
236 | parent->num_slabs = 0; | ||
236 | } | 237 | } |
237 | 238 | ||
238 | #define MAKE_LIST(cachep, listp, slab, nodeid) \ | 239 | #define MAKE_LIST(cachep, listp, slab, nodeid) \ |
@@ -1382,24 +1383,27 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | |||
1382 | for_each_kmem_cache_node(cachep, node, n) { | 1383 | for_each_kmem_cache_node(cachep, node, n) { |
1383 | unsigned long active_objs = 0, num_objs = 0, free_objects = 0; | 1384 | unsigned long active_objs = 0, num_objs = 0, free_objects = 0; |
1384 | unsigned long active_slabs = 0, num_slabs = 0; | 1385 | unsigned long active_slabs = 0, num_slabs = 0; |
1386 | unsigned long num_slabs_partial = 0, num_slabs_free = 0; | ||
1387 | unsigned long num_slabs_full; | ||
1385 | 1388 | ||
1386 | spin_lock_irqsave(&n->list_lock, flags); | 1389 | spin_lock_irqsave(&n->list_lock, flags); |
1387 | list_for_each_entry(page, &n->slabs_full, lru) { | 1390 | num_slabs = n->num_slabs; |
1388 | active_objs += cachep->num; | ||
1389 | active_slabs++; | ||
1390 | } | ||
1391 | list_for_each_entry(page, &n->slabs_partial, lru) { | 1391 | list_for_each_entry(page, &n->slabs_partial, lru) { |
1392 | active_objs += page->active; | 1392 | active_objs += page->active; |
1393 | active_slabs++; | 1393 | num_slabs_partial++; |
1394 | } | 1394 | } |
1395 | list_for_each_entry(page, &n->slabs_free, lru) | 1395 | list_for_each_entry(page, &n->slabs_free, lru) |
1396 | num_slabs++; | 1396 | num_slabs_free++; |
1397 | 1397 | ||
1398 | free_objects += n->free_objects; | 1398 | free_objects += n->free_objects; |
1399 | spin_unlock_irqrestore(&n->list_lock, flags); | 1399 | spin_unlock_irqrestore(&n->list_lock, flags); |
1400 | 1400 | ||
1401 | num_slabs += active_slabs; | ||
1402 | num_objs = num_slabs * cachep->num; | 1401 | num_objs = num_slabs * cachep->num; |
1402 | active_slabs = num_slabs - num_slabs_free; | ||
1403 | num_slabs_full = num_slabs - | ||
1404 | (num_slabs_partial + num_slabs_free); | ||
1405 | active_objs += (num_slabs_full * cachep->num); | ||
1406 | |||
1403 | pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n", | 1407 | pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n", |
1404 | node, active_slabs, num_slabs, active_objs, num_objs, | 1408 | node, active_slabs, num_slabs, active_objs, num_objs, |
1405 | free_objects); | 1409 | free_objects); |
@@ -2314,6 +2318,7 @@ static int drain_freelist(struct kmem_cache *cache, | |||
2314 | 2318 | ||
2315 | page = list_entry(p, struct page, lru); | 2319 | page = list_entry(p, struct page, lru); |
2316 | list_del(&page->lru); | 2320 | list_del(&page->lru); |
2321 | n->num_slabs--; | ||
2317 | /* | 2322 | /* |
2318 | * Safe to drop the lock. The slab is no longer linked | 2323 | * Safe to drop the lock. The slab is no longer linked |
2319 | * to the cache. | 2324 | * to the cache. |
@@ -2752,6 +2757,8 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page) | |||
2752 | list_add_tail(&page->lru, &(n->slabs_free)); | 2757 | list_add_tail(&page->lru, &(n->slabs_free)); |
2753 | else | 2758 | else |
2754 | fixup_slab_list(cachep, n, page, &list); | 2759 | fixup_slab_list(cachep, n, page, &list); |
2760 | |||
2761 | n->num_slabs++; | ||
2755 | STATS_INC_GROWN(cachep); | 2762 | STATS_INC_GROWN(cachep); |
2756 | n->free_objects += cachep->num - page->active; | 2763 | n->free_objects += cachep->num - page->active; |
2757 | spin_unlock(&n->list_lock); | 2764 | spin_unlock(&n->list_lock); |
@@ -3443,6 +3450,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, | |||
3443 | 3450 | ||
3444 | page = list_last_entry(&n->slabs_free, struct page, lru); | 3451 | page = list_last_entry(&n->slabs_free, struct page, lru); |
3445 | list_move(&page->lru, list); | 3452 | list_move(&page->lru, list); |
3453 | n->num_slabs--; | ||
3446 | } | 3454 | } |
3447 | } | 3455 | } |
3448 | 3456 | ||
@@ -4099,6 +4107,8 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4099 | unsigned long num_objs; | 4107 | unsigned long num_objs; |
4100 | unsigned long active_slabs = 0; | 4108 | unsigned long active_slabs = 0; |
4101 | unsigned long num_slabs, free_objects = 0, shared_avail = 0; | 4109 | unsigned long num_slabs, free_objects = 0, shared_avail = 0; |
4110 | unsigned long num_slabs_partial = 0, num_slabs_free = 0; | ||
4111 | unsigned long num_slabs_full = 0; | ||
4102 | const char *name; | 4112 | const char *name; |
4103 | char *error = NULL; | 4113 | char *error = NULL; |
4104 | int node; | 4114 | int node; |
@@ -4111,33 +4121,34 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4111 | check_irq_on(); | 4121 | check_irq_on(); |
4112 | spin_lock_irq(&n->list_lock); | 4122 | spin_lock_irq(&n->list_lock); |
4113 | 4123 | ||
4114 | list_for_each_entry(page, &n->slabs_full, lru) { | 4124 | num_slabs += n->num_slabs; |
4115 | if (page->active != cachep->num && !error) | 4125 | |
4116 | error = "slabs_full accounting error"; | ||
4117 | active_objs += cachep->num; | ||
4118 | active_slabs++; | ||
4119 | } | ||
4120 | list_for_each_entry(page, &n->slabs_partial, lru) { | 4126 | list_for_each_entry(page, &n->slabs_partial, lru) { |
4121 | if (page->active == cachep->num && !error) | 4127 | if (page->active == cachep->num && !error) |
4122 | error = "slabs_partial accounting error"; | 4128 | error = "slabs_partial accounting error"; |
4123 | if (!page->active && !error) | 4129 | if (!page->active && !error) |
4124 | error = "slabs_partial accounting error"; | 4130 | error = "slabs_partial accounting error"; |
4125 | active_objs += page->active; | 4131 | active_objs += page->active; |
4126 | active_slabs++; | 4132 | num_slabs_partial++; |
4127 | } | 4133 | } |
4134 | |||
4128 | list_for_each_entry(page, &n->slabs_free, lru) { | 4135 | list_for_each_entry(page, &n->slabs_free, lru) { |
4129 | if (page->active && !error) | 4136 | if (page->active && !error) |
4130 | error = "slabs_free accounting error"; | 4137 | error = "slabs_free accounting error"; |
4131 | num_slabs++; | 4138 | num_slabs_free++; |
4132 | } | 4139 | } |
4140 | |||
4133 | free_objects += n->free_objects; | 4141 | free_objects += n->free_objects; |
4134 | if (n->shared) | 4142 | if (n->shared) |
4135 | shared_avail += n->shared->avail; | 4143 | shared_avail += n->shared->avail; |
4136 | 4144 | ||
4137 | spin_unlock_irq(&n->list_lock); | 4145 | spin_unlock_irq(&n->list_lock); |
4138 | } | 4146 | } |
4139 | num_slabs += active_slabs; | ||
4140 | num_objs = num_slabs * cachep->num; | 4147 | num_objs = num_slabs * cachep->num; |
4148 | active_slabs = num_slabs - num_slabs_free; | ||
4149 | num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free); | ||
4150 | active_objs += (num_slabs_full * cachep->num); | ||
4151 | |||
4141 | if (num_objs - active_objs != free_objects && !error) | 4152 | if (num_objs - active_objs != free_objects && !error) |
4142 | error = "free_objects accounting error"; | 4153 | error = "free_objects accounting error"; |
4143 | 4154 | ||