author     Wei Yang <richard.weiyang@gmail.com>            2017-07-06 18:36:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-06 19:24:30 -0400
commit     a93cf07bc3fb4e7bc924d33c387dabc85086ea38 (patch)
tree       ed8c028af545abf8743afcff1000758cbd947d28 /mm/slub.c
parent     d3111e6cce6001e71ddc4737d0d412c2300043a2 (diff)
mm/slub.c: wrap cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL
cpu_slab's field partial is only used when CONFIG_SLUB_CPU_PARTIAL is
set, which means a pointer's worth of space can be saved on each cpu for
every slub cache when that option is disabled.
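For context, the saving comes from compiling the field out of struct
kmem_cache_cpu altogether; the slub_def.h side of the change (not shown
in this view, which is filtered to mm/slub.c) is a sketch along these
lines:

struct kmem_cache_cpu {
        void **freelist;        /* Pointer to next available object */
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct page *partial;   /* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLAB_STAT_ITEMS];
#endif
};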
This patch wraps cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL and wraps
its sysfs use too.
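The call sites below go through new accessors so that mm/slub.c needs no
#ifdef of its own. Their include/linux/slub_def.h definitions (also
filtered out of this view) look along these lines; with
CONFIG_SLUB_CPU_PARTIAL disabled they degrade to constants, so the
branches that use them compile away:

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)          ((c)->partial)

/* Pop the head page: the per-cpu list is linked through page->next. */
#define slub_set_percpu_partial(c, p)           \
({                                              \
        slub_percpu_partial(c) = (p)->next;     \
})

#define slub_percpu_partial_read_once(c)        READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)                  NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)        NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */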
[akpm@linux-foundation.org: avoid strange 80-col tricks]
Link: http://lkml.kernel.org/r/20170502144533.10729-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')

-rw-r--r--  mm/slub.c  18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2303,7 +2303,7 @@ static bool has_cpu_slab(int cpu, void *info)
 	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-	return c->page || c->partial;
+	return c->page || slub_percpu_partial(c);
 }
 
 static void flush_all(struct kmem_cache *s)
@@ -2565,9 +2565,9 @@ load_freelist:
 
 new_slab:
 
-	if (c->partial) {
-		page = c->page = c->partial;
-		c->partial = page->next;
+	if (slub_percpu_partial(c)) {
+		page = c->page = slub_percpu_partial(c);
+		slub_set_percpu_partial(c, page);
 		stat(s, CPU_PARTIAL_ALLOC);
 		goto redo;
 	}
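Note that slub_set_percpu_partial(c, page) is not a plain assignment of
page: per the sketch above it expands to c->partial = page->next, so the
new_slab path still pops the head page off the per-cpu partial list
exactly as the removed open-coded line did.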
@@ -4754,7 +4754,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;
 
-			page = READ_ONCE(c->partial);
+			page = slub_percpu_partial_read_once(c);
 			if (page) {
 				node = page_to_nid(page);
 				if (flags & SO_TOTAL)
@@ -4982,7 +4982,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 	int len;
 
 	for_each_online_cpu(cpu) {
-		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+		struct page *page;
+
+		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
 		if (page) {
 			pages += page->pages;
@@ -4994,7 +4996,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 
 #ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
-		struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+		struct page *page;
+
+		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
 		if (page && len < PAGE_SIZE - 20)
 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
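The sysfs side is behaviorally unchanged: with CONFIG_SLUB_CPU_PARTIAL
enabled, reading /sys/kernel/slab/<cache>/slabs_cpu_partial (the
attribute backed by slabs_cpu_partial_show() above) still emits one
" C%d=%d(%d)" entry per cpu that has a non-empty partial list. With it
disabled, slub_percpu_partial() is constant NULL, the if (page) branches
are never taken, and the per-cpu entries simply disappear from the
output.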
