Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	26
1 file changed, 21 insertions, 5 deletions
@@ -29,6 +29,7 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 #include <linux/stacktrace.h>
+#include <linux/prefetch.h>
 
 #include <trace/events/kmem.h>
 
@@ -269,6 +270,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
 	return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+	prefetch(object + s->offset);
+}
+
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
 	void *p;
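Note (not part of the diff): prefetch_freepointer() issues a software prefetch for object + s->offset, i.e. the word that holds that object's freelist link, so the line is likely cache-hot by the time the next allocation dereferences it. A self-contained userspace sketch of the same one-ahead prefetch idea, using __builtin_prefetch instead of the kernel's prefetch() and an illustrative node type that is not from mm/slub.c:

struct node { struct node *next; };

static void walk_freelist(struct node *head)
{
	for (struct node *p = head; p; p = p->next) {
		__builtin_prefetch(p->next);	/* warm the next link before we need it */
		/* ... use the object at p ... */
	}
}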
@@ -1560,6 +1566,7 @@ static void *get_partial_node(struct kmem_cache *s,
 		} else {
 			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
+			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
 			break;
@@ -1983,6 +1990,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 				local_irq_restore(flags);
 				pobjects = 0;
 				pages = 0;
+				stat(s, CPU_PARTIAL_DRAIN);
 			}
 		}
 
@@ -1994,7 +2002,6 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-	stat(s, CPU_PARTIAL_FREE);
 	return pobjects;
 }
 
@@ -2319,6 +2326,8 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
+		void *next_object = get_freepointer_safe(s, object);
+
 		/*
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
@@ -2334,11 +2343,12 @@ redo:
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				object, tid,
-				get_freepointer_safe(s, object), next_tid(tid)))) {
+				next_object, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
+		prefetch_freepointer(s, next_object);
 		stat(s, ALLOC_FASTPATH);
 	}
 
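Note (not part of the diff): next_object is read with get_freepointer_safe() before the cmpxchg_double, so the very value installed as the new freelist head is also the one prefetched, and the prefetch is issued only after the transaction succeeds, so a retry never touches a stale pointer. The kernel additionally pairs the pointer with a transaction id (tid) to catch preemption and ABA, which the self-contained userspace sketch below omits; it shows just the ordering (read link, CAS, then prefetch), with illustrative names that are not from mm/slub.c:

#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

static struct node *pop(_Atomic(struct node *) *head)
{
	struct node *old = atomic_load(head);
	struct node *next;

	do {
		if (!old)
			return NULL;
		next = old->next;		/* read the link before the CAS */
	} while (!atomic_compare_exchange_weak(head, &old, next));

	__builtin_prefetch(next);		/* warm the new head for the next pop */
	return old;
}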
@@ -2475,9 +2485,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			 * If we just froze the page then put it onto the
 			 * per cpu partial list.
 			 */
-			if (new.frozen && !was_frozen)
+			if (new.frozen && !was_frozen) {
 				put_cpu_partial(s, page, 1);
-
+				stat(s, CPU_PARTIAL_FREE);
+			}
 			/*
 			 * The list lock was not taken therefore no list
 			 * activity can be necessary.
@@ -3939,13 +3950,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, n,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
+			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
+				down_write(&slub_lock);
 				list_del(&s->list);
 				kfree(n);
 				kfree(s);
 				goto err;
 			}
-			up_write(&slub_lock);
 			return s;
 		}
 		kfree(n);
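Note (not part of the diff): after this reordering, slub_lock is no longer held across sysfs_slab_add(), which can block and may itself allocate memory; the lock is re-taken only on the failure path so the half-created cache can be unlinked from slab_caches again. A minimal sketch of that publish / unlock / register / roll-back-under-lock pattern, with illustrative names (my_lock, my_list, my_register(), struct my_obj) that are not kernel APIs:

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/errno.h>

static DECLARE_RWSEM(my_lock);
static LIST_HEAD(my_list);

struct my_obj { struct list_head list; };

int my_register(struct my_obj *obj);	/* stands in for sysfs_slab_add(); may sleep */

int my_create(struct my_obj *obj)
{
	down_write(&my_lock);
	list_add(&obj->list, &my_list);	/* publish under the lock */
	up_write(&my_lock);		/* drop it before the blocking call */

	if (my_register(obj)) {
		down_write(&my_lock);
		list_del(&obj->list);	/* roll back under the lock */
		up_write(&my_lock);
		return -ENOMEM;
	}
	return 0;
}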
@@ -5069,6 +5081,8 @@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
+STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -5134,6 +5148,8 @@ static struct attribute *slab_attrs[] = {
 	&cmpxchg_double_cpu_fail_attr.attr,
 	&cpu_partial_alloc_attr.attr,
 	&cpu_partial_free_attr.attr,
+	&cpu_partial_node_attr.attr,
+	&cpu_partial_drain_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
 	&failslab_attr.attr,
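Note (not part of the diff): with the stat(s, CPU_PARTIAL_FREE) call moved from put_cpu_partial() into __slab_free(), that counter now counts only frees that actually park a newly frozen page on the per-cpu partial list, while the new CPU_PARTIAL_NODE counts pages pulled onto it from a node partial list and CPU_PARTIAL_DRAIN counts flushes back to the node lists. The stat() calls and STAT_ATTR() entries above presuppose matching values in the stat_item enum (in include/linux/slub_def.h, outside this diff); a sketch of that companion change:

enum stat_item {
	/* ... existing counters, ending with ... */
	CPU_PARTIAL_ALLOC,	/* allocation served from a per-cpu partial page */
	CPU_PARTIAL_FREE,	/* free parked a frozen page on the per-cpu partial list */
	CPU_PARTIAL_NODE,	/* per-cpu partial list refilled from a node partial list */
	CPU_PARTIAL_DRAIN,	/* per-cpu partial list drained back to the node lists */
	NR_SLUB_STAT_ITEMS };

With CONFIG_SLUB_STATS enabled, the two new counters should then appear as /sys/kernel/slab/<cache>/cpu_partial_node and /sys/kernel/slab/<cache>/cpu_partial_drain.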