Diffstat (limited to 'mm')
 -rw-r--r--  mm/slab.c  56
 -rw-r--r--  mm/slub.c  26
 2 files changed, 73 insertions, 9 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1731,6 +1731,52 @@ static int __init cpucache_init(void)
 }
 __initcall(cpucache_init);
 
+static noinline void
+slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
+{
+        struct kmem_list3 *l3;
+        struct slab *slabp;
+        unsigned long flags;
+        int node;
+
+        printk(KERN_WARNING
+                "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+                nodeid, gfpflags);
+        printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n",
+                cachep->name, cachep->buffer_size, cachep->gfporder);
+
+        for_each_online_node(node) {
+                unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
+                unsigned long active_slabs = 0, num_slabs = 0;
+
+                l3 = cachep->nodelists[node];
+                if (!l3)
+                        continue;
+
+                spin_lock_irqsave(&l3->list_lock, flags);
+                list_for_each_entry(slabp, &l3->slabs_full, list) {
+                        active_objs += cachep->num;
+                        active_slabs++;
+                }
+                list_for_each_entry(slabp, &l3->slabs_partial, list) {
+                        active_objs += slabp->inuse;
+                        active_slabs++;
+                }
+                list_for_each_entry(slabp, &l3->slabs_free, list)
+                        num_slabs++;
+
+                free_objects += l3->free_objects;
+                spin_unlock_irqrestore(&l3->list_lock, flags);
+
+                num_slabs += active_slabs;
+                num_objs = num_slabs * cachep->num;
+                printk(KERN_WARNING
+                        " node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
+                        node, active_slabs, num_slabs, active_objs, num_objs,
+                        free_objects);
+        }
+}
+
 /*
  * Interface to system's page allocator. No need to hold the cache-lock.
  *
@@ -1757,8 +1803,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
                 flags |= __GFP_RECLAIMABLE;
 
         page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
-        if (!page)
+        if (!page) {
+                if (!(flags & __GFP_NOWARN) && printk_ratelimit())
+                        slab_out_of_memory(cachep, flags, nodeid);
                 return NULL;
+        }
 
         nr_pages = (1 << cachep->gfporder);
         if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
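Note: the hunk above gates the new diagnostic on __GFP_NOWARN and printk_ratelimit(), so callers that expect failures stay quiet and repeated failures cannot flood the log. Below is a minimal sketch of the same pattern outside the slab allocator; my_alloc_pages() and MY_ORDER are made-up names used only for illustration.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#define MY_ORDER 2                      /* hypothetical allocation order */

static struct page *my_alloc_pages(gfp_t flags, int nid)
{
        struct page *page = alloc_pages_node(nid, flags, MY_ORDER);

        /* warn only if the caller did not opt out, and rate-limit the message */
        if (!page && !(flags & __GFP_NOWARN) && printk_ratelimit())
                printk(KERN_WARNING
                       "my_alloc: order-%d allocation failed on node %d\n",
                       MY_ORDER, nid);
        return page;
}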
@@ -3696,13 +3745,12 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 
         if (likely(ac->avail < ac->limit)) {
                 STATS_INC_FREEHIT(cachep);
-                ac->entry[ac->avail++] = objp;
-                return;
         } else {
                 STATS_INC_FREEMISS(cachep);
                 cache_flusharray(cachep, ac);
-                ac->entry[ac->avail++] = objp;
         }
+
+        ac->entry[ac->avail++] = objp;
 }
 
 /**
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -29,6 +29,7 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 #include <linux/stacktrace.h>
+#include <linux/prefetch.h>
 
 #include <trace/events/kmem.h>
 
@@ -269,6 +270,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
         return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+        prefetch(object + s->offset);
+}
+
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
         void *p;
@@ -1560,6 +1566,7 @@ static void *get_partial_node(struct kmem_cache *s,
                 } else {
                         page->freelist = t;
                         available = put_cpu_partial(s, page, 0);
+                        stat(s, CPU_PARTIAL_NODE);
                 }
                 if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
                         break;
@@ -1983,6 +1990,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                                 local_irq_restore(flags);
                                 pobjects = 0;
                                 pages = 0;
+                                stat(s, CPU_PARTIAL_DRAIN);
                         }
                 }
 
@@ -1994,7 +2002,6 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                 page->next = oldpage;
 
         } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-        stat(s, CPU_PARTIAL_FREE);
         return pobjects;
 }
 
@@ -2327,6 +2334,8 @@ redo:
                 object = __slab_alloc(s, gfpflags, node, addr, c);
 
         else {
+                void *next_object = get_freepointer_safe(s, object);
+
                 /*
                  * The cmpxchg will only match if there was no additional
                  * operation and if we are on the right processor.
@@ -2342,11 +2351,12 @@ redo:
                 if (unlikely(!this_cpu_cmpxchg_double(
                                 s->cpu_slab->freelist, s->cpu_slab->tid,
                                 object, tid,
-                                get_freepointer_safe(s, object), next_tid(tid)))) {
+                                next_object, next_tid(tid)))) {
 
                         note_cmpxchg_failure("slab_alloc", s, tid);
                         goto redo;
                 }
+                prefetch_freepointer(s, next_object);
                 stat(s, ALLOC_FASTPATH);
         }
 
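Note: because next_object is read once before the cmpxchg, the patch can issue prefetch_freepointer() after the fastpath commits, so the object the next allocation will dereference is already being pulled into the cache. Below is a minimal sketch of the same prefetch-ahead idea on an ordinary linked list; struct item and handle_item() are made-up names used only for illustration.

#include <linux/prefetch.h>

struct item {
        struct item *next;
        int payload;
};

static void handle_item(struct item *p);        /* hypothetical per-item work */

static void walk_items(struct item *head)
{
        struct item *p;

        for (p = head; p; p = p->next) {
                if (p->next)
                        prefetch(p->next);      /* start fetching the next node early */
                handle_item(p);                 /* overlap the work with the memory access */
        }
}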
@@ -2483,9 +2493,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                          * If we just froze the page then put it onto the
                          * per cpu partial list.
                          */
-                        if (new.frozen && !was_frozen)
+                        if (new.frozen && !was_frozen) {
                                 put_cpu_partial(s, page, 1);
-
+                                stat(s, CPU_PARTIAL_FREE);
+                        }
                         /*
                          * The list lock was not taken therefore no list
                          * activity can be necessary.
@@ -3947,13 +3958,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                 if (kmem_cache_open(s, n,
                                 size, align, flags, ctor)) {
                         list_add(&s->list, &slab_caches);
+                        up_write(&slub_lock);
                         if (sysfs_slab_add(s)) {
+                                down_write(&slub_lock);
                                 list_del(&s->list);
                                 kfree(n);
                                 kfree(s);
                                 goto err;
                         }
-                        up_write(&slub_lock);
                         return s;
                 }
                 kfree(n);
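Note: the kmem_cache_create() hunk drops slub_lock before calling sysfs_slab_add() and re-takes it only on the error path to undo the list insertion, instead of holding the semaphore across the sysfs call. Below is a minimal sketch of that shape with a hypothetical registration helper; my_lock, my_list, struct my_obj and my_sysfs_register() are made-up names.

#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

struct my_obj {
        struct list_head list;
};

static DECLARE_RWSEM(my_lock);
static LIST_HEAD(my_list);

static int my_sysfs_register(struct my_obj *obj);       /* hypothetical */

static struct my_obj *my_register(struct my_obj *obj)
{
        down_write(&my_lock);
        list_add(&obj->list, &my_list);
        up_write(&my_lock);                     /* do not hold the lock across sysfs */

        if (my_sysfs_register(obj)) {
                down_write(&my_lock);           /* error path: undo the insertion */
                list_del(&obj->list);
                up_write(&my_lock);
                kfree(obj);
                return NULL;
        }
        return obj;
}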
@@ -5077,6 +5089,8 @@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
+STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -5142,6 +5156,8 @@ static struct attribute *slab_attrs[] = {
         &cmpxchg_double_cpu_fail_attr.attr,
         &cpu_partial_alloc_attr.attr,
         &cpu_partial_free_attr.attr,
+        &cpu_partial_node_attr.attr,
+        &cpu_partial_drain_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
         &failslab_attr.attr,