Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  46
1 file changed, 28 insertions, 18 deletions
diff --git a/mm/slab.c b/mm/slab.c
index c32af7e7581e..f1b644eb39d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
 /*
  * struct slab
  *
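
For background (not something this patch changes): a cache flagged CFLGS_OFF_SLAB keeps its slab management data in a separately allocated buffer instead of at the start of the slab's own pages. That buffer is a struct slab immediately followed by the kmem_bufctl_t free-list array, which is what offslab_limit bounds. A rough sketch of the layout, assuming the struct slab and slab_bufctl() definitions found elsewhere in this file:

        /*
         * Off-slab slab management buffer (illustrative layout only):
         *
         *   +-------------+------------------------------------+
         *   | struct slab | kmem_bufctl_t bufctl[cachep->num]  |
         *   +-------------+------------------------------------+
         *
         * slab_bufctl() returns the array just past the struct, so the
         * number of objects one slab can track is limited by how many
         * kmem_bufctl_t entries fit behind struct slab in this buffer.
         */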
@@ -700,6 +695,14 @@ static enum {
 	FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
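
slab_is_available() gives boot-time code a cheap way to ask whether kmem_cache_init() has brought the slab allocator up before calling kmalloc(). A minimal sketch of the kind of caller this enables; the helper name early_alloc_buf() and the bootmem fallback are illustrative, not taken from this patch:

        /* Assumes <linux/slab.h> and <linux/bootmem.h>. */
        static void *early_alloc_buf(unsigned long len)
        {
                /* g_cpucache_up == FULL means kmalloc() is usable. */
                if (slab_is_available())
                        return kmalloc(len, GFP_KERNEL);

                /* Too early for slab: fall back to the boot allocator. */
                return alloc_bootmem(len);
        }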
@@ -1348,12 +1351,6 @@ void __init kmem_cache_init(void)
 					NULL, NULL);
 		}
 
-		/* Inc off-slab bufctl limit until the ceiling is hit. */
-		if (!(OFF_SLAB(sizes->cs_cachep))) {
-			offslab_limit = sizes->cs_size - sizeof(struct slab);
-			offslab_limit /= sizeof(kmem_bufctl_t);
-		}
-
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -1772,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
+	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;
 
@@ -1783,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
-		/* More than offslab_limit objects will cause problems */
-		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-			break;
+		if (flags & CFLGS_OFF_SLAB) {
+			/*
+			 * Max number of objs-per-slab for caches which
+			 * use off-slab slabs. Needed to avoid a possible
+			 * looping condition in cache_grow().
+			 */
+			offslab_limit = size - sizeof(struct slab);
+			offslab_limit /= sizeof(kmem_bufctl_t);
+
+			if (num > offslab_limit)
+				break;
+		}
 
 		/* Found something acceptable - save it away */
 		cachep->num = num;
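
Since offslab_limit is now computed from the size handed to calculate_slab_order(), the check no longer relies on the global that kmem_cache_init() used to leave behind. A rough worked example of the arithmetic, with sizes assumed for a 32-bit build (both sizeof values vary by architecture and config):

        size_t size = 512;              /* e.g. an off-slab cache: size >= PAGE_SIZE >> 3 with 4K pages */
        size_t slab_hdr = 28;           /* assumed sizeof(struct slab) */
        size_t bufctl = 4;              /* assumed sizeof(kmem_bufctl_t) */
        unsigned long limit = (size - slab_hdr) / bufctl;      /* (512 - 28) / 4 = 121 */

        /* A gfporder whose num exceeds 121 is rejected by the break above. */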
@@ -2192,11 +2199,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3) {
-			drain_array(cachep, l3, l3->shared, 1, node);
-			if (l3->alien)
-				drain_alien_cache(cachep, l3->alien);
-		}
+		if (l3 && l3->alien)
+			drain_alien_cache(cachep, l3->alien);
+	}
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (l3)
+			drain_array(cachep, l3, l3->shared, 1, node);
 	}
 }
 
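
With this change the alien caches are flushed for every online node before any node's shared array is drained, rather than interleaved per node. drain_cpu_caches() is static to mm/slab.c, so outside callers reach it indirectly; one such route is kmem_cache_shrink(), as in this illustrative caller (the cache pointer and function name are made up for the sketch):

        /* foo_cachep would have been created earlier with kmem_cache_create(). */
        static struct kmem_cache *foo_cachep;

        static void foo_release_cached_objects(void)
        {
                /*
                 * kmem_cache_shrink() drains the per-CPU and per-node
                 * caches (reaching drain_cpu_caches()) and then frees
                 * any slabs left completely unused.
                 */
                kmem_cache_shrink(foo_cachep);
        }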