Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 51 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 20 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd52335..f1b644eb39d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
 /*
  * struct slab
  *
@@ -700,6 +695,14 @@ static enum {
 	FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
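The new slab_is_available() predicate gives early-boot code a cheap test for whether kmem_cache_init() has run to completion. A minimal sketch of the intended call pattern; boot_alloc() is a hypothetical caller, not part of the patch:

	static void *boot_alloc(unsigned long size)
	{
		/* Fall back to the bootmem allocator until slab is up. */
		if (slab_is_available())
			return kmalloc(size, GFP_KERNEL);
		return alloc_bootmem(size);
	}
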
@@ -979,7 +982,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 	 * That way we could avoid the overhead of putting the objects
 	 * into the free lists and getting them back later.
 	 */
-	transfer_objects(rl3->shared, ac, ac->limit);
+	if (rl3->shared)
+		transfer_objects(rl3->shared, ac, ac->limit);
 
 	free_block(cachep, ac->entry, ac->avail, node);
 	ac->avail = 0;
@@ -1036,7 +1040,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1347,12 +1351,6 @@ void __init kmem_cache_init(void)
 					NULL, NULL);
 		}
 
-		/* Inc off-slab bufctl limit until the ceiling is hit. */
-		if (!(OFF_SLAB(sizes->cs_cachep))) {
-			offslab_limit = sizes->cs_size - sizeof(struct slab);
-			offslab_limit /= sizeof(kmem_bufctl_t);
-		}
-
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -1771,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
+	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;
 
@@ -1782,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
-		/* More than offslab_limit objects will cause problems */
-		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-			break;
+		if (flags & CFLGS_OFF_SLAB) {
+			/*
+			 * Max number of objs-per-slab for caches which
+			 * use off-slab slabs. Needed to avoid a possible
+			 * looping condition in cache_grow().
+			 */
+			offslab_limit = size - sizeof(struct slab);
+			offslab_limit /= sizeof(kmem_bufctl_t);
+
+			if (num > offslab_limit)
+				break;
+		}
 
 		/* Found something acceptable - save it away */
 		cachep->num = num;
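To make the new per-cache ceiling concrete: kmem_bufctl_t is a typedef for unsigned int (4 bytes on the usual configurations), so, taking an illustrative 32 bytes for sizeof(struct slab), a cache of 4096-byte objects is capped at

	offslab_limit = (4096 - 32) / 4 = 1016	/* objects per slab; sizes illustrative */

and the loop in calculate_slab_order() stops raising gfporder before num can exceed that bound. Deriving the limit from the size at hand also replaces the old file-scope offslab_limit, which was last written while kmem_cache_init() walked the kmalloc sizes and then applied, whatever its final value, to every cache created afterwards.
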
@@ -2191,11 +2199,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3) {
-			drain_array(cachep, l3, l3->shared, 1, node);
-			if (l3->alien)
-				drain_alien_cache(cachep, l3->alien);
-		}
+		if (l3 && l3->alien)
+			drain_alien_cache(cachep, l3->alien);
+	}
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (l3)
+			drain_array(cachep, l3, l3->shared, 1, node);
 	}
 }
 
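The last hunk turns drain_cpu_caches() into two full passes over the online nodes: every alien cache is emptied before any shared array is drained. The patch carries no commentary, but a plausible reading follows from the transfer_objects() hunk above: draining one node's alien caches pushes objects back toward their home nodes' shared arrays, so the old per-node interleaving could refill a shared array that had already been drained. A stand-alone model of the reordered walk; the types and helpers below are illustrative, not the kernel's:

	struct node_lists { void *shared; void *alien; };

	static void drain_alien(void *alien)   { /* send foreign objects home */ }
	static void drain_shared(void *shared) { /* flush shared array to free lists */ }

	static void drain_all_nodes(struct node_lists **nodes, int nr)
	{
		int node;

		/* Pass 1: empty every node's alien caches first. */
		for (node = 0; node < nr; node++)
			if (nodes[node] && nodes[node]->alien)
				drain_alien(nodes[node]->alien);

		/* Pass 2: each shared array now stays empty once drained. */
		for (node = 0; node < nr; node++)
			if (nodes[node])
				drain_shared(nodes[node]->shared);
	}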