path: root/mm/slub.c
author    Christoph Lameter <clameter@sgi.com>  2008-04-14 12:11:40 -0400
committer Pekka Enberg <penberg@cs.helsinki.fi> 2008-04-27 11:28:18 -0400
commit    65c3376aaca96c66aa76014aaf430398964b68cb (patch)
tree      9cd8d64a2cf211e76afbad0de5ec1484af6bfafb /mm/slub.c
parent    205ab99dd103e3dd5b0964dad8a16dfe2db69b2e (diff)
slub: Fallback to minimal order during slab page allocation
If any higher order allocation fails, fall back to the smallest order necessary to contain at least one object. This enables fallback for all allocations to order-0 pages. The fallback will waste more memory (objects will not fit neatly) and the fallback slabs will not be as efficient as larger slabs, since they contain fewer objects.

Note that SLAB also depends on order-1 allocations for some slabs that waste too much memory if forced into a PAGE_SIZE'd page. SLUB can now deal with failing order-1 allocations, which SLAB cannot do.

Add a new field, min, that records the order and object count for the smallest possible order of a slab cache.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
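To make the order arithmetic concrete, here is a small userspace C sketch; it is an illustration only, with PAGE_SIZE, min_order() and objects_per_slab() as local stand-ins for the kernel's get_order() and the packing recorded by oo_make():

/*
 * Userspace sketch (not kernel code): given an object size and a
 * preferred slab order, compute the minimal fallback order that the
 * patch stores in s->min (the smallest order whose pages hold at
 * least one object) and the object count at each order.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size */

/* Smallest order such that (PAGE_SIZE << order) >= size,
 * analogous to the kernel's get_order(). */
static unsigned int min_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

/* Objects that fit in a slab of the given order. */
static unsigned long objects_per_slab(unsigned int order, unsigned long size)
{
	return (PAGE_SIZE << order) / size;
}

int main(void)
{
	unsigned long size = 5000;	/* example object size in bytes */
	unsigned int preferred = 3;	/* example preferred slab order */
	unsigned int min = min_order(size);

	printf("preferred order %u: %lu objects per slab\n",
	       preferred, objects_per_slab(preferred, size));
	printf("fallback order %u: %lu objects per slab\n",
	       min, objects_per_slab(min, size));
	return 0;
}

For a 5000-byte object this prints 6 objects per slab at the preferred order 3, but only a single object at the fallback order 1, which is exactly the packing loss the commit message describes.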
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c8514e93ffdf..35c22d940ba7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1113,28 +1113,43 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 /*
  * Slab allocation and freeing
  */
+static inline struct page *alloc_slab_page(gfp_t flags, int node,
+					struct kmem_cache_order_objects oo)
+{
+	int order = oo_order(oo);
+
+	if (node == -1)
+		return alloc_pages(flags, order);
+	else
+		return alloc_pages_node(node, flags, order);
+}
+
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
-	int order = oo_order(oo);
-	int pages = 1 << order;
 
 	flags |= s->allocflags;
 
-	if (node == -1)
-		page = alloc_pages(flags, order);
-	else
-		page = alloc_pages_node(node, flags, order);
-
-	if (!page)
-		return NULL;
+	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
+									oo);
+	if (unlikely(!page)) {
+		oo = s->min;
+		/*
+		 * Allocation may have failed due to fragmentation.
+		 * Try a lower order alloc if possible
+		 */
+		page = alloc_slab_page(flags, node, oo);
+		if (!page)
+			return NULL;
 
+		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+	}
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		pages);
+		1 << oo_order(oo));
 
 	return page;
 }
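Note on the hunk above: the first, optimistic attempt at the preferred order passes __GFP_NOWARN | __GFP_NORETRY, so a higher-order failure stays silent and cheap (no allocation-failure warning, no retry loop in the page allocator); only the minimal-order retry uses the caller's unmodified flags, and each fallback is counted in the new ORDER_FALLBACK statistic.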
@@ -2347,6 +2362,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * Determine the number of objects per slab
 	 */
 	s->oo = oo_make(order, size);
+	s->min = oo_make(get_order(size), size);
 	if (oo_objects(s->oo) > oo_objects(s->max))
 		s->max = s->oo;
 
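Worked example for the hunk above, assuming 4 KiB pages: a cache of 5000-byte objects has get_order(5000) = 1, so s->min describes an order-1 slab holding a single object, while a cache of 2048-byte objects gets s->min at order 0 with two objects per page.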
@@ -4163,7 +4179,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
-
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4216,6 +4232,7 @@ static struct attribute *slab_attrs[] = {
 	&deactivate_to_head_attr.attr,
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
+	&order_fallback_attr.attr,
 #endif
 	NULL
 };
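With CONFIG_SLUB_STATS enabled, the STAT_ATTR() addition above exports the counter per cache through sysfs; reading a file such as /sys/kernel/slab/kmalloc-2048/order_fallback (the cache name here is only an example) shows how often the minimal-order fallback path was taken.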