-rw-r--r--	include/linux/slub_def.h |  2 ++
-rw-r--r--	mm/slub.c                | 39 +++++++++++++++++++++++++++------------
2 files changed, 30 insertions(+), 11 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4236b5dee812..71e43a12ebbb 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -29,6 +29,7 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -81,6 +82,7 @@ struct kmem_cache {
 
 	/* Allocation and freeing of slabs */
 	struct kmem_cache_order_objects max;
+	struct kmem_cache_order_objects min;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
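
Both additions build on SLUB's kmem_cache_order_objects type: s->max already tracks the largest geometry seen, and the new s->min records the smallest slab that can still hold one object. For context, here is a minimal userspace sketch of how that type packs a page order and an object count into a single word; the 16-bit split and 4 KiB page size are assumptions based on this kernel era, not taken from the patch itself:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */
#define OO_SHIFT  16		/* assumption: order in high bits, count in low */

struct kmem_cache_order_objects {
	unsigned long x;
};

/* Pack a page order and the number of objects that fit in a slab of
 * that order into one word, mirroring SLUB's oo_make() helper. */
static struct kmem_cache_order_objects oo_make(int order, unsigned long size)
{
	struct kmem_cache_order_objects x = {
		((unsigned long)order << OO_SHIFT) + (PAGE_SIZE << order) / size
	};
	return x;
}

static int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & ((1UL << OO_SHIFT) - 1);
}

int main(void)
{
	/* A 512-byte cache at order 1: two pages hold 16 objects. */
	struct kmem_cache_order_objects oo = oo_make(1, 512);

	printf("order=%d objects=%d\n", oo_order(oo), oo_objects(oo));
	return 0;
}

Keeping a complete slab geometry in one word lets s->oo, s->min and s->max be copied and compared cheaply, which the mm/slub.c changes below rely on.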
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1113,28 +1113,43 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 /*
  * Slab allocation and freeing
  */
+static inline struct page *alloc_slab_page(gfp_t flags, int node,
+					struct kmem_cache_order_objects oo)
+{
+	int order = oo_order(oo);
+
+	if (node == -1)
+		return alloc_pages(flags, order);
+	else
+		return alloc_pages_node(node, flags, order);
+}
+
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
-	int order = oo_order(oo);
-	int pages = 1 << order;
 
 	flags |= s->allocflags;
 
-	if (node == -1)
-		page = alloc_pages(flags, order);
-	else
-		page = alloc_pages_node(node, flags, order);
-
-	if (!page)
-		return NULL;
+	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
+									oo);
+	if (unlikely(!page)) {
+		oo = s->min;
+		/*
+		 * Allocation may have failed due to fragmentation.
+		 * Try a lower order alloc if possible
+		 */
+		page = alloc_slab_page(flags, node, oo);
+		if (!page)
+			return NULL;
 
+		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+	}
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		pages);
+		1 << oo_order(oo));
 
 	return page;
 }
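
This hunk is the heart of the patch: the first attempt adds __GFP_NOWARN | __GFP_NORETRY so a fragmented zone fails fast and quietly, and only the single-object order from s->min is retried at full strength. A compilable userspace sketch of that shape follows; try_alloc_order() is a hypothetical stand-in for the page allocator, with fragmentation failures simulated:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the buddy allocator: pretend higher-order
 * requests can fail when memory is fragmented. */
static void *try_alloc_order(int order)
{
	if (order > 0 && rand() % 2)	/* simulate fragmentation failure */
		return NULL;
	return malloc(4096UL << order);
}

/* The shape of the new allocate_slab() logic: a cheap first attempt at
 * the preferred order, then a full-effort retry at the minimal order,
 * counting how often the fallback was needed (ORDER_FALLBACK). */
static void *alloc_with_fallback(int pref_order, int min_order,
				 unsigned long *fallbacks)
{
	void *p = try_alloc_order(pref_order);	/* like __GFP_NOWARN | __GFP_NORETRY */

	if (!p) {
		p = try_alloc_order(min_order);	/* a lower order beats failure */
		if (!p)
			return NULL;
		(*fallbacks)++;			/* the new statistic */
	}
	return p;
}

int main(void)
{
	unsigned long fallbacks = 0;
	int i;

	for (i = 0; i < 8; i++)
		free(alloc_with_fallback(3, 0, &fallbacks));
	printf("order fallbacks: %lu\n", fallbacks);
	return 0;
}

Note the asymmetry: the preferred-order attempt is the one that must stay cheap, because once fallback exists it can be expected to fail routinely under fragmentation.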
@@ -2347,6 +2362,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * Determine the number of objects per slab
 	 */
 	s->oo = oo_make(order, size);
+	s->min = oo_make(get_order(size), size);
 	if (oo_objects(s->oo) > oo_objects(s->max))
 		s->max = s->oo;
 
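
The fallback geometry is fixed once per cache: get_order(size) yields the smallest page order whose slab still fits a single object, so the s->min path can only fail when the system is genuinely out of memory rather than merely fragmented. Roughly what that computes, in a sketch assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

/* Roughly what get_order() computes for a nonzero size: the smallest
 * order such that (PAGE_SIZE << order) >= size, i.e. the guarantee
 * s->min needs to hold at least one object. */
static int min_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("%d %d %d\n", min_order(512), min_order(4096), min_order(9000));
	/* prints: 0 0 2 -- a 9000-byte object needs an order-2 (4-page) slab */
	return 0;
}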
@@ -4163,7 +4179,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
-
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4216,6 +4232,7 @@ static struct attribute *slab_attrs[] = {
 	&deactivate_to_head_attr.attr,
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
+	&order_fallback_attr.attr,
 #endif
 	NULL
 };
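
With CONFIG_SLUB_STATS enabled, the counter exported here should appear as /sys/kernel/slab/<cache>/order_fallback alongside the other statistics, giving a direct measure of how often fragmentation forces a cache off its preferred slab order.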