author     Christoph Lameter <cl@linux.com>        2011-06-01 13:25:44 -0400
committer  Pekka Enberg <penberg@kernel.org>       2011-07-02 06:26:52 -0400
commit     7e0528dadc9f8b04e4de0dba48a075100c2afe75 (patch)
tree       708bd8384d867d276d36faa6bfc33fb41fe051f3 /mm/slub.c
parent     e4a46182e1bcc2ddacff5a35f6b52398b51f1b11 (diff)
slub: Push irq disable into allocate_slab()

Do the irq handling in allocate_slab() instead of __slab_alloc().
__slab_alloc() is already cluttered and allocate_slab() is already
fiddling around with gfp flags.

v6->v7: Only increment ORDER_FALLBACK if we get a page during fallback

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
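As an aside for readers less familiar with the SLUB slow path, below is a minimal, self-contained userspace sketch of the pattern this patch applies: the helper that may sleep re-enables interrupts itself and restores the disabled state before returning, so the caller no longer has to bracket the call. All mock_* names, the MOCK_GFP_WAIT flag and the malloc() stand-in are invented for this illustration and are not the kernel API.

/* pattern_sketch.c - illustrative userspace mock, not kernel code */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MOCK_GFP_WAIT 0x1u	/* stands in for __GFP_WAIT */

static bool mock_irqs_enabled;

static void mock_local_irq_enable(void)  { mock_irqs_enabled = true;  }
static void mock_local_irq_disable(void) { mock_irqs_enabled = false; }

/*
 * The helper owns the irq dance, the way allocate_slab() does after this
 * patch: enable before a potentially sleeping allocation, disable again
 * before returning to a caller that expects irqs off.
 */
static void *mock_allocate_slab(unsigned int flags, size_t size)
{
	void *page;

	if (flags & MOCK_GFP_WAIT)
		mock_local_irq_enable();	/* allocation may sleep */

	page = malloc(size);			/* stands in for alloc_slab_page() */

	if (flags & MOCK_GFP_WAIT)
		mock_local_irq_disable();	/* restore the caller's expectation */

	return page;
}

int main(void)
{
	mock_local_irq_disable();		/* caller runs with "irqs" off */

	void *page = mock_allocate_slab(MOCK_GFP_WAIT, 128);

	/* The caller stays uncluttered: no gfp masking or irq toggling here. */
	printf("page=%p, irqs disabled again: %s\n",
	       page, mock_irqs_enabled ? "no" : "yes");
	free(page);
	return 0;
}

The benefit mirrored here is the one the commit message states: the slow path in __slab_alloc() stays uncluttered, and the gfp masking and irq toggling live next to the code that actually performs the potentially sleeping allocation.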
Diffstat (limited to 'mm/slub.c')

-rw-r--r--  mm/slub.c | 23
1 files changed, 13 insertions, 10 deletions
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1187,6 +1187,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 
+	flags &= gfp_allowed_mask;
+
+	if (flags & __GFP_WAIT)
+		local_irq_enable();
+
 	flags |= s->allocflags;
 
 	/*
@@ -1203,12 +1208,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Try a lower order alloc if possible
 		 */
 		page = alloc_slab_page(flags, node, oo);
-		if (!page)
-			return NULL;
 
-		stat(s, ORDER_FALLBACK);
+		if (page)
+			stat(s, ORDER_FALLBACK);
 	}
 
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+
+	if (!page)
+		return NULL;
+
 	if (kmemcheck_enabled
 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
@@ -1849,15 +1859,8 @@ new_slab:
 		goto load_freelist;
 	}
 
-	gfpflags &= gfp_allowed_mask;
-	if (gfpflags & __GFP_WAIT)
-		local_irq_enable();
-
 	page = new_slab(s, gfpflags, node);
 
-	if (gfpflags & __GFP_WAIT)
-		local_irq_disable();
-
 	if (page) {
 		c = __this_cpu_ptr(s->cpu_slab);
 		stat(s, ALLOC_SLAB);