Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	| 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
@@ -1312,17 +1312,26 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 /*
  * Slab allocation and freeing
  */
-static inline struct page *alloc_slab_page(gfp_t flags, int node,
-					struct kmem_cache_order_objects oo)
+static inline struct page *alloc_slab_page(struct kmem_cache *s,
+		gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
+	struct page *page;
 	int order = oo_order(oo);

 	flags |= __GFP_NOTRACK;

+	if (memcg_charge_slab(s, flags, order))
+		return NULL;
+
 	if (node == NUMA_NO_NODE)
-		return alloc_pages(flags, order);
+		page = alloc_pages(flags, order);
 	else
-		return alloc_pages_exact_node(node, flags, order);
+		page = alloc_pages_exact_node(node, flags, order);
+
+	if (!page)
+		memcg_uncharge_slab(s, order);
+
+	return page;
 }

 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
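In this first hunk, alloc_slab_page() now charges the cache's memory cgroup before allocating the pages and unwinds the charge when the allocation itself fails, so a failed allocation never leaves a stale charge behind. The userspace sketch below models that charge/allocate/unwind ordering; the memcg_charge_slab()/memcg_uncharge_slab() names are taken from the patch, but the stub page counter, the hypothetical per-cgroup limit, and malloc() standing in for alloc_pages() are illustrative assumptions only.

/*
 * Sketch (not kernel code): charge the memcg before allocating and
 * unwind the charge if the allocation fails, mirroring the hunk above.
 */
#include <stdio.h>
#include <stdlib.h>

static long charged_pages;		/* stand-in for the memcg page counter */
static const long memcg_limit = 64;	/* hypothetical per-cgroup page limit */

static int memcg_charge_slab(int order)
{
	long pages = 1L << order;

	if (charged_pages + pages > memcg_limit)
		return -1;		/* over the limit: refuse the charge */
	charged_pages += pages;
	return 0;
}

static void memcg_uncharge_slab(int order)
{
	charged_pages -= 1L << order;
}

static void *alloc_slab_page(int order)
{
	void *page;

	if (memcg_charge_slab(order))
		return NULL;		/* charge failed: no allocation at all */

	page = malloc(4096L << order);	/* plays the role of alloc_pages() */
	if (!page)
		memcg_uncharge_slab(order);	/* unwind the charge on failure */

	return page;
}

int main(void)
{
	void *p = alloc_slab_page(3);

	printf("page=%p charged=%ld\n", p, charged_pages);
	if (p) {
		free(p);
		memcg_uncharge_slab(3);	/* the free path pairs the uncharge */
	}
	return 0;
}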
@@ -1344,7 +1353,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	 */
 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;

-	page = alloc_slab_page(alloc_gfp, node, oo);
+	page = alloc_slab_page(s, alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		alloc_gfp = flags;
@@ -1352,7 +1361,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		page = alloc_slab_page(alloc_gfp, node, oo);
+		page = alloc_slab_page(s, alloc_gfp, node, oo);

 		if (page)
 			stat(s, ORDER_FALLBACK);
@@ -1468,7 +1477,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	page_mapcount_reset(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	__free_memcg_kmem_pages(page, order);
+	__free_pages(page, order);
+	memcg_uncharge_slab(s, order);
 }

 #define need_reserve_slab_rcu \
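The __free_slab() hunk is the other half of the pairing: the pages go back through plain __free_pages(), and memcg_uncharge_slab() drops the charge taken in alloc_slab_page(), replacing the combined __free_memcg_kmem_pages() helper. As in the sketch above, the uncharge must be made with the same order as the original charge, or the cgroup's accounting drifts.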