Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 45
1 file changed, 16 insertions(+), 29 deletions(-)
@@ -1621,10 +1621,16 @@ __initcall(cpucache_init);
 static noinline void
 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 {
+#if DEBUG
 	struct kmem_cache_node *n;
 	struct page *page;
 	unsigned long flags;
 	int node;
+	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
+		return;
 
 	printk(KERN_WARNING
 		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
@@ -1662,6 +1668,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 		node, active_slabs, num_slabs, active_objs, num_objs,
 		free_objects);
 	}
+#endif
 }
 
 /*
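The two hunks above compile the slab_out_of_memory() report out entirely unless DEBUG is set, and rate-limit it when it is built in (honoring __GFP_NOWARN). For reference, a minimal sketch of the kernel ratelimit pattern being introduced; only the ratelimit API is from the diff, the surrounding function is illustrative:

#include <linux/printk.h>
#include <linux/ratelimit.h>

static void report_alloc_failure(int node)	/* hypothetical caller */
{
	/*
	 * Allow at most DEFAULT_RATELIMIT_BURST messages per
	 * DEFAULT_RATELIMIT_INTERVAL jiffies; the state is static,
	 * so it persists across calls.
	 */
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!__ratelimit(&rs))
		return;		/* over the limit: stay silent */

	pr_warn("allocation failed on node %d\n", node);
}

Unlike the printk_ratelimit() call removed from the allocation path below, a private DEFINE_RATELIMIT_STATE does not share one global rate-limit state with every other printk_ratelimit() user.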
@@ -1681,10 +1688,13 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
+	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
+		return NULL;
+
 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page) {
-		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
-			slab_out_of_memory(cachep, flags, nodeid);
+		memcg_uncharge_slab(cachep, cachep->gfporder);
+		slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
 	}
 
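The hunk above makes kmem accounting explicit: charge the memory cgroup before allocating the pages, and give the charge back on the failure path. Reduced to its control flow (the called functions are the ones from the diff; the stripped-down skeleton around them is illustrative):

static struct page *getpages_sketch(struct kmem_cache *cachep,
				    gfp_t flags, int nodeid)
{
	struct page *page;

	/* Fail fast if the cgroup is over its kmem limit. */
	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
		return NULL;

	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK,
				      cachep->gfporder);
	if (!page) {
		/* Allocation failed: undo the charge taken above. */
		memcg_uncharge_slab(cachep, cachep->gfporder);
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}
	return page;
}

Note that the __GFP_NOWARN check and the ratelimiting moved into slab_out_of_memory() itself, so the call site no longer guards the call.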
@@ -1702,7 +1712,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
 		SetPageSlabPfmemalloc(page);
-	memcg_bind_pages(cachep, cachep->gfporder);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1738,10 +1747,10 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page_mapcount_reset(page);
 	page->mapping = NULL;
 
-	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
-	__free_memcg_kmem_pages(page, cachep->gfporder);
+	__free_pages(page, cachep->gfporder);
+	memcg_uncharge_slab(cachep, cachep->gfporder);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
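The free side is the counterpart of the same rework: the per-page hooks (memcg_bind_pages()/memcg_release_pages()) and the special __free_memcg_kmem_pages() wrapper are gone; pages go back through plain __free_pages(), and the charge taken in kmem_getpages() is dropped afterwards, keeping charge and uncharge symmetric around the slab page's lifetime. Condensed (skeleton illustrative, calls from the diff):

static void freepages_sketch(struct kmem_cache *cachep, struct page *page)
{
	int order = cachep->gfporder;

	__free_pages(page, order);		/* return the pages first */
	memcg_uncharge_slab(cachep, order);	/* then drop the charge */
}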
@@ -2469,8 +2478,7 @@ out:
 	return nr_freed;
 }
 
-/* Called with slab_mutex held to protect against cpu hotplug */
-static int __cache_shrink(struct kmem_cache *cachep)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
 	struct kmem_cache_node *n;
@@ -2491,32 +2499,11 @@ static int __cache_shrink(struct kmem_cache *cachep)
 	return (ret ? 1 : 0);
 }
 
-/**
- * kmem_cache_shrink - Shrink a cache.
- * @cachep: The cache to shrink.
- *
- * Releases as many slabs as possible for a cache.
- * To help debugging, a zero exit status indicates all slabs were released.
- */
-int kmem_cache_shrink(struct kmem_cache *cachep)
-{
-	int ret;
-	BUG_ON(!cachep || in_interrupt());
-
-	get_online_cpus();
-	mutex_lock(&slab_mutex);
-	ret = __cache_shrink(cachep);
-	mutex_unlock(&slab_mutex);
-	put_online_cpus();
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_shrink);
-
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
 	int i;
 	struct kmem_cache_node *n;
-	int rc = __cache_shrink(cachep);
+	int rc = __kmem_cache_shrink(cachep);
 
 	if (rc)
 		return rc;
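The last two hunks rename __cache_shrink() to __kmem_cache_shrink() and drop slab.c's exported kmem_cache_shrink() wrapper, including its slab_mutex/CPU-hotplug locking. The exported entry point presumably moves to allocator-independent code (mm/slab_common.c) and, judging from the removed code, would look roughly like this; a sketch under that assumption, not the verbatim common-code implementation:

int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	mutex_lock(&slab_mutex);
	ret = __kmem_cache_shrink(cachep);	/* per-allocator hook */
	mutex_unlock(&slab_mutex);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

Sharing one wrapper across the SLAB/SLOB/SLUB allocators would explain both the rename to the double-underscore hook convention and the removal of the per-allocator locking seen here.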