author      Joonsoo Kim <iamjoonsoo.kim@lge.com>    2013-10-23 21:07:44 -0400
committer   Pekka Enberg <penberg@iki.fi>           2013-10-24 13:17:33 -0400
commit      a57a49887eb3398d31bfaa8009531f7121d6537d (patch)
tree        3716d744ffd5483ad22151a60072ccba5743a7b4 /mm/slab.c
parent      56f295ef0dfa7e1d0be18deebe0c15fb6b2d9d5b (diff)
slab: use __GFP_COMP flag for allocating slab pages
If we use the first page's 'struct page' as the 'struct slab', there is
no advantage to not using __GFP_COMP. So use the __GFP_COMP flag in all
cases.

Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
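[Editorial note] The bookkeeping difference the patch exploits is easiest to see outside the kernel. Below is a minimal userspace sketch; 'struct page', head_of() and the mark_* helpers here are illustrative stand-ins for the kernel's page flags, compound_head() and the PG_slab bit, not kernel API:

#include <stdbool.h>
#include <stdio.h>

struct page {
	bool slab;		/* models the PG_slab page flag            */
	struct page *head;	/* non-NULL on tail pages of a compound    */
	void *slab_cache;	/* models page->slab_cache                 */
};

/* Without __GFP_COMP: every page of an order-N block is independent,
 * so slab state must be set (and later cleared) page by page. */
static void mark_slab_each(struct page *pages, int nr)
{
	for (int i = 0; i < nr; i++)
		pages[i].slab = true;
}

/* With __GFP_COMP: tail pages reference the head, so slab state only
 * needs to live in the head page. */
static void mark_slab_head(struct page *pages, int nr)
{
	for (int i = 1; i < nr; i++)
		pages[i].head = &pages[0];	/* done once by the allocator */
	pages[0].slab = true;
}

static struct page *head_of(struct page *p)
{
	return p->head ? p->head : p;		/* models compound_head() */
}

int main(void)
{
	struct page nocomp[4] = {{0}};		/* order-2 block, old scheme  */
	struct page comp[4]   = {{0}};		/* order-2 block, __GFP_COMP  */

	mark_slab_each(nocomp, 4);		/* touches all four pages     */
	mark_slab_head(comp, 4);		/* touches only the head flag */

	/* Any page in the compound block resolves through its head. */
	printf("page 3 is slab: %d\n", head_of(&comp[3])->slab);
	return 0;
}

With the head-only scheme, the per-page loops that kmem_getpages(), kmem_freepages() and slab_map_pages() used to run collapse into single assignments on the head page, which is exactly what the hunks below do.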
Diffstat (limited to 'mm/slab.c')
-rw-r--r--    mm/slab.c    43
1 file changed, 9 insertions(+), 34 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f9e676edeb0f..75c60821e382 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1718,15 +1718,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 {
 	struct page *page;
 	int nr_pages;
-	int i;
-
-#ifndef CONFIG_MMU
-	/*
-	 * Nommu uses slab's for process anonymous memory allocations, and thus
-	 * requires __GFP_COMP to properly refcount higher order allocations
-	 */
-	flags |= __GFP_COMP;
-#endif
 
 	flags |= cachep->allocflags;
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1750,12 +1741,9 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	else
 		add_zone_page_state(page_zone(page),
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
-	for (i = 0; i < nr_pages; i++) {
-		__SetPageSlab(page + i);
-
-		if (page->pfmemalloc)
-			SetPageSlabPfmemalloc(page);
-	}
+	__SetPageSlab(page);
+	if (page->pfmemalloc)
+		SetPageSlabPfmemalloc(page);
 	memcg_bind_pages(cachep, cachep->gfporder);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -1775,8 +1763,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
  */
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
-	unsigned long i = (1 << cachep->gfporder);
-	const unsigned long nr_freed = i;
+	const unsigned long nr_freed = (1 << cachep->gfporder);
 
 	kmemcheck_free_shadow(page, cachep->gfporder);
 
@@ -1787,12 +1774,9 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_UNRECLAIMABLE, nr_freed);
 
+	BUG_ON(!PageSlab(page));
 	__ClearPageSlabPfmemalloc(page);
-	while (i--) {
-		BUG_ON(!PageSlab(page));
-		__ClearPageSlab(page);
-		page++;
-	}
+	__ClearPageSlab(page);
 
 	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
@@ -2362,7 +2346,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
-	cachep->allocflags = 0;
+	cachep->allocflags = __GFP_COMP;
 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
 		cachep->allocflags |= GFP_DMA;
 	cachep->size = size;
@@ -2729,17 +2713,8 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
 			   struct page *page)
 {
-	int nr_pages;
-
-	nr_pages = 1;
-	if (likely(!PageCompound(page)))
-		nr_pages <<= cache->gfporder;
-
-	do {
-		page->slab_cache = cache;
-		page->slab_page = slab;
-		page++;
-	} while (--nr_pages);
+	page->slab_cache = cache;
+	page->slab_page = slab;
 }
 
 /*