author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2013-10-23 21:07:38 -0400
committer	Pekka Enberg <penberg@iki.fi>	2013-10-24 13:17:23 -0400
commit	0c3aa83e00a9cd93f08e7aa42fba01924aa5f2fc (patch)
tree	14390a0785ec404ee157fc120bf6aaba1d1cb284 /mm
parent	73293c2f900d0adbb6a415b312cd57976d5ae242 (diff)
slab: change return type of kmem_getpages() to struct page
It is more understandable that kmem_getpages() returns a struct page. And, with this, we can drop one translation from virtual address to page and get better code than before. Below is the change from this patch.

* Before
   text	   data	    bss	    dec	    hex	filename
  22123	  23434	      4	  45561	   b1f9	mm/slab.o

* After
   text	   data	    bss	    dec	    hex	filename
  22074	  23434	      4	  45512	   b1c8	mm/slab.o

This also helps the following patch to remove struct slab's colouroff.

Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
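For illustration, a minimal before/after sketch of a hypothetical caller (only kmem_getpages(), virt_to_page() and page_address() are from the actual source; the surrounding variables are illustrative):

	/* Before: kmem_getpages() returned a virtual address, so a caller
	 * that needed the struct page had to translate back. */
	void *addr = kmem_getpages(cachep, flags, nodeid);
	struct page *page = virt_to_page(addr);	/* extra translation */

	/* After: the struct page is returned directly; a virtual address
	 * is derived only where it is actually needed. */
	struct page *page = kmem_getpages(cachep, flags, nodeid);
	void *addr = page_address(page);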
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 0b4ddafd8a03..7d79bd766002 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -205,7 +205,7 @@ typedef unsigned int kmem_bufctl_t;
 struct slab_rcu {
 	struct rcu_head head;
 	struct kmem_cache *cachep;
-	void *addr;
+	struct page *page;
 };
 
 /*
@@ -1737,7 +1737,8 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+								int nodeid)
 {
 	struct page *page;
 	int nr_pages;
@@ -1790,16 +1791,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		kmemcheck_mark_unallocated_pages(page, nr_pages);
 	}
 
-	return page_address(page);
+	return page;
 }
 
 /*
  * Interface to system's page release.
  */
-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
 	unsigned long i = (1 << cachep->gfporder);
-	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
 	kmemcheck_free_shadow(page, cachep->gfporder);
@@ -1821,7 +1821,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
-	free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
+	__free_memcg_kmem_pages(page, cachep->gfporder);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -1829,7 +1829,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
 	struct kmem_cache *cachep = slab_rcu->cachep;
 
-	kmem_freepages(cachep, slab_rcu->addr);
+	kmem_freepages(cachep, slab_rcu->page);
 	if (OFF_SLAB(cachep))
 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
 }
@@ -2048,7 +2048,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
  */
 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
-	void *addr = slabp->s_mem - slabp->colouroff;
+	struct page *page = virt_to_head_page(slabp->s_mem);
 
 	slab_destroy_debugcheck(cachep, slabp);
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
@@ -2056,10 +2056,10 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 
 		slab_rcu = (struct slab_rcu *)slabp;
 		slab_rcu->cachep = cachep;
-		slab_rcu->addr = addr;
+		slab_rcu->page = page;
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
-		kmem_freepages(cachep, addr);
+		kmem_freepages(cachep, page);
 		if (OFF_SLAB(cachep))
 			kmem_cache_free(cachep->slabp_cache, slabp);
 	}
@@ -2604,11 +2604,12 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
  * kmem_find_general_cachep till the initialization is complete.
  * Hence we cannot have slabp_cache same as the original cache.
  */
-static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
-				   int colour_off, gfp_t local_flags,
-				   int nodeid)
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep,
+				   struct page *page, int colour_off,
+				   gfp_t local_flags, int nodeid)
 {
 	struct slab *slabp;
+	void *addr = page_address(page);
 
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
@@ -2625,12 +2626,12 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		if (!slabp)
 			return NULL;
 	} else {
-		slabp = objp + colour_off;
+		slabp = addr + colour_off;
 		colour_off += cachep->slab_size;
 	}
 	slabp->inuse = 0;
 	slabp->colouroff = colour_off;
-	slabp->s_mem = objp + colour_off;
+	slabp->s_mem = addr + colour_off;
 	slabp->nodeid = nodeid;
 	slabp->free = 0;
 	return slabp;
@@ -2741,12 +2742,9 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
  * virtual address for kfree, ksize, and slab debugging.
  */
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
-			   void *addr)
+			   struct page *page)
 {
 	int nr_pages;
-	struct page *page;
-
-	page = virt_to_page(addr);
 
 	nr_pages = 1;
 	if (likely(!PageCompound(page)))
@@ -2764,7 +2762,7 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
 static int cache_grow(struct kmem_cache *cachep,
-		gfp_t flags, int nodeid, void *objp)
+		gfp_t flags, int nodeid, struct page *page)
 {
 	struct slab *slabp;
 	size_t offset;
@@ -2807,18 +2805,18 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Get mem for the objs.  Attempt to allocate a physical page from
 	 * 'nodeid'.
 	 */
-	if (!objp)
-		objp = kmem_getpages(cachep, local_flags, nodeid);
-	if (!objp)
+	if (!page)
+		page = kmem_getpages(cachep, local_flags, nodeid);
+	if (!page)
 		goto failed;
 
 	/* Get slab management. */
-	slabp = alloc_slabmgmt(cachep, objp, offset,
+	slabp = alloc_slabmgmt(cachep, page, offset,
 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
 	if (!slabp)
 		goto opps1;
 
-	slab_map_pages(cachep, slabp, objp);
+	slab_map_pages(cachep, slabp, page);
 
 	cache_init_objs(cachep, slabp);
 
@@ -2834,7 +2832,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	spin_unlock(&n->list_lock);
 	return 1;
 opps1:
-	kmem_freepages(cachep, objp);
+	kmem_freepages(cachep, page);
 failed:
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -3250,18 +3248,20 @@ retry:
 		 * We may trigger various forms of reclaim on the allowed
 		 * set and go into memory reserves if necessary.
 		 */
+		struct page *page;
+
 		if (local_flags & __GFP_WAIT)
 			local_irq_enable();
 		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, local_flags, numa_mem_id());
+		page = kmem_getpages(cache, local_flags, numa_mem_id());
 		if (local_flags & __GFP_WAIT)
 			local_irq_disable();
-		if (obj) {
+		if (page) {
 			/*
 			 * Insert into the appropriate per node queues
 			 */
-			nid = page_to_nid(virt_to_page(obj));
-			if (cache_grow(cache, flags, nid, obj)) {
+			nid = page_to_nid(page);
+			if (cache_grow(cache, flags, nid, page)) {
 				obj = ____cache_alloc_node(cache,
 					flags | GFP_THISNODE, nid);
 				if (!obj)