summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@redhat.com>2015-05-07 00:11:57 -0400
committerDavid S. Miller <davem@davemloft.net>2015-05-12 10:39:26 -0400
commitb63ae8ca096dfdbfeef6a209c30a93a966518853 (patch)
treefb9a8648b7e68712d0e0009fcc8cc845cbb95507 /mm/page_alloc.c
parent0e39250845c0f91acc64264709b25f7f9b85c2c3 (diff)
mm/net: Rename and move page fragment handling from net/ to mm/
This change moves the __alloc_page_frag functionality out of the networking stack and into the page allocation portion of mm. The idea is to help make this maintainable by placing it with other page allocation functions. Since we are moving it from skbuff.c to page_alloc.c I have also renamed the basic defines and structure from netdev_alloc_cache to page_frag_cache to reflect that this is now part of a different kernel subsystem. I have also added a simple __free_page_frag function which can handle freeing the frags based on the skb->head pointer. The model for this is based off of __free_pages since we don't actually need to deal with all of the cases that put_page handles. I incorporated the virt_to_head_page call and compound_order into the function as it actually allows for a significant size reduction by reducing code duplication. Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c98
1 files changed, 98 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ebffa0e4a9c0..2fd31aebef30 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2967,6 +2967,104 @@ void free_pages(unsigned long addr, unsigned int order)
2967EXPORT_SYMBOL(free_pages); 2967EXPORT_SYMBOL(free_pages);
2968 2968
2969/* 2969/*
2970 * Page Fragment:
2971 * An arbitrary-length arbitrary-offset area of memory which resides
2972 * within a 0 or higher order page. Multiple fragments within that page
2973 * are individually refcounted, in the page's reference counter.
2974 *
2975 * The page_frag functions below provide a simple allocation framework for
2976 * page fragments. This is used by the network stack and network device
2977 * drivers to provide a backing region of memory for use as either an
2978 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
2979 */
2980static struct page *__page_frag_refill(struct page_frag_cache *nc,
2981 gfp_t gfp_mask)
2982{
2983 struct page *page = NULL;
2984 gfp_t gfp = gfp_mask;
2985
2986#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
2987 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
2988 __GFP_NOMEMALLOC;
2989 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
2990 PAGE_FRAG_CACHE_MAX_ORDER);
2991 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
2992#endif
2993 if (unlikely(!page))
2994 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
2995
2996 nc->va = page ? page_address(page) : NULL;
2997
2998 return page;
2999}
3000
3001void *__alloc_page_frag(struct page_frag_cache *nc,
3002 unsigned int fragsz, gfp_t gfp_mask)
3003{
3004 unsigned int size = PAGE_SIZE;
3005 struct page *page;
3006 int offset;
3007
3008 if (unlikely(!nc->va)) {
3009refill:
3010 page = __page_frag_refill(nc, gfp_mask);
3011 if (!page)
3012 return NULL;
3013
3014#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3015 /* if size can vary use size else just use PAGE_SIZE */
3016 size = nc->size;
3017#endif
3018 /* Even if we own the page, we do not use atomic_set().
3019 * This would break get_page_unless_zero() users.
3020 */
3021 atomic_add(size - 1, &page->_count);
3022
3023 /* reset page count bias and offset to start of new frag */
3024 nc->pfmemalloc = page->pfmemalloc;
3025 nc->pagecnt_bias = size;
3026 nc->offset = size;
3027 }
3028
3029 offset = nc->offset - fragsz;
3030 if (unlikely(offset < 0)) {
3031 page = virt_to_page(nc->va);
3032
3033 if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
3034 goto refill;
3035
3036#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3037 /* if size can vary use size else just use PAGE_SIZE */
3038 size = nc->size;
3039#endif
3040 /* OK, page count is 0, we can safely set it */
3041 atomic_set(&page->_count, size);
3042
3043 /* reset page count bias and offset to start of new frag */
3044 nc->pagecnt_bias = size;
3045 offset = size - fragsz;
3046 }
3047
3048 nc->pagecnt_bias--;
3049 nc->offset = offset;
3050
3051 return nc->va + offset;
3052}
3053EXPORT_SYMBOL(__alloc_page_frag);
3054
/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void __free_page_frag(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	/* Only the final reference actually releases the backing page. */
	if (likely(!put_page_testzero(page)))
		return;

	__free_pages_ok(page, compound_order(page));
}
EXPORT_SYMBOL(__free_page_frag);
3066
3067/*
2970 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter 3068 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
2971 * of the current memory cgroup. 3069 * of the current memory cgroup.
2972 * 3070 *