Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 66
 1 file changed, 13 insertions(+), 53 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index de2491c42d4f..7023a31edc5c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -63,6 +63,7 @@
 #include <linux/sched/rt.h>
 #include <linux/page_owner.h>
 #include <linux/kthread.h>
+#include <linux/memcontrol.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -1018,6 +1019,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	}
 	if (PageMappingFlags(page))
 		page->mapping = NULL;
+	if (memcg_kmem_enabled() && PageKmemcg(page)) {
+		memcg_kmem_uncharge(page, order);
+		__ClearPageKmemcg(page);
+	}
 	if (check_free)
 		bad += free_pages_check(page);
 	if (bad)
@@ -3841,6 +3846,14 @@ no_zone:
 	}
 
 out:
+	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
+		if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
+			__free_pages(page, order);
+			page = NULL;
+		} else
+			__SetPageKmemcg(page);
+	}
+
 	if (kmemcheck_enabled && page)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 
@@ -3996,59 +4009,6 @@ void __free_page_frag(void *addr)
 }
 EXPORT_SYMBOL(__free_page_frag);
 
-/*
- * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
- * of the current memory cgroup if __GFP_ACCOUNT is set, other than that it is
- * equivalent to alloc_pages.
- *
- * It should be used when the caller would like to use kmalloc, but since the
- * allocation is large, it has to fall back to the page allocator.
- */
-struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
-{
-	struct page *page;
-
-	page = alloc_pages(gfp_mask, order);
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
-	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
-		__free_pages(page, order);
-		page = NULL;
-	}
-	return page;
-}
-
-struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
-{
-	struct page *page;
-
-	page = alloc_pages_node(nid, gfp_mask, order);
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
-	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
-		__free_pages(page, order);
-		page = NULL;
-	}
-	return page;
-}
-
-/*
- * __free_kmem_pages and free_kmem_pages will free pages allocated with
- * alloc_kmem_pages.
- */
-void __free_kmem_pages(struct page *page, unsigned int order)
-{
-	if (memcg_kmem_enabled())
-		memcg_kmem_uncharge(page, order);
-	__free_pages(page, order);
-}
-
-void free_kmem_pages(unsigned long addr, unsigned int order)
-{
-	if (addr != 0) {
-		VM_BUG_ON(!virt_addr_valid((void *)addr));
-		__free_kmem_pages(virt_to_page((void *)addr), order);
-	}
-}
-
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		size_t size)
 {
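
With charge and uncharge folded into the generic allocator paths above, the dedicated helpers are gone: a caller that used alloc_kmem_pages()/free_kmem_pages() now only needs to pass __GFP_ACCOUNT to the regular page allocator. A minimal sketch of the resulting caller pattern follows; the example_* wrappers are hypothetical names for illustration, while alloc_pages(), page_address(), and free_pages() are the stock page allocator API:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical caller: a large allocation charged to the current
 * memory cgroup.  With this change the allocator itself charges the
 * page when __GFP_ACCOUNT is set and marks it PageKmemcg. */
static void *example_alloc_accounted(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);

	return page ? page_address(page) : NULL;
}

/* No dedicated free helper is needed any more: free_pages_prepare()
 * sees the PageKmemcg flag and uncharges the page automatically. */
static void example_free_accounted(void *addr, unsigned int order)
{
	free_pages((unsigned long)addr, order);
}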