Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c      26
-rw-r--r--  mm/slub.c             2
-rw-r--r--  mm/sparse-vmemmap.c   8
3 files changed, 26 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9b648bd63451..2e0bfc93484b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -533,6 +533,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
         unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
         struct mem_cgroup_per_zone *mz;
 
+        if (mem_cgroup_subsys.disabled)
+                return 0;
+
         /*
          * Should page_cgroup's go to their own slab?
          * One could optimize the performance of the charging routine
@@ -665,6 +668,9 @@ void mem_cgroup_uncharge_page(struct page *page)
         struct mem_cgroup_per_zone *mz;
         unsigned long flags;
 
+        if (mem_cgroup_subsys.disabled)
+                return;
+
         /*
          * Check if our page_cgroup is valid
          */
@@ -705,6 +711,9 @@ int mem_cgroup_prepare_migration(struct page *page)
 {
         struct page_cgroup *pc;
 
+        if (mem_cgroup_subsys.disabled)
+                return 0;
+
         lock_page_cgroup(page);
         pc = page_get_page_cgroup(page);
         if (pc)
@@ -803,6 +812,9 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
         int ret = -EBUSY;
         int node, zid;
 
+        if (mem_cgroup_subsys.disabled)
+                return 0;
+
         css_get(&mem->css);
         /*
          * page reclaim code (kswapd etc..) will move pages between
@@ -966,7 +978,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
         struct mem_cgroup_per_node *pn;
         struct mem_cgroup_per_zone *mz;
-        int zone;
+        int zone, tmp = node;
         /*
          * This routine is called against possible nodes.
          * But it's BUG to call kmalloc() against offline node.
@@ -975,10 +987,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
          * never be onlined. It's better to use memory hotplug callback
          * function.
          */
-        if (node_state(node, N_HIGH_MEMORY))
-                pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
-        else
-                pn = kmalloc(sizeof(*pn), GFP_KERNEL);
+        if (!node_state(node, N_NORMAL_MEMORY))
+                tmp = -1;
+        pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
         if (!pn)
                 return 1;
 
@@ -1053,6 +1064,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                 struct cgroup *cont)
 {
+        if (mem_cgroup_subsys.disabled)
+                return 0;
         return cgroup_add_files(cont, ss, mem_cgroup_files,
                                 ARRAY_SIZE(mem_cgroup_files));
 }
@@ -1065,6 +1078,9 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
         struct mm_struct *mm;
         struct mem_cgroup *mem, *old_mem;
 
+        if (mem_cgroup_subsys.disabled)
+                return;
+
         mm = get_task_mm(p);
         if (mm == NULL)
                 return;
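
Every mm/memcontrol.c hunk above except the alloc_mem_cgroup_per_zone_info() pair applies the same guard: when the memory controller was disabled at boot (e.g. with cgroup_disable=memory, which sets mem_cgroup_subsys.disabled), each entry point returns immediately before touching any page_cgroup state, so a disabled controller costs only a flag test. A minimal userspace sketch of the pattern, with hypothetical names (struct subsys, memcg, charge_page):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a subsystem descriptor such as
 * mem_cgroup_subsys; only the disabled flag matters here. */
struct subsys {
        bool disabled;
};

static struct subsys memcg = { .disabled = true };

/* Each entry point tests the flag first and returns its
 * "nothing to do" value, just as the hunks above return 0
 * (or plain return) before doing any real accounting work. */
static int charge_page(void *page)
{
        if (memcg.disabled)
                return 0;       /* report success without accounting */
        printf("charging page %p\n", page);
        return 0;
}

int main(void)
{
        int dummy;
        return charge_page(&dummy);
}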
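
The alloc_mem_cgroup_per_zone_info() hunks fold the old kmalloc_node()/kmalloc() branch into a single call: a node without normal memory has its id demoted to -1, which kmalloc_node() treats as "no node preference, allocate anywhere". A userspace sketch of that fallback under mock names (NO_NODE, node_alloc, node_has_normal_memory are all hypothetical):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NO_NODE (-1)    /* mirrors kmalloc_node()'s "any node" id */

/* Hypothetical NUMA-aware allocator: a real kmalloc_node()
 * allocates on 'node', or anywhere when node == -1. */
static void *node_alloc(size_t size, int node)
{
        if (node != NO_NODE)
                printf("allocating %zu bytes on node %d\n", size, node);
        else
                printf("allocating %zu bytes on any node\n", size);
        return malloc(size);
}

/* Mock of node_state(node, N_NORMAL_MEMORY): pretend only
 * node 0 has normal (kmalloc-able) memory. */
static bool node_has_normal_memory(int node)
{
        return node == 0;
}

static void *alloc_per_node_info(int node)
{
        int tmp = node;

        /* Same shape as the diff: demote memoryless nodes to
         * the any-node fallback instead of branching between
         * two different allocator calls. */
        if (!node_has_normal_memory(node))
                tmp = NO_NODE;
        return node_alloc(64, tmp);
}

int main(void)
{
        free(alloc_per_node_info(0));
        free(alloc_per_node_info(1));
        return 0;
}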
diff --git a/mm/slub.c b/mm/slub.c
index 84ed734b96b3..acc975fcc8cc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2688,7 +2688,7 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
-#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
         unsigned long flags;
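
The mm/slub.c change fixes a typo in the preprocessor guard: Kconfig options reach C code with a CONFIG_ prefix, so a bare SLUB_DEBUG is never defined and count_partial() was silently compiled out even with SLUB debugging enabled. A standalone sketch of the failure mode, defining the CONFIG_ macro by hand to stand in for Kconfig output:

#include <stdio.h>

/* Kconfig emits enabled options as CONFIG_-prefixed macros;
 * define one here to simulate CONFIG_SLUB_DEBUG=y. */
#define CONFIG_SLUB_DEBUG 1

int main(void)
{
#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
        /* The pre-fix guard: never true, because nothing
         * defines a bare SLUB_DEBUG. */
        puts("old guard: debug code compiled in");
#endif
#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
        /* The post-fix guard sees the real Kconfig macro. */
        puts("new guard: debug code compiled in");
#endif
        return 0;
}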
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cd75b21dd4c3..99c4f36eb8a3 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -76,7 +76,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
                 pte_t entry;
                 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                 if (!p)
-                        return 0;
+                        return NULL;
                 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
                 set_pte_at(&init_mm, addr, pte, entry);
         }
@@ -89,7 +89,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
         if (pmd_none(*pmd)) {
                 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                 if (!p)
-                        return 0;
+                        return NULL;
                 pmd_populate_kernel(&init_mm, pmd, p);
         }
         return pmd;
@@ -101,7 +101,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
         if (pud_none(*pud)) {
                 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                 if (!p)
-                        return 0;
+                        return NULL;
                 pud_populate(&init_mm, pud, p);
         }
         return pud;
@@ -113,7 +113,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
         if (pgd_none(*pgd)) {
                 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                 if (!p)
-                        return 0;
+                        return NULL;
                 pgd_populate(&init_mm, pgd, p);
         }
         return pgd;
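
All four mm/sparse-vmemmap.c hunks make the same sparse cleanup: these populate helpers return pointers, so a failed vmemmap_alloc_block() should propagate NULL rather than the bare integer 0. Behavior is identical, but NULL documents intent and silences sparse's "Using plain integer as NULL pointer" warning. A minimal sketch of the idiom, where alloc_block() is a hypothetical stand-in for vmemmap_alloc_block():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for vmemmap_alloc_block();
 * returns NULL on failure like the real allocator. */
static void *alloc_block(size_t size)
{
        return malloc(size);
}

/* A pointer-returning populate helper: on allocation failure
 * it returns NULL, not the bare integer 0 the old code used. */
static int *populate_entry(void)
{
        int *p = alloc_block(sizeof(*p));
        if (!p)
                return NULL;    /* was "return 0;" before the fix */
        *p = 42;
        return p;
}

int main(void)
{
        int *e = populate_entry();
        if (!e)
                return 1;
        printf("%d\n", *e);
        free(e);
        return 0;
}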