author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2012-07-31 19:42:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 21:42:41 -0400
commit	94ae8ba7176666d1e7d8bbb9f93670a27540b6a8 (patch)
tree	5c8a1478e2463c29be6a3e6f63d5acabaac65a17 /mm
parent	79dbb2368ae3515fad9c8b7c8f831cd86be59b1d (diff)
hugetlb/cgroup: assign the page hugetlb cgroup when we move the page to active list.
A page's hugetlb cgroup assignment and its movement to the active list
should occur with hugetlb_lock held. Otherwise, when we remove the
hugetlb cgroup, we will iterate the active list and find pages with a
NULL hugetlb cgroup value.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
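The race is easiest to see from the cgroup-removal side, which walks each
hstate's hugepage_activelist under hugetlb_lock and expects every page on
that list to already carry its cgroup assignment. A minimal sketch of that
walker (modelled loosely on hugetlb_cgroup_pre_destroy(); the loop body is
illustrative, not the exact kernel code):

	spin_lock(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_activelist, lru) {
		struct hugetlb_cgroup *page_hcg = hugetlb_cgroup_from_page(page);

		/*
		 * If the allocator put the page on the active list first
		 * and assigned its cgroup only later, outside hugetlb_lock,
		 * page_hcg can still be NULL at this point.
		 */
		if (page_hcg == h_cg) {
			/* move the charge to the parent, clear the
			 * page's cgroup, and so on */
		}
	}
	spin_unlock(&hugetlb_lock);

With this patch, hugetlb_cgroup_commit_charge() and the list_move() onto
hugepage_activelist happen inside the same hugetlb_lock critical section,
so the walker can never observe an active page with a NULL cgroup.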
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	22
-rw-r--r--	mm/hugetlb_cgroup.c	5
2 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec7b86ebf9d9..c39e4beeb63a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -928,14 +928,8 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 	page = dequeue_huge_page_node(h, nid);
 	spin_unlock(&hugetlb_lock);
 
-	if (!page) {
+	if (!page)
 		page = alloc_buddy_huge_page(h, nid);
-		if (page) {
-			spin_lock(&hugetlb_lock);
-			list_move(&page->lru, &h->hugepage_activelist);
-			spin_unlock(&hugetlb_lock);
-		}
-	}
 
 	return page;
 }
@@ -1150,9 +1144,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	}
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-	spin_unlock(&hugetlb_lock);
-
-	if (!page) {
+	if (page) {
+		/* update page cgroup details */
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
+		spin_unlock(&hugetlb_lock);
+	} else {
+		spin_unlock(&hugetlb_lock);
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			hugetlb_cgroup_uncharge_cgroup(idx,
@@ -1162,6 +1160,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
 		list_move(&page->lru, &h->hugepage_activelist);
 		spin_unlock(&hugetlb_lock);
 	}
@@ -1169,8 +1169,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	set_page_private(page, (unsigned long)spool);
 
 	vma_commit_reservation(h, vma, addr);
-	/* update page cgroup details */
-	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	return page;
 }
 
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 680e4819e077..9834a01c79dc 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -213,6 +213,7 @@ done:
 	return ret;
 }
 
+/* Should be called with hugetlb_lock held */
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
 				  struct page *page)
@@ -220,9 +221,7 @@ void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 	if (hugetlb_cgroup_disabled() || !h_cg)
 		return;
 
-	spin_lock(&hugetlb_lock);
 	set_hugetlb_cgroup(page, h_cg);
-	spin_unlock(&hugetlb_lock);
 	return;
 }
 
@@ -389,6 +388,7 @@ int __init hugetlb_cgroup_file_init(int idx)
 void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 {
 	struct hugetlb_cgroup *h_cg;
+	struct hstate *h = page_hstate(oldhpage);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -401,6 +401,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 
 	/* move the h_cg details to new cgroup */
 	set_hugetlb_cgroup(newhpage, h_cg);
+	list_move(&newhpage->lru, &h->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
 	cgroup_release_and_wakeup_rmdir(&h_cg->css);
 	return;