author		Andrea Arcangeli <aarcange@redhat.com>		2011-01-13 18:46:57 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:43 -0500
commit		b9bbfbe30ae088cc88a4b2ba7732baeebd1a0162
tree		5f3d69ad2de2bdb8782180c7ce33bf1b9b190774 /mm
parent		152c9ccb75548c027fa3103efa4fa4e19a345449
thp: memcg huge memory
Add memcg charge/uncharge to hugepage faults in huge_memory.c.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
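
The pattern the patch applies is uniform: charge a freshly allocated huge page to the faulting mm's memcg right after allocation, and pair every error path that drops the page reference with an uncharge. A minimal sketch of that discipline, using only the memcg calls the patch itself uses (mem_cgroup_newpage_charge(), put_page()); the wrapper function is hypothetical and not part of the patch:

	/*
	 * Sketch only: allocate a THP and charge it to mm's memcg,
	 * backing the allocation out if the charge fails.
	 * alloc_and_charge_hugepage() is a hypothetical helper
	 * for illustration, not code from this patch.
	 */
	static struct page *alloc_and_charge_hugepage(struct mm_struct *mm,
						      struct vm_area_struct *vma)
	{
		struct page *page = alloc_hugepage(transparent_hugepage_defrag(vma));

		if (unlikely(!page))
			return NULL;
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			/* never hand out a page whose charge failed */
			put_page(page);
			return NULL;
		}
		/* on any later failure the caller must uncharge + put_page */
		return page;
	}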
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c | 36
1 file changed, 31 insertions(+), 5 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 620891f4e54f..a313403b3c5e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -233,6 +233,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	VM_BUG_ON(!PageCompound(page));
 	pgtable = pte_alloc_one(mm, haddr);
 	if (unlikely(!pgtable)) {
+		mem_cgroup_uncharge_page(page);
 		put_page(page);
 		return VM_FAULT_OOM;
 	}
@@ -243,6 +244,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_none(*pmd))) {
 		spin_unlock(&mm->page_table_lock);
+		mem_cgroup_uncharge_page(page);
 		put_page(page);
 		pte_free(mm, pgtable);
 	} else {
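
Both failure paths in __do_huge_pmd_anonymous_page() now run with a charge already taken by the caller, so each pairs mem_cgroup_uncharge_page() with the existing put_page(); dropping only the page reference would leak the memcg charge.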
@@ -286,6 +288,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page = alloc_hugepage(transparent_hugepage_defrag(vma));
 		if (unlikely(!page))
 			goto out;
+		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+			put_page(page);
+			goto out;
+		}
 
 		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
 	}
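
Charging in do_huge_pmd_anonymous_page() immediately after alloc_hugepage() keeps a single ownership rule: whoever holds the page reference also owns the pending charge, so the error paths in __do_huge_pmd_anonymous_page() above can unwind both together.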
@@ -402,9 +408,17 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
 					  vma, address);
-		if (unlikely(!pages[i])) {
-			while (--i >= 0)
+		if (unlikely(!pages[i] ||
+			     mem_cgroup_newpage_charge(pages[i], mm,
+						       GFP_KERNEL))) {
+			if (pages[i])
 				put_page(pages[i]);
+			mem_cgroup_uncharge_start();
+			while (--i >= 0) {
+				mem_cgroup_uncharge_page(pages[i]);
+				put_page(pages[i]);
+			}
+			mem_cgroup_uncharge_end();
 			kfree(pages);
 			ret |= VM_FAULT_OOM;
 			goto out;
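
The fallback path charges HPAGE_PMD_NR small pages one by one, so a failure partway through must unwind every page already charged. mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end() bracket the loop so the uncharges are coalesced into one batch instead of hitting the memcg counters once per page. A sketch of the unwind in isolation; the helper name is illustrative only:

	/*
	 * Sketch: undo the first i successfully charged small pages
	 * in one memcg uncharge batch.  unwind_charged_pages() is a
	 * hypothetical helper, not code from this patch.
	 */
	static void unwind_charged_pages(struct page **pages, int i)
	{
		mem_cgroup_uncharge_start();
		while (--i >= 0) {
			mem_cgroup_uncharge_page(pages[i]);
			put_page(pages[i]);
		}
		mem_cgroup_uncharge_end();
	}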
@@ -455,8 +469,12 @@ out:
 
 out_free_pages:
 	spin_unlock(&mm->page_table_lock);
-	for (i = 0; i < HPAGE_PMD_NR; i++)
+	mem_cgroup_uncharge_start();
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		mem_cgroup_uncharge_page(pages[i]);
 		put_page(pages[i]);
+	}
+	mem_cgroup_uncharge_end();
 	kfree(pages);
 	goto out;
 }
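
out_free_pages takes the same batched form, but over all HPAGE_PMD_NR pages: by the time this label is reachable, every small page has been successfully charged.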
@@ -501,14 +519,22 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 	}
 
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+		put_page(new_page);
+		put_page(page);
+		ret |= VM_FAULT_OOM;
+		goto out;
+	}
+
 	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
 	__SetPageUptodate(new_page);
 
 	spin_lock(&mm->page_table_lock);
 	put_page(page);
-	if (unlikely(!pmd_same(*pmd, orig_pmd)))
+	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+		mem_cgroup_uncharge_page(new_page);
 		put_page(new_page);
-	else {
+	} else {
 		pmd_t entry;
 		VM_BUG_ON(!PageHead(page));
 		entry = mk_pmd(new_page, vma->vm_page_prot);
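
Note the ordering in the COW path: new_page is charged before the (potentially long) copy_user_huge_page(), and the pmd is revalidated only after the page table lock is retaken. If another thread changed the pmd in the meantime, backing out needs exactly the uncharge + put_page pair; on success the charge simply stays with the newly mapped page.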