Diffstat (limited to 'mm/rmap.c'):
 mm/rmap.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 38a336e2eea1..0ad53572eaf2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
+#include <linux/hugetlb.h>
 
 #include <asm/tlbflush.h>
 
@@ -326,6 +327,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 	unsigned long address;
 
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		pgoff = page->index << huge_page_order(page_hstate(page));
 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
 		/* page should be within @vma mapping range */
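
For hugetlb pages, page->index is kept in units of the huge page size rather than PAGE_SIZE, so the hunk above first rescales it by huge_page_order() before the usual offset arithmetic. A minimal standalone sketch of that arithmetic, assuming 4 KB base pages and 2 MB hugepages (neither value comes from this patch, and the numbers are made up for illustration):

/* Standalone userspace illustration of the pgoff rescaling in vma_address().
 * Assumes a 64-bit build, 4 KB base pages and 2 MB hugepages (order 9). */
#include <stdio.h>

#define PAGE_SHIFT   12
#define HPAGE_ORDER  9   /* 2 MB / 4 KB = 512 = 1 << 9 */

int main(void)
{
	unsigned long vm_start   = 0x600000000000UL; /* hypothetical vma->vm_start */
	unsigned long vm_pgoff   = 0;                /* vma offset, in 4 KB units */
	unsigned long page_index = 3;                /* hugepage index, in 2 MB units */

	/* Rescale the hugepage index into 4 KB units, as the patch does. */
	unsigned long pgoff = page_index << HPAGE_ORDER;            /* 3 * 512 = 1536 */
	unsigned long address = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);

	printf("pgoff=%lu address=%#lx\n", pgoff, address);          /* 0x600000600000 */
	return 0;
}
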
@@ -369,6 +372,12 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	if (unlikely(PageHuge(page))) {
+		pte = huge_pte_offset(mm, address);
+		ptl = &mm->page_table_lock;
+		goto check;
+	}
+
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		return NULL;
@@ -389,6 +398,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	}
 
 	ptl = pte_lockptr(mm, pmd);
+check:
 	spin_lock(ptl);
 	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
 		*ptlp = ptl;
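
With the two hunks above, a hugepage takes a short cut through page_check_address(): the pgd/pud/pmd walk is skipped, huge_pte_offset() supplies the pte, and the per-mm page_table_lock stands in for a split pte lock. A hypothetical caller might look like the sketch below; the function name and its use here are illustrative and not part of this patch:

/* Hypothetical illustration, not part of this patch: probing whether
 * @page is currently mapped at its linear address within @vma's mm. */
static int page_mapped_in_vma_sketch(struct page *page,
				     struct vm_area_struct *vma)
{
	unsigned long address = vma_address(page, vma);
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *pte;

	if (address == -EFAULT)
		return 0;
	/* For a hugepage this now returns huge_pte_offset(mm, address)
	 * locked by mm->page_table_lock; otherwise the normal pte path. */
	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return 0;
	pte_unmap_unlock(pte, ptl);
	return 1;
}
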
@@ -873,6 +883,12 @@ void page_remove_rmap(struct page *page)
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}
+	/*
+	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
+	 * and not charged by memcg for now.
+	 */
+	if (unlikely(PageHuge(page)))
+		return;
 	if (PageAnon(page)) {
 		mem_cgroup_uncharge_page(page);
 		__dec_zone_page_state(page, NR_ANON_PAGES);
@@ -1445,3 +1461,46 @@ int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
 		return rmap_walk_file(page, rmap_one, arg);
 }
 #endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_HUGETLBFS
+/*
+ * The following three functions are for anonymous (private mapped) hugepages.
+ * Unlike common anonymous pages, anonymous hugepages have no accounting code
+ * and no lru code, because we handle hugepages differently from common pages.
+ */
+static void __hugepage_set_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address, int exclusive)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+	BUG_ON(!anon_vma);
+	if (!exclusive) {
+		struct anon_vma_chain *avc;
+		avc = list_entry(vma->anon_vma_chain.prev,
+				 struct anon_vma_chain, same_vma);
+		anon_vma = avc->anon_vma;
+	}
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	page->mapping = (struct address_space *) anon_vma;
+	page->index = linear_page_index(vma, address);
+}
+
+void hugepage_add_anon_rmap(struct page *page,
+			    struct vm_area_struct *vma, unsigned long address)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+	int first;
+	BUG_ON(!anon_vma);
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	first = atomic_inc_and_test(&page->_mapcount);
+	if (first)
+		__hugepage_set_anon_rmap(page, vma, address, 0);
+}
+
+void hugepage_add_new_anon_rmap(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	atomic_set(&page->_mapcount, 0);
+	__hugepage_set_anon_rmap(page, vma, address, 1);
+}
+#endif /* CONFIG_HUGETLBFS */