Diffstat (limited to 'mm/rmap.c'):

 -rw-r--r--  mm/rmap.c | 59
 1 file changed, 59 insertions, 0 deletions
@@ -56,6 +56,7 @@
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
+#include <linux/hugetlb.h>
 
 #include <asm/tlbflush.h>
 
@@ -350,6 +351,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 	unsigned long address;
 
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		pgoff = page->index << huge_page_order(page_hstate(page));
 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
 		/* page should be within @vma mapping range */
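
A note on the pgoff arithmetic in this hunk: for a hugetlbfs page, page->index is kept in huge-page units, while vma->vm_pgoff is always in base-page (PAGE_SIZE) units, so the index must be scaled by huge_page_order() before the usual linear-address computation. A worked example with assumed values (x86_64, 4KB base pages, 2MB hugepages, so huge_page_order() == 9):

	/*
	 * Illustrative numbers only, not taken from the patch:
	 * page->index == 3 means byte offset 3 << (9 + 12) == 6MB
	 * into the hugetlbfs file.
	 */
	pgoff = 3 << 9;			/* 1536 base-page units */
	/* with vma->vm_start == 0x400000 and vma->vm_pgoff == 0: */
	address = 0x400000 + ((1536 - 0) << 12);	/* == 0xa00000 */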
@@ -394,6 +397,12 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	if (unlikely(PageHuge(page))) {
+		pte = huge_pte_offset(mm, address);
+		ptl = &mm->page_table_lock;
+		goto check;
+	}
+
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		return NULL;
@@ -414,6 +423,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	}
 
 	ptl = pte_lockptr(mm, pmd);
+check:
 	spin_lock(ptl);
 	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
 		*ptlp = ptl;
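
These two hunks teach page_check_address() about hugepages: huge_pte_offset() locates the huge PTE directly (on most architectures it lives at the PMD or PUD level, so the normal four-level walk below would miss it), and the lock taken is the mm-wide page_table_lock, since hugetlb page tables are not covered by the split per-PTE locks in this kernel. The calling convention is unchanged; a minimal sketch of the usual pattern, simplified from existing callers such as try_to_unmap_one():

	pte_t *pte;
	spinlock_t *ptl;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return SWAP_AGAIN;	/* not mapped at this address */
	/* ... inspect or modify *pte while holding *ptl ... */
	pte_unmap_unlock(pte, ptl);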
@@ -916,6 +926,12 @@ void page_remove_rmap(struct page *page)
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}
+	/*
+	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
+	 * and not charged by memcg for now.
+	 */
+	if (unlikely(PageHuge(page)))
+		return;
 	if (PageAnon(page)) {
 		mem_cgroup_uncharge_page(page);
 		__dec_zone_page_state(page, NR_ANON_PAGES);
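
Note the placement: the _mapcount decrement at the top of page_remove_rmap() has already run by this point, so the early return only skips the per-zone statistics and memcg uncharge that follow. For reference, the unchanged opening of the function in this kernel reads:

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;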
@@ -1524,3 +1540,46 @@ int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
 	return rmap_walk_file(page, rmap_one, arg);
 }
 #endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * The following three functions are for anonymous (private mapped) hugepages.
+ * Unlike common anonymous pages, anonymous hugepages have no accounting code
+ * and no lru code, because we handle hugepages differently from common pages.
+ */
+static void __hugepage_set_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address, int exclusive)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+	BUG_ON(!anon_vma);
+	if (!exclusive) {
+		struct anon_vma_chain *avc;
+		avc = list_entry(vma->anon_vma_chain.prev,
+			struct anon_vma_chain, same_vma);
+		anon_vma = avc->anon_vma;
+	}
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	page->mapping = (struct address_space *) anon_vma;
+	page->index = linear_page_index(vma, address);
+}
+
+void hugepage_add_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+	int first;
+	BUG_ON(!anon_vma);
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	first = atomic_inc_and_test(&page->_mapcount);
+	if (first)
+		__hugepage_set_anon_rmap(page, vma, address, 0);
+}
+
+void hugepage_add_new_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	atomic_set(&page->_mapcount, 0);
+	__hugepage_set_anon_rmap(page, vma, address, 1);
+}
+#endif /* CONFIG_HUGETLB_PAGE */
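
Two remarks on the new helpers. In the non-exclusive case, __hugepage_set_anon_rmap() takes the entry at the tail of vma->anon_vma_chain, i.e. the oldest anon_vma in the chain, so a hugepage shared across fork stays reachable from every process that may still map it; an exclusively owned page can point at the vma's own anon_vma directly. The mm/hugetlb.c callers are outside this diffstat, but the intended usage mirrors page_add_anon_rmap()/page_add_new_anon_rmap(); a sketch with hypothetical variable names:

	/* an existing anonymous hugepage gains another mapping: */
	hugepage_add_anon_rmap(ptepage, vma, address);

	/* a freshly allocated hugepage is mapped for the first time,
	 * e.g. in the hugetlb copy-on-write path: */
	hugepage_add_new_anon_rmap(new_page, vma, address);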
