Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a088f593a807..0ccc7f230252 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -109,7 +109,7 @@ static int alloc_fresh_huge_page(void)
 	if (nid == MAX_NUMNODES)
 		nid = first_node(node_online_map);
 	if (page) {
-		page[1].lru.next = (void *)free_huge_page;	/* dtor */
+		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
 		nr_huge_pages_node[page_to_nid(page)]++;
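
The hunk above swaps an open-coded store into page[1].lru.next for a named helper. A minimal sketch of what such a helper looks like, with toy stand-ins for the kernel types so it compiles on its own (the real definition lives in include/linux/mm.h and may differ):

	/* Toy stand-ins for the kernel's types; sketch only. */
	struct list_head { void *next, *prev; };
	struct page { struct list_head lru; };

	typedef void compound_page_dtor(struct page *);

	static inline void set_compound_page_dtor(struct page *page,
						  compound_page_dtor *dtor)
	{
		/*
		 * Stash the destructor in the lru.next slot of the first
		 * tail page, exactly the store the old code did by hand.
		 */
		page[1].lru.next = (void *)dtor;
	}

The point of the wrapper is that the destructor convention is named in one place instead of being re-derived at every call site.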
@@ -344,7 +344,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			entry = *src_pte;
 			ptepage = pte_page(entry);
 			get_page(ptepage);
-			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
 			set_huge_pte_at(dst, addr, dst_pte, entry);
 		}
 		spin_unlock(&src->page_table_lock);
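
This hunk, together with the matching deletions below, stops charging huge pages to the per-mm file_rss counter (the update_hiwater_rss() call removed further down existed only to observe that charge). The quantity dropped per huge page was HPAGE_SIZE / PAGE_SIZE base pages. A worked example of that arithmetic, assuming 4 KiB base pages and 2 MiB huge pages (typical x86 values; both constants are per-architecture):

	/* Illustrative sizes only; the real values come from asm headers. */
	#define PAGE_SIZE	(1UL << 12)	/* 4 KiB base page */
	#define HPAGE_SIZE	(1UL << 21)	/* 2 MiB huge page */
	/*
	 * Each mapped huge page used to add HPAGE_SIZE / PAGE_SIZE == 512
	 * to file_rss; after this change, rss no longer counts huge pages.
	 */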
@@ -365,6 +364,11 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	pte_t pte;
 	struct page *page;
 	struct page *tmp;
+	/*
+	 * A page gathering list, protected by per file i_mmap_lock. The
+	 * lock is used to avoid list corruption from multiple unmapping
+	 * of the same page since we are using page->lru.
+	 */
 	LIST_HEAD(page_list);
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
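
The comment added above documents the gather-then-free pattern that the next hunk completes: each huge page is unlinked under page_table_lock, parked on a local list through its embedded page->lru link, and only released after flush_tlb_range() has run, so no CPU can still reach a page through a stale TLB entry by the time it is freed. Because the link lives inside the page itself, two unmappers handling the same page concurrently would corrupt the list; the per-file i_mmap_lock serializes them. A self-contained sketch of the same pattern with illustrative names (a pthread mutex in place of the kernel spinlock, free() in place of put_page()):

	#include <pthread.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

	static void unmap_all(struct node **slots, size_t n)
	{
		struct node *gathered = NULL, *cur, *next;

		pthread_mutex_lock(&table_lock);
		for (size_t i = 0; i < n; i++) {
			cur = slots[i];
			if (!cur)
				continue;
			slots[i] = NULL;	/* clear the "pte" */
			cur->next = gathered;	/* park on the local list */
			gathered = cur;
		}
		pthread_mutex_unlock(&table_lock);

		/* the analogue of flush_tlb_range() would run here */

		for (cur = gathered; cur; cur = next) {	/* deferred release */
			next = cur->next;
			free(cur);
		}
	}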
@@ -372,24 +376,21 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	BUG_ON(end & ~HPAGE_MASK);
 
 	spin_lock(&mm->page_table_lock);
-
-	/* Update high watermark before we lower rss */
-	update_hiwater_rss(mm);
-
 	for (address = start; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
+		if (huge_pmd_unshare(mm, &address, ptep))
+			continue;
+
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
 		if (pte_none(pte))
 			continue;
 
 		page = pte_page(pte);
 		list_add(&page->lru, &page_list);
-		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
-
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
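
huge_pmd_unshare() is the hook for shared hugetlb page tables: when the PMD page backing this range is shared with another mm, it drops this mm's reference to the shared PMD instead of clearing individual entries, and returns nonzero so the loop skips the rest of the iteration. On architectures that never share hugetlb page tables the function degenerates to a stub; a sketch of that trivial case, with toy declarations standing in for the kernel types (the sharing implementation itself is arch-specific and considerably more involved):

	struct mm_struct;		/* toy forward declaration */
	typedef unsigned long pte_t;	/* toy stand-in for the real pte_t */

	int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
			     pte_t *ptep)
	{
		return 0;	/* nothing is shared, nothing to unshare */
	}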
@@ -515,7 +516,6 @@ retry:
 	if (!pte_none(*ptep))
 		goto backout;
 
-	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
@@ -653,11 +653,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
 
+	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
 	spin_lock(&mm->page_table_lock);
 	for (; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
+		if (huge_pmd_unshare(mm, &address, ptep))
+			continue;
 		if (!pte_none(*ptep)) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
@@ -666,6 +669,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
+	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 
 	flush_tlb_range(vma, start, end);
 }
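
The final two hunks bracket the whole walk in hugetlb_change_protection() with the file's i_mmap_lock, taken outside page_table_lock. The ordering appears to be the point: huge_pmd_unshare() operates on page-table pages shared through the file's mapping, so the per-file lock is held around it, nested outside the per-mm page-table lock so that this path and the unmap path agree on lock order. Compressed from the diff above, the resulting nesting is:

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);	/* outer, per file */
	spin_lock(&mm->page_table_lock);			/* inner, per mm */
	/* huge_pte_offset() / huge_pmd_unshare() / pte updates */
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);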