path: root/mm/hugetlb.c
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  31
1 file changed, 15 insertions, 16 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c6d342d313c7..dae27ba3be2c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,7 +24,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/hugetlb.h>
 #include <linux/node.h>
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * must either hold the mmap_sem for write, or the mmap_sem for read and
  * the hugetlb_instantiation mutex:
  *
- * 	down_write(&mm->mmap_sem);
+ *	down_write(&mm->mmap_sem);
  * or
- * 	down_read(&mm->mmap_sem);
- * 	mutex_lock(&hugetlb_instantiation_mutex);
+ *	down_read(&mm->mmap_sem);
+ *	mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
 	struct list_head link;
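For context: the comment kept above documents the locking protocol for the region lists. A minimal sketch of the read-side ordering it describes, assuming code inside mm/hugetlb.c where hugetlb_instantiation_mutex is in scope (illustrative fragment, not part of the patch):

	down_read(&mm->mmap_sem);
	mutex_lock(&hugetlb_instantiation_mutex);

	/* read or extend the file_region list for this mapping */

	mutex_unlock(&hugetlb_instantiation_mutex);
	up_read(&mm->mmap_sem);

Writers instead take down_write(&mm->mmap_sem), which on its own excludes both readers and other writers.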
@@ -503,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[page_to_nid(page)]--;
 	for (i = 0; i < pages_per_huge_page(h); i++) {
-		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-				1 << PG_private | 1<< PG_writeback);
+		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+				1 << PG_referenced | 1 << PG_dirty |
+				1 << PG_active | 1 << PG_reserved |
+				1 << PG_private | 1 << PG_writeback);
 	}
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
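Note on the reflowed expression: << binds tighter than |, so the mask is (1 << PG_locked) | (1 << PG_error) | ... and the &= ~(...) clears exactly those bits. A standalone userspace sketch of the idiom, with made-up PG_* values standing in for the real ones from <linux/page-flags.h>:

	#include <assert.h>

	enum { PG_locked = 0, PG_error = 1, PG_dirty = 4 };	/* stand-in values */

	int main(void)
	{
		unsigned long flags = ~0UL;	/* start with every bit set */

		/* same shape as the hunk above: clear a set of flag bits */
		flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_dirty);

		assert(!(flags & (1UL << PG_locked)));
		assert(!(flags & (1UL << PG_error)));
		assert(!(flags & (1UL << PG_dirty)));
		return 0;
	}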
@@ -591,7 +592,6 @@ int PageHuge(struct page *page)
 
 	return dtor == free_huge_page;
 }
-
 EXPORT_SYMBOL_GPL(PageHuge);
 
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
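The blank-line removal keeps EXPORT_SYMBOL_GPL() directly under the function it exports. PageHuge() reports whether a page belongs to the hugetlb allocator by comparing the compound page's destructor against free_huge_page, the destructor that prep_new_huge_page() installs and that update_and_free_page() (earlier hunk) clears again. A hypothetical caller sketch, not taken from the patch:

	/* Scanning base pages: a hugetlb page is managed as one unit,
	 * so skip it rather than touching its tail pages individually. */
	if (PageHuge(page))
		continue;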
@@ -2132,9 +2132,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 	pte_t entry;
 
 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
-	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
 		update_mmu_cache(vma, address, ptep);
-	}
 }
 
 
@@ -2189,9 +2188,9 @@ static int is_hugetlb_entry_migration(pte_t pte)
 	if (huge_pte_none(pte) || pte_present(pte))
 		return 0;
 	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_migration_entry(swp)) {
+	if (non_swap_entry(swp) && is_migration_entry(swp))
 		return 1;
-	} else
+	else
 		return 0;
 }
 
@@ -2202,9 +2201,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 	if (huge_pte_none(pte) || pte_present(pte))
 		return 0;
 	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
 		return 1;
-	} else
+	else
 		return 0;
 }
 
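Both helpers drop the braces but keep the if/else that returns 1 or 0. A functionally equivalent sketch (not what the patch does) would return the boolean expression directly:

	static int is_hugetlb_entry_migration(pte_t pte)
	{
		swp_entry_t swp;

		if (huge_pte_none(pte) || pte_present(pte))
			return 0;
		swp = pte_to_swp_entry(pte);
		/* a migration entry uses the swap-entry encoding but is not real swap */
		return non_swap_entry(swp) && is_migration_entry(swp);
	}

and likewise with is_hwpoison_entry() for the hwpoison variant.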
@@ -2567,7 +2566,7 @@ retry:
 	 * So we need to block hugepage fault by PG_hwpoison bit check.
 	 */
 	if (unlikely(PageHWPoison(page))) {
-		ret = VM_FAULT_HWPOISON | 
+		ret = VM_FAULT_HWPOISON |
 			VM_FAULT_SET_HINDEX(h - hstates);
 		goto backout_unlocked;
 	}
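h - hstates is plain pointer arithmetic: hstates[] is the global array of hstates (one per supported huge page size) and h points at one of its elements, so the subtraction yields that hstate's index, which VM_FAULT_SET_HINDEX() packs into the fault return code so the error-handling path can recover the page size involved. A standalone sketch of the idiom, with a stand-in struct rather than the kernel's:

	#include <stdio.h>

	struct hstate { unsigned int order; };	/* stand-in, not the kernel struct */

	static struct hstate hstates[2] = { { 9 }, { 18 } };

	int main(void)
	{
		struct hstate *h = &hstates[1];

		/* subtracting two pointers into the same array gives the index */
		printf("hindex = %td\n", h - hstates);	/* prints 1 */
		return 0;
	}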
@@ -2635,7 +2634,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			migration_entry_wait(mm, (pmd_t *)ptep, address);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
-			return VM_FAULT_HWPOISON_LARGE | 
+			return VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(h - hstates);
 	}
 