diff options
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 44 |
1 file changed, 29 insertions, 15 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 38633864a93e..ce8cbb29860b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -262,7 +262,7 @@ struct resv_map { | |||
262 | struct list_head regions; | 262 | struct list_head regions; |
263 | }; | 263 | }; |
264 | 264 | ||
265 | struct resv_map *resv_map_alloc(void) | 265 | static struct resv_map *resv_map_alloc(void) |
266 | { | 266 | { |
267 | struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); | 267 | struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); |
268 | if (!resv_map) | 268 | if (!resv_map) |
@@ -274,7 +274,7 @@ struct resv_map *resv_map_alloc(void) | |||
274 | return resv_map; | 274 | return resv_map; |
275 | } | 275 | } |
276 | 276 | ||
277 | void resv_map_release(struct kref *ref) | 277 | static void resv_map_release(struct kref *ref) |
278 | { | 278 | { |
279 | struct resv_map *resv_map = container_of(ref, struct resv_map, refs); | 279 | struct resv_map *resv_map = container_of(ref, struct resv_map, refs); |
280 | 280 | ||
@@ -289,7 +289,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma) | |||
289 | if (!(vma->vm_flags & VM_SHARED)) | 289 | if (!(vma->vm_flags & VM_SHARED)) |
290 | return (struct resv_map *)(get_vma_private_data(vma) & | 290 | return (struct resv_map *)(get_vma_private_data(vma) & |
291 | ~HPAGE_RESV_MASK); | 291 | ~HPAGE_RESV_MASK); |
292 | return 0; | 292 | return NULL; |
293 | } | 293 | } |
294 | 294 | ||
295 | static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) | 295 | static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) |
@@ -1459,11 +1459,11 @@ int hugetlb_report_meminfo(char *buf) | |||
1459 | { | 1459 | { |
1460 | struct hstate *h = &default_hstate; | 1460 | struct hstate *h = &default_hstate; |
1461 | return sprintf(buf, | 1461 | return sprintf(buf, |
1462 | "HugePages_Total: %5lu\n" | 1462 | "HugePages_Total: %5lu\n" |
1463 | "HugePages_Free: %5lu\n" | 1463 | "HugePages_Free: %5lu\n" |
1464 | "HugePages_Rsvd: %5lu\n" | 1464 | "HugePages_Rsvd: %5lu\n" |
1465 | "HugePages_Surp: %5lu\n" | 1465 | "HugePages_Surp: %5lu\n" |
1466 | "Hugepagesize: %5lu kB\n", | 1466 | "Hugepagesize: %8lu kB\n", |
1467 | h->nr_huge_pages, | 1467 | h->nr_huge_pages, |
1468 | h->free_huge_pages, | 1468 | h->free_huge_pages, |
1469 | h->resv_huge_pages, | 1469 | h->resv_huge_pages, |
@@ -1747,10 +1747,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | |||
1747 | * from other VMAs and let the children be SIGKILLed if they are faulting the | 1747 | * from other VMAs and let the children be SIGKILLed if they are faulting the |
1748 | * same region. | 1748 | * same region. |
1749 | */ | 1749 | */ |
1750 | int unmap_ref_private(struct mm_struct *mm, | 1750 | static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, |
1751 | struct vm_area_struct *vma, | 1751 | struct page *page, unsigned long address) |
1752 | struct page *page, | ||
1753 | unsigned long address) | ||
1754 | { | 1752 | { |
1755 | struct vm_area_struct *iter_vma; | 1753 | struct vm_area_struct *iter_vma; |
1756 | struct address_space *mapping; | 1754 | struct address_space *mapping; |
@@ -2073,6 +2071,14 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address, | |||
2073 | return NULL; | 2071 | return NULL; |
2074 | } | 2072 | } |
2075 | 2073 | ||
2074 | static int huge_zeropage_ok(pte_t *ptep, int write, int shared) | ||
2075 | { | ||
2076 | if (!ptep || write || shared) | ||
2077 | return 0; | ||
2078 | else | ||
2079 | return huge_pte_none(huge_ptep_get(ptep)); | ||
2080 | } | ||
2081 | |||
2076 | int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | 2082 | int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, |
2077 | struct page **pages, struct vm_area_struct **vmas, | 2083 | struct page **pages, struct vm_area_struct **vmas, |
2078 | unsigned long *position, int *length, int i, | 2084 | unsigned long *position, int *length, int i, |
@@ -2082,6 +2088,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2082 | unsigned long vaddr = *position; | 2088 | unsigned long vaddr = *position; |
2083 | int remainder = *length; | 2089 | int remainder = *length; |
2084 | struct hstate *h = hstate_vma(vma); | 2090 | struct hstate *h = hstate_vma(vma); |
2091 | int zeropage_ok = 0; | ||
2092 | int shared = vma->vm_flags & VM_SHARED; | ||
2085 | 2093 | ||
2086 | spin_lock(&mm->page_table_lock); | 2094 | spin_lock(&mm->page_table_lock); |
2087 | while (vaddr < vma->vm_end && remainder) { | 2095 | while (vaddr < vma->vm_end && remainder) { |
@@ -2094,8 +2102,11 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2094 | * first, for the page indexing below to work. | 2102 | * first, for the page indexing below to work. |
2095 | */ | 2103 | */ |
2096 | pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); | 2104 | pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); |
2105 | if (huge_zeropage_ok(pte, write, shared)) | ||
2106 | zeropage_ok = 1; | ||
2097 | 2107 | ||
2098 | if (!pte || huge_pte_none(huge_ptep_get(pte)) || | 2108 | if (!pte || |
2109 | (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) || | ||
2099 | (write && !pte_write(huge_ptep_get(pte)))) { | 2110 | (write && !pte_write(huge_ptep_get(pte)))) { |
2100 | int ret; | 2111 | int ret; |
2101 | 2112 | ||
@@ -2115,8 +2126,11 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2115 | page = pte_page(huge_ptep_get(pte)); | 2126 | page = pte_page(huge_ptep_get(pte)); |
2116 | same_page: | 2127 | same_page: |
2117 | if (pages) { | 2128 | if (pages) { |
2118 | get_page(page); | 2129 | if (zeropage_ok) |
2119 | pages[i] = page + pfn_offset; | 2130 | pages[i] = ZERO_PAGE(0); |
2131 | else | ||
2132 | pages[i] = page + pfn_offset; | ||
2133 | get_page(pages[i]); | ||
2120 | } | 2134 | } |
2121 | 2135 | ||
2122 | if (vmas) | 2136 | if (vmas) |