about summary refs log tree commit diff stats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  35
1 files changed, 26 insertions, 9 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5a16423a512c..e198831276a3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -273,8 +273,8 @@ static long region_count(struct list_head *head, long f, long t)
273 273
274 /* Locate each segment we overlap with, and count that overlap. */ 274 /* Locate each segment we overlap with, and count that overlap. */
275 list_for_each_entry(rg, head, link) { 275 list_for_each_entry(rg, head, link) {
276 int seg_from; 276 long seg_from;
277 int seg_to; 277 long seg_to;
278 278
279 if (rg->to <= f) 279 if (rg->to <= f)
280 continue; 280 continue;
@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2157 kref_get(&reservations->refs); 2157 kref_get(&reservations->refs);
2158} 2158}
2159 2159
2160static void resv_map_put(struct vm_area_struct *vma)
2161{
2162 struct resv_map *reservations = vma_resv_map(vma);
2163
2164 if (!reservations)
2165 return;
2166 kref_put(&reservations->refs, resv_map_release);
2167}
2168
2160static void hugetlb_vm_op_close(struct vm_area_struct *vma) 2169static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2161{ 2170{
2162 struct hstate *h = hstate_vma(vma); 2171 struct hstate *h = hstate_vma(vma);
@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2173 reserve = (end - start) - 2182 reserve = (end - start) -
2174 region_count(&reservations->regions, start, end); 2183 region_count(&reservations->regions, start, end);
2175 2184
2176 kref_put(&reservations->refs, resv_map_release); 2185 resv_map_put(vma);
2177 2186
2178 if (reserve) { 2187 if (reserve) {
2179 hugetlb_acct_memory(h, -reserve); 2188 hugetlb_acct_memory(h, -reserve);
@@ -2213,6 +2222,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2213 } 2222 }
2214 entry = pte_mkyoung(entry); 2223 entry = pte_mkyoung(entry);
2215 entry = pte_mkhuge(entry); 2224 entry = pte_mkhuge(entry);
2225 entry = arch_make_huge_pte(entry, vma, page, writable);
2216 2226
2217 return entry; 2227 return entry;
2218} 2228}
@@ -2498,7 +2508,6 @@ retry_avoidcopy:
2498 if (outside_reserve) { 2508 if (outside_reserve) {
2499 BUG_ON(huge_pte_none(pte)); 2509 BUG_ON(huge_pte_none(pte));
2500 if (unmap_ref_private(mm, vma, old_page, address)) { 2510 if (unmap_ref_private(mm, vma, old_page, address)) {
2501 BUG_ON(page_count(old_page) != 1);
2502 BUG_ON(huge_pte_none(pte)); 2511 BUG_ON(huge_pte_none(pte));
2503 spin_lock(&mm->page_table_lock); 2512 spin_lock(&mm->page_table_lock);
2504 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2513 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
@@ -2991,12 +3000,16 @@ int hugetlb_reserve_pages(struct inode *inode,
2991 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 3000 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2992 } 3001 }
2993 3002
2994 if (chg < 0) 3003 if (chg < 0) {
2995 return chg; 3004 ret = chg;
3005 goto out_err;
3006 }
2996 3007
2997 /* There must be enough pages in the subpool for the mapping */ 3008 /* There must be enough pages in the subpool for the mapping */
2998 if (hugepage_subpool_get_pages(spool, chg)) 3009 if (hugepage_subpool_get_pages(spool, chg)) {
2999 return -ENOSPC; 3010 ret = -ENOSPC;
3011 goto out_err;
3012 }
3000 3013
3001 /* 3014 /*
3002 * Check enough hugepages are available for the reservation. 3015 * Check enough hugepages are available for the reservation.
@@ -3005,7 +3018,7 @@ int hugetlb_reserve_pages(struct inode *inode,
3005 ret = hugetlb_acct_memory(h, chg); 3018 ret = hugetlb_acct_memory(h, chg);
3006 if (ret < 0) { 3019 if (ret < 0) {
3007 hugepage_subpool_put_pages(spool, chg); 3020 hugepage_subpool_put_pages(spool, chg);
3008 return ret; 3021 goto out_err;
3009 } 3022 }
3010 3023
3011 /* 3024 /*
@@ -3022,6 +3035,10 @@ int hugetlb_reserve_pages(struct inode *inode,
3022 if (!vma || vma->vm_flags & VM_MAYSHARE) 3035 if (!vma || vma->vm_flags & VM_MAYSHARE)
3023 region_add(&inode->i_mapping->private_list, from, to); 3036 region_add(&inode->i_mapping->private_list, from, to);
3024 return 0; 3037 return 0;
3038out_err:
3039 if (vma)
3040 resv_map_put(vma);
3041 return ret;
3025} 3042}
3026 3043
3027void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 3044void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)