Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 618e98304080..207464209546 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2269,14 +2269,12 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 
 int hugetlb_reserve_pages(struct inode *inode,
 					long from, long to,
-					struct vm_area_struct *vma)
+					struct vm_area_struct *vma,
+					int acctflag)
 {
-	long ret, chg;
+	long ret = 0, chg;
 	struct hstate *h = hstate_inode(inode);
 
-	if (vma && vma->vm_flags & VM_NORESERVE)
-		return 0;
-
 	/*
 	 * Shared mappings base their reservation on the number of pages that
 	 * are already allocated on behalf of the file. Private mappings need
@@ -2285,22 +2283,25 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 */
 	if (!vma || vma->vm_flags & VM_SHARED)
 		chg = region_chg(&inode->i_mapping->private_list, from, to);
-	else {
-		struct resv_map *resv_map = resv_map_alloc();
-		if (!resv_map)
-			return -ENOMEM;
-
+	else
 		chg = to - from;
 
-		set_vma_resv_map(vma, resv_map);
-		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
-	}
-
 	if (chg < 0)
 		return chg;
 
 	if (hugetlb_get_quota(inode->i_mapping, chg))
 		return -ENOSPC;
+
+	/*
+	 * Only apply hugepage reservation if asked. We still have to
+	 * take the filesystem quota because it is an upper limit
+	 * defined for the mount and not necessarily memory as a whole
+	 */
+	if (acctflag & VM_NORESERVE) {
+		reset_vma_resv_huge_pages(vma);
+		return 0;
+	}
+
 	ret = hugetlb_acct_memory(h, chg);
 	if (ret < 0) {
 		hugetlb_put_quota(inode->i_mapping, chg);
@@ -2308,6 +2309,16 @@ int hugetlb_reserve_pages(struct inode *inode,
 	}
 	if (!vma || vma->vm_flags & VM_SHARED)
 		region_add(&inode->i_mapping->private_list, from, to);
+	else {
+		struct resv_map *resv_map = resv_map_alloc();
+
+		if (!resv_map)
+			return -ENOMEM;
+
+		set_vma_resv_map(vma, resv_map);
+		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
+	}
+
 	return 0;
 }
 
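
For context, a minimal user-space sketch of the behaviour this change is aimed at: when a hugetlbfs mapping is created with MAP_NORESERVE, the mmap path is expected to pass VM_NORESERVE through the new acctflag argument, so hugetlb_reserve_pages() skips the up-front huge page reservation while still charging the hugetlbfs quota. This is an illustrative sketch, not part of the patch; the mount point, file name, and 2MB huge page size below are assumptions to adjust for your system.

/*
 * noreserve-demo.c - user-space sketch of the case this change targets.
 * Assumptions: a hugetlbfs mount at /mnt/huge and a 2MB huge page size.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define HUGETLB_FILE "/mnt/huge/noreserve-demo"	/* assumed hugetlbfs mount */
#define MAP_LENGTH   (2UL * 1024 * 1024)	/* assumed 2MB huge page size */

int main(void)
{
	int fd = open(HUGETLB_FILE, O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/*
	 * MAP_NORESERVE asks for no up-front huge page reservation, the case
	 * the acctflag & VM_NORESERVE path in hugetlb_reserve_pages() covers;
	 * the filesystem quota is still charged. Touching the mapping can
	 * SIGBUS if no huge page is free at fault time.
	 */
	char *addr = mmap(NULL, MAP_LENGTH, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_NORESERVE, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		close(fd);
		unlink(HUGETLB_FILE);
		return EXIT_FAILURE;
	}

	addr[0] = 1;	/* first touch faults in a huge page, if one is free */

	munmap(addr, MAP_LENGTH);
	close(fd);
	unlink(HUGETLB_FILE);
	return EXIT_SUCCESS;
}

Build with something like cc -o noreserve-demo noreserve-demo.c and run it with write access to the hugetlbfs mount; without MAP_NORESERVE the same mmap() would instead fail up front when the reservation cannot be satisfied.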
