author | Mel Gorman <mel@csn.ul.ie> | 2009-02-11 11:34:16 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-02-11 15:38:09 -0500
commit | 17c9d12e126cb0de8d535dc1908c4819d712bc68 (patch)
tree | a1a9a07e2eb70e8c474d3bd040d724413bde9718 /mm/hugetlb.c
parent | 6c6f1f0f4db31a192916eaa31ec2f114fda7d5e5 (diff)
Do not account for hugetlbfs quota at mmap() time if mapping [SHM|MAP]_NORESERVE
Commit 5a6fe125950676015f5108fb71b2a67441755003 brought hugetlbfs more
in line with the core VM by obeying VM_NORESERVE and not reserving
hugepages for both shared and private mappings when [SHM|MAP]_NORESERVE
are specified. However, it is still taking filesystem quota
unconditionally.
At fault time, if there are no reserves, an attempt is made to allocate
the page and to account for filesystem quota. If either fails, the fault
fails. The impact is that quota is getting accounted for twice. This
patch partially reverts 5a6fe125950676015f5108fb71b2a67441755003. To
help prevent this mistake from happening again, it also improves the
documentation of hugetlb_reserve_pages().
Reported-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
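For illustration, the mappings affected are hugetlbfs-backed mappings created with MAP_NORESERVE (or SysV shared memory created with SHM_NORESERVE, shown after the patch below). Here is a minimal userspace sketch, not part of the patch, assuming hugetlbfs is mounted at the hypothetical path /mnt/huge, a 2MB huge page size, and a non-empty huge page pool. It maps a hugetlbfs file with MAP_NORESERVE and prints HugePages_Rsvd from /proc/meminfo, which should not increase at mmap() time; allocation (and, with this patch, quota accounting) happens only when the page is first touched.

```c
/* Illustrative only: /mnt/huge is an assumed hugetlbfs mount point. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HUGEPAGE_SZ (2UL * 1024 * 1024)	/* assume 2MB huge pages */

static void show_rsvd(const char *when)
{
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "HugePages_Rsvd", 14))
			printf("%s: %s", when, line);
	fclose(f);
}

int main(void)
{
	int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
	void *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	show_rsvd("before mmap");

	/*
	 * MAP_NORESERVE: no huge page reservation (and, after this patch,
	 * no hugetlbfs quota) is charged at mmap() time.
	 */
	p = mmap(NULL, HUGEPAGE_SZ, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_NORESERVE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	show_rsvd("after mmap");

	/* Touching the page allocates a huge page and charges quota now. */
	memset(p, 0, HUGEPAGE_SZ);
	show_rsvd("after fault");

	munmap(p, HUGEPAGE_SZ);
	close(fd);
	unlink("/mnt/huge/demo");
	return 0;
}
```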
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 53
1 file changed, 33 insertions, 20 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 20746420954..107da3d809a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2272,10 +2272,18 @@ int hugetlb_reserve_pages(struct inode *inode,
 					struct vm_area_struct *vma,
 					int acctflag)
 {
-	long ret = 0, chg;
+	long ret, chg;
 	struct hstate *h = hstate_inode(inode);
 
 	/*
+	 * Only apply hugepage reservation if asked. At fault time, an
+	 * attempt will be made for VM_NORESERVE to allocate a page
+	 * and filesystem quota without using reserves
+	 */
+	if (acctflag & VM_NORESERVE)
+		return 0;
+
+	/*
 	 * Shared mappings base their reservation on the number of pages that
 	 * are already allocated on behalf of the file. Private mappings need
 	 * to reserve the full area even if read-only as mprotect() may be
@@ -2283,42 +2291,47 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 */
 	if (!vma || vma->vm_flags & VM_SHARED)
 		chg = region_chg(&inode->i_mapping->private_list, from, to);
-	else
+	else {
+		struct resv_map *resv_map = resv_map_alloc();
+		if (!resv_map)
+			return -ENOMEM;
+
 		chg = to - from;
 
+		set_vma_resv_map(vma, resv_map);
+		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
+	}
+
 	if (chg < 0)
 		return chg;
 
+	/* There must be enough filesystem quota for the mapping */
 	if (hugetlb_get_quota(inode->i_mapping, chg))
 		return -ENOSPC;
 
 	/*
-	 * Only apply hugepage reservation if asked. We still have to
-	 * take the filesystem quota because it is an upper limit
-	 * defined for the mount and not necessarily memory as a whole
+	 * Check enough hugepages are available for the reservation.
+	 * Hand back the quota if there are not
 	 */
-	if (acctflag & VM_NORESERVE) {
-		reset_vma_resv_huge_pages(vma);
-		return 0;
-	}
-
 	ret = hugetlb_acct_memory(h, chg);
 	if (ret < 0) {
 		hugetlb_put_quota(inode->i_mapping, chg);
 		return ret;
 	}
+
+	/*
+	 * Account for the reservations made. Shared mappings record regions
+	 * that have reservations as they are shared by multiple VMAs.
+	 * When the last VMA disappears, the region map says how much
+	 * the reservation was and the page cache tells how much of
+	 * the reservation was consumed. Private mappings are per-VMA and
+	 * only the consumed reservations are tracked. When the VMA
+	 * disappears, the original reservation is the VMA size and the
+	 * consumed reservations are stored in the map. Hence, nothing
+	 * else has to be done for private mappings here
+	 */
 	if (!vma || vma->vm_flags & VM_SHARED)
 		region_add(&inode->i_mapping->private_list, from, to);
-	else {
-		struct resv_map *resv_map = resv_map_alloc();
-
-		if (!resv_map)
-			return -ENOMEM;
-
-		set_vma_resv_map(vma, resv_map);
-		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
-	}
-
 	return 0;
 }
 
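For completeness, the SHM_NORESERVE side of the same behaviour: a rough sketch, not part of the patch, of a huge-page-backed SysV segment created without reservations. The flag values are copied from the kernel's shm.h in case the libc headers do not define them, and the segment size assumes a 2MB huge page size. As with the MAP_NORESERVE case above, neither huge pages nor hugetlbfs quota should be charged until the memset() faults the pages in.

```c
/* Illustrative only: flag values copied from the kernel's shm.h. */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB	04000	/* segment is backed by huge pages */
#endif
#ifndef SHM_NORESERVE
#define SHM_NORESERVE	010000	/* don't reserve huge pages up front */
#endif

#define SEG_SIZE (2UL * 1024 * 1024)	/* assume 2MB huge pages */

int main(void)
{
	/*
	 * No huge page reservation or hugetlbfs quota should be charged
	 * here; both happen at fault time when the segment is touched.
	 */
	int id = shmget(IPC_PRIVATE, SEG_SIZE,
			IPC_CREAT | SHM_HUGETLB | SHM_NORESERVE | 0600);
	char *p;

	if (id < 0) {
		perror("shmget");
		return 1;
	}

	p = shmat(id, NULL, 0);
	if (p == (void *)-1) {
		perror("shmat");
		return 1;
	}

	memset(p, 0, SEG_SIZE);	/* faults allocate and charge quota now */

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
```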