diff options
| author | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2006-11-22 13:06:44 -0500 |
|---|---|---|
| committer | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2006-11-22 13:06:44 -0500 |
| commit | 0bd2af46839ad6262d25714a6ec0365db9d6b98f (patch) | |
| tree | dcced72d230d69fd0c5816ac6dd03ab84799a93e /mm/hugetlb.c | |
| parent | e138a5d2356729b8752e88520cc1525fae9794ac (diff) | |
| parent | f26b90440cd74c78fe10c9bd5160809704a9627c (diff) | |
Merge ../scsi-rc-fixes-2.6
Diffstat (limited to 'mm/hugetlb.c')
| -rw-r--r-- | mm/hugetlb.c | 25 |
1 file changed, 23 insertions, 2 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1d709ff528e1..a088f593a807 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -356,8 +356,8 @@ nomem: | |||
| 356 | return -ENOMEM; | 356 | return -ENOMEM; |
| 357 | } | 357 | } |
| 358 | 358 | ||
| 359 | void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | 359 | void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, |
| 360 | unsigned long end) | 360 | unsigned long end) |
| 361 | { | 361 | { |
| 362 | struct mm_struct *mm = vma->vm_mm; | 362 | struct mm_struct *mm = vma->vm_mm; |
| 363 | unsigned long address; | 363 | unsigned long address; |
| @@ -398,6 +398,24 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | |||
| 398 | } | 398 | } |
| 399 | } | 399 | } |
| 400 | 400 | ||
| 401 | void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | ||
| 402 | unsigned long end) | ||
| 403 | { | ||
| 404 | /* | ||
| 405 | * It is undesirable to test vma->vm_file as it should be non-null | ||
| 406 | * for valid hugetlb area. However, vm_file will be NULL in the error | ||
| 407 | * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails, | ||
| 408 | * do_mmap_pgoff() nullifies vma->vm_file before calling this function | ||
| 409 | * to clean up. Since no pte has actually been setup, it is safe to | ||
| 410 | * do nothing in this case. | ||
| 411 | */ | ||
| 412 | if (vma->vm_file) { | ||
| 413 | spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); | ||
| 414 | __unmap_hugepage_range(vma, start, end); | ||
| 415 | spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); | ||
| 416 | } | ||
| 417 | } | ||
| 418 | |||
| 401 | static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, | 419 | static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, |
| 402 | unsigned long address, pte_t *ptep, pte_t pte) | 420 | unsigned long address, pte_t *ptep, pte_t pte) |
| 403 | { | 421 | { |
| @@ -460,6 +478,9 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 460 | retry: | 478 | retry: |
| 461 | page = find_lock_page(mapping, idx); | 479 | page = find_lock_page(mapping, idx); |
| 462 | if (!page) { | 480 | if (!page) { |
| 481 | size = i_size_read(mapping->host) >> HPAGE_SHIFT; | ||
| 482 | if (idx >= size) | ||
| 483 | goto out; | ||
| 463 | if (hugetlb_get_quota(mapping)) | 484 | if (hugetlb_get_quota(mapping)) |
| 464 | goto out; | 485 | goto out; |
| 465 | page = alloc_huge_page(vma, address); | 486 | page = alloc_huge_page(vma, address); |
