Diffstat (limited to 'mm')
 -rw-r--r--  mm/hugetlb.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fdbbbb90caa2..cf8225108b2f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -376,20 +376,15 @@ out:
 	return page;
 }
 
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access)
+int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pte_t *ptep)
 {
 	int ret = VM_FAULT_SIGBUS;
 	unsigned long idx;
 	unsigned long size;
-	pte_t *pte;
 	struct page *page;
 	struct address_space *mapping;
 
-	pte = huge_pte_alloc(mm, address);
-	if (!pte)
-		goto out;
-
 	mapping = vma->vm_file->f_mapping;
 	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
 		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
@@ -408,11 +403,11 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto backout;
 
 	ret = VM_FAULT_MINOR;
-	if (!pte_none(*pte))
+	if (!pte_none(*ptep))
 		goto backout;
 
 	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
-	set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
+	set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, page));
 	spin_unlock(&mm->page_table_lock);
 	unlock_page(page);
 out:
@@ -426,6 +421,27 @@ backout:
 	goto out;
 }
 
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	pte_t *ptep;
+	pte_t entry;
+
+	ptep = huge_pte_alloc(mm, address);
+	if (!ptep)
+		return VM_FAULT_OOM;
+
+	entry = *ptep;
+	if (pte_none(entry))
+		return hugetlb_no_page(mm, vma, address, ptep);
+
+	/*
+	 * We could get here if another thread instantiated the pte
+	 * before the test above.
+	 */
+	return VM_FAULT_MINOR;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)