Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459e..61d380678030 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,6 +394,28 @@ out:
 	return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully.  Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	int ret = VM_FAULT_SIGBUS;
+	pte_t *pte;
+
+	spin_lock(&mm->page_table_lock);
+	pte = huge_pte_offset(mm, address);
+	if (pte && !pte_none(*pte))
+		ret = VM_FAULT_MINOR;
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
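For context, a minimal caller sketch (not part of this diff; the dispatcher name and surrounding fault path are assumptions, not taken from this commit) showing how the generic fault path might hand hugetlb VMAs to the new hugetlb_fault(), so its VM_FAULT_MINOR / VM_FAULT_SIGBUS results propagate back to the arch fault handler:

	/*
	 * Hypothetical dispatcher, illustrative only: hugetlb VMAs are
	 * assumed to be special-cased before the normal pte fault path.
	 */
	#include <linux/mm.h>
	#include <linux/hugetlb.h>

	static int dispatch_fault(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long address, int write_access)
	{
		if (unlikely(is_vm_hugetlb_page(vma)))
			/*
			 * VM_FAULT_MINOR when a huge pte is present (including
			 * the stale-TLB case described in the comment above),
			 * VM_FAULT_SIGBUS when the page was truncated off.
			 */
			return hugetlb_fault(mm, vma, address, write_access);

		return VM_FAULT_SIGBUS;	/* placeholder for the normal pte path */
	}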
