author     Dave Airlie <airlied@starflyer.(none)>    2005-10-22 01:24:35 -0400
committer  Dave Airlie <airlied@linux.ie>            2005-10-22 01:24:35 -0400
commit     23bfc1a339e98510f2ce25a2764a0cfe195faa9e (patch)
tree       51652ad15f85d9d1367ae6f9b8641dfe46b4c501 /mm
parent     312f5726055534be1dc9dd369be13aabd2943fcb (diff)
parent     63172cb3d5ef762dcb60a292bc7f016b85cf6e1f (diff)
merge linus head to drm-mm branch
Diffstat (limited to 'mm')
 -rw-r--r--  mm/hugetlb.c | 22 ++++++++++++++++++++++
 -rw-r--r--  mm/memory.c  | 14 ++------------
2 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459e..61d380678030 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,6 +394,28 @@ out:
 	return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully. Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	int ret = VM_FAULT_SIGBUS;
+	pte_t *pte;
+
+	spin_lock(&mm->page_table_lock);
+	pte = huge_pte_offset(mm, address);
+	if (pte && !pte_none(*pte))
+		ret = VM_FAULT_MINOR;
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
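Note: the new hugetlb_fault() relies on the per-arch huge_pte_offset() to locate the huge pte slot under page_table_lock. For reference, a minimal sketch of what that lookup looked like on i386 around this time (a huge page is mapped by a single pmd entry, so the "huge pte" is the pmd itself); illustrative only, not part of this merge:

/*
 * Sketch, not from this commit: roughly the i386 huge_pte_offset()
 * of the 2.6.14 era. Walk pgd -> pud -> pmd; the pmd entry maps
 * the huge page directly, so return it cast to pte_t *.
 */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return (pte_t *)pmd;
}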
diff --git a/mm/memory.c b/mm/memory.c
index 8c88b973abc5..1db40e935e55 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2045,18 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 
 	inc_page_state(pgfault);
 
-	if (unlikely(is_vm_hugetlb_page(vma))) {
-		if (valid_hugetlb_file_off(vma, address))
-			/* We get here only if there was a stale(zero) TLB entry
-			 * (because of HW prefetching).
-			 * Low-level arch code (if needed) should have already
-			 * purged the stale entry as part of this fault handling.
-			 * Here we just return.
-			 */
-			return VM_FAULT_MINOR;
-		else
-			return VM_FAULT_SIGBUS;	/* mapping truncation does this. */
-	}
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		return hugetlb_fault(mm, vma, address, write_access);
 
 	/*
 	 * We need the page table lock to synchronize with kswapd
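
For context on why hugetlb_fault()'s single return value is enough: arch fault handlers of this era dispatch on the result of handle_mm_fault(), so the VM_FAULT_SIGBUS from a truncated hugetlb mapping becomes a signal to the task, while the stale-TLB VM_FAULT_MINOR just bumps the minor-fault count and retries the access. A condensed sketch along the lines of the i386 do_page_fault() of the time; illustrative only, not part of this merge:

/*
 * Sketch, not from this commit: how the caller consumes the
 * fault result. VM_FAULT_MINOR counts a minor fault and returns
 * to retry the access; VM_FAULT_SIGBUS jumps to signal delivery
 * (the truncated-mapping case above).
 */
switch (handle_mm_fault(mm, vma, address, write)) {
case VM_FAULT_MINOR:
	tsk->min_flt++;
	break;
case VM_FAULT_MAJOR:
	tsk->maj_flt++;
	break;
case VM_FAULT_SIGBUS:
	goto do_sigbus;
case VM_FAULT_OOM:
	goto out_of_memory;
default:
	BUG();
}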
