author	Adam Litke <agl@us.ibm.com>	2006-01-06 03:10:43 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 11:33:22 -0500
commit	86e5216f8d8aa258ba836caffe2613d79cc9aead (patch)
tree	33669c0194490700b575fceb0e5e010a4468a5fe
parent	85ef47f74afe96c8c23eaa605f28cc01443c905f (diff)
[PATCH] Hugetlb: Reorganize hugetlb_fault to prepare for COW
This patch splits the "no_page()" type activity into its own function,
hugetlb_no_page().  hugetlb_fault() becomes the entry point for hugetlb
faults and delegates to the appropriate handler depending on the type of
fault.  Right now we still have only hugetlb_no_page() but a later patch
introduces a COW fault.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "Seth, Rohit" <rohit.seth@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	mm/hugetlb.c	34
1 file changed, 25 insertions(+), 9 deletions(-)
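The diff below is easier to follow with the resulting call structure in
mind: hugetlb_fault() classifies the fault and delegates, while
hugetlb_no_page() does the actual population.  What follows is a minimal,
compilable userspace mock of that dispatch, not kernel code: pte_t, the
VM_FAULT_* values, and the handlers are simplified stand-ins, and the
write/COW branch is only an assumption based on the commit message's
mention of a later COW patch.

/*
 * Userspace sketch of the fault dispatch this patch sets up.  All types
 * and handlers are stand-ins; fault_cow() models the COW handler a later
 * patch introduces and is hypothetical here.
 */
#include <stdio.h>

#define VM_FAULT_OOM	(-1)
#define VM_FAULT_MINOR	1

typedef struct { unsigned long val; } pte_t;

static int pte_none(pte_t pte) { return pte.val == 0; }

/* Missing-page handler: fills in an empty pte (cf. hugetlb_no_page()). */
static int fault_no_page(pte_t *ptep)
{
	ptep->val = 1;	/* pretend we installed a huge page */
	return VM_FAULT_MINOR;
}

/* Hypothetical COW handler the later patch would add. */
static int fault_cow(pte_t *ptep)
{
	(void)ptep;
	return VM_FAULT_MINOR;
}

/* Entry point: classify the fault, then delegate (cf. hugetlb_fault()). */
static int fault(pte_t *ptep, int write_access)
{
	if (!ptep)
		return VM_FAULT_OOM;		/* pte allocation failed */
	if (pte_none(*ptep))
		return fault_no_page(ptep);	/* first touch: populate */
	if (write_access)
		return fault_cow(ptep);		/* write to an existing pte */
	/* Another thread instantiated the pte before our check: just return. */
	return VM_FAULT_MINOR;
}

int main(void)
{
	pte_t pte = { 0 };
	printf("first fault -> %d\n", fault(&pte, 0));	/* no_page path */
	printf("write fault -> %d\n", fault(&pte, 1));	/* cow path */
	return 0;
}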
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fdbbbb90caa2..cf8225108b2f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -376,20 +376,15 @@ out:
 	return page;
 }
 
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access)
+int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pte_t *ptep)
 {
 	int ret = VM_FAULT_SIGBUS;
 	unsigned long idx;
 	unsigned long size;
-	pte_t *pte;
 	struct page *page;
 	struct address_space *mapping;
 
-	pte = huge_pte_alloc(mm, address);
-	if (!pte)
-		goto out;
-
 	mapping = vma->vm_file->f_mapping;
 	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
 		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
@@ -408,11 +403,11 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto backout;
 
 	ret = VM_FAULT_MINOR;
-	if (!pte_none(*pte))
+	if (!pte_none(*ptep))
 		goto backout;
 
 	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
-	set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
+	set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, page));
 	spin_unlock(&mm->page_table_lock);
 	unlock_page(page);
 out:
@@ -426,6 +421,27 @@ backout:
 	goto out;
 }
 
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	pte_t *ptep;
+	pte_t entry;
+
+	ptep = huge_pte_alloc(mm, address);
+	if (!ptep)
+		return VM_FAULT_OOM;
+
+	entry = *ptep;
+	if (pte_none(entry))
+		return hugetlb_no_page(mm, vma, address, ptep);
+
+	/*
+	 * We could get here if another thread instantiated the pte
+	 * before the test above.
+	 */
+	return VM_FAULT_MINOR;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)