author    Jeff Garzik <jgarzik@pobox.com>  2005-09-08 05:43:49 -0400
committer Jeff Garzik <jgarzik@pobox.com>  2005-09-08 05:43:49 -0400
commit    1d6ae775d7a948c9575658eb41184fd2e506c0df
tree      8128a28e89d82f13bb8e3a2160382240c66e2816 /mm/memory.c
parent    739cdbf1d8f0739b80035b80d69d871e33749b86
parent    caf39e87cc1182f7dae84eefc43ca14d54c78ef9
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index e046b7e4b530..788a62810340 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -498,6 +498,17 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
 
+	/*
+	 * Don't copy ptes where a page fault will fill them correctly.
+	 * Fork becomes much lighter when there are big shared or private
+	 * readonly mappings. The tradeoff is that copy_page_range is more
+	 * efficient than faulting.
+	 */
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+		if (!vma->anon_vma)
+			return 0;
+	}
+
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
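The hunk above is a fork-time short-circuit: if a VMA has no anon_vma (no private page has ever been written) and is not hugetlb, nonlinear, or reserved, copy_page_range() returns without walking any page tables, and the child's ptes are filled lazily by the fault path. A minimal userspace sketch of the case this speeds up — the file path and stride are illustrative assumptions, not taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* Any large file works; this path is an assumption for the demo. */
	int fd = open("/boot/vmlinuz", O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }
	off_t len = lseek(fd, 0, SEEK_END);

	/* Read-only private file mapping: no anon_vma is ever attached. */
	char *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	volatile char sum = 0;
	for (off_t i = 0; i < len; i += 4096)
		sum += p[i];		/* populate the parent's ptes */

	/* With the hunk above, fork() skips copying this VMA's ptes. */
	pid_t pid = fork();
	if (pid == 0)
		_exit(0);
	waitpid(pid, NULL, 0);
	printf("forked with %lld read-only bytes mapped\n", (long long)len);
	return 0;
}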
@@ -551,7 +562,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 				    page->index > details->last_index))
 					continue;
 			}
-			ptent = ptep_get_and_clear(tlb->mm, addr, pte);
+			ptent = ptep_get_and_clear_full(tlb->mm, addr, pte,
+							tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 			if (unlikely(!page))
 				continue;
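This hunk and the next thread the mmu_gather's fullmm flag down to the pte-clearing primitives. When an entire address space is being torn down at exit, no other thread can fault on it, so an architecture may clear ptes with cheaper stores and batch or skip flushes; during a partial unmap the flag is false and the careful semantics are kept. A sketch of the kind of generic fallback this implies — an assumption, not quoted from this patch — where architectures without a specialized version simply ignore the hint:

/* Assumed generic fallback, not quoted from this patch: */
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(mm, addr, ptep, full)	\
	ptep_get_and_clear((mm), (addr), (ptep))
#endif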
@@ -579,7 +591,7 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 			continue;
 		if (!pte_file(ptent))
 			free_swap_and_cache(pte_to_swp_entry(ptent));
-		pte_clear(tlb->mm, addr, pte);
+		pte_clear_full(tlb->mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap(pte - 1);
 }
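The companion primitive for ptes with no backing page follows the same pattern; a matching fallback sketch, under the same assumption as above:

/* Assumed generic fallback, not quoted from this patch: */
#ifndef __HAVE_ARCH_PTE_CLEAR_FULL
#define pte_clear_full(mm, addr, ptep, full)	\
	pte_clear((mm), (addr), (ptep))
#endif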
@@ -1944,7 +1956,7 @@ static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
 	 * Fall back to the linear mapping if the fs does not support
 	 * ->populate:
 	 */
-	if (!vma->vm_ops || !vma->vm_ops->populate ||
+	if (!vma->vm_ops->populate ||
 			(write_access && !(vma->vm_flags & VM_SHARED))) {
 		pte_clear(mm, address, pte);
 		return do_no_page(mm, vma, address, write_access, pte, pmd);
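The last hunk drops the !vma->vm_ops test from do_file_page(): a pte_file() pte can only have been installed by remap_file_pages() on a file-backed VMA, and a file-backed VMA always has vm_ops, so the dropped test could never trigger here. For context, the call the surviving branch falls through to looks roughly like this — reconstructed from the kernel of that era, not quoted from the patch:

/* Sketch of the nonlinear-fault handoff below this hunk (assumed);
 * pgoff would have been read earlier via pte_to_pgoff(*pte). */
err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
			    vma->vm_page_prot, pgoff, 0);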
