author     Hugh Dickins <hugh@veritas.com>            2005-11-22 00:32:16 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>      2005-11-22 12:13:42 -0500
commit     101d2be7646b7dd1c367d50208a59b29fce61398 (patch)
tree       5b01c8a93a749cc776744c71c4f79e95ef5d6393
parent     0b14c179a483e71ea41df2aa4a661760063115bd (diff)
[PATCH] unpaged: VM_NONLINEAR VM_RESERVED
There's one peculiar use of VM_RESERVED which the previous patch left behind:
because VM_NONLINEAR's try_to_unmap_cluster uses vm_private_data as a swapout
cursor, but should never meet VM_RESERVED vmas, it was a way of extending
VM_NONLINEAR to VM_RESERVED vmas using vm_private_data for some other purpose.
But that's an empty set - they don't have the populate function required. So
just throw away those VM_RESERVED tests.
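
For reference, this is the shape of the check in sys_remap_file_pages() after the patch, condensed from the mm/fremap.c hunk below (the surrounding setup and error paths are omitted):

	/* Only a shared vma that provides ->populate may be remapped; its
	 * vm_private_data must either be unused or already hold the nonlinear
	 * swapout cursor, i.e. VM_NONLINEAR is set.  The requested range must
	 * lie entirely within this one vma. */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
			end <= vma->vm_end) {
		/* ... install the nonlinear mapping via ->populate ... */
	}

A VM_RESERVED vma never supplies a ->populate method, so it can never pass this test; that is why the extended VM_RESERVED case was dead code.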
But one more interesting test in rmap.c has to go too: try_to_unmap_one will want
to swap out an anonymous page from a VM_RESERVED or VM_UNPAGED area.
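
Concretely, the test in try_to_unmap_one() shrinks to the mlock and referenced-pte checks alone, as in this condensed view of the mm/rmap.c hunk below, so an anonymous page sitting in such an area is no longer skipped:

	/* If the page is mlock()d we cannot swap it out; if it was recently
	 * referenced, reactivate it instead of unmapping. */
	if ((vma->vm_flags & VM_LOCKED) ||
			ptep_clear_flush_young(vma, address, pte)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}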
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   mm/fremap.c |  6
-rw-r--r--   mm/rmap.c   | 15
2 files changed, 7 insertions, 14 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index 94254c5d7a18..007cbad9331e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -204,12 +204,10 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	 * Make sure the vma is shared, that it supports prefaulting,
 	 * and that the remapped range is valid and fully within
 	 * the single existing vma. vm_private_data is used as a
-	 * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
-	 * or VM_LOCKED, but VM_LOCKED could be revoked later on).
+	 * swapout cursor in a VM_NONLINEAR vma.
 	 */
 	if (vma && (vma->vm_flags & VM_SHARED) &&
-		(!vma->vm_private_data ||
-			(vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
+		(!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
 		vma->vm_ops && vma->vm_ops->populate &&
 			end > start && start >= vma->vm_start &&
 			end <= vma->vm_end) {
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -529,10 +529,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 	 * If the page is mlock()d, we cannot swap it out.
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
-	 *
-	 * Pages belonging to VM_RESERVED regions should not happen here.
 	 */
-	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
+	if ((vma->vm_flags & VM_LOCKED) ||
 			ptep_clear_flush_young(vma, address, pte)) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
@@ -727,7 +725,7 @@ static int try_to_unmap_file(struct page *page)
 
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+		if (vma->vm_flags & VM_LOCKED)
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -761,7 +759,7 @@ static int try_to_unmap_file(struct page *page)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+			if (vma->vm_flags & VM_LOCKED)
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
@@ -783,11 +781,8 @@ static int try_to_unmap_file(struct page *page)
 	 * in locked vmas).  Reset cursor on all unreserved nonlinear
 	 * vmas, now forgetting on which ones it had fallen behind.
 	 */
-	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-						shared.vm_set.list) {
-		if (!(vma->vm_flags & VM_RESERVED))
-			vma->vm_private_data = NULL;
-	}
+	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
+		vma->vm_private_data = NULL;
 out:
 	spin_unlock(&mapping->i_mmap_lock);
 	return ret;