author     Linus Torvalds <torvalds@g5.osdl.org>  2005-11-28 17:34:23 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-28 17:34:23 -0500
commit     6aab341e0a28aff100a09831c5300a2994b8b986
tree       1af3908275aa5e1b16e80efee554a9a7504c56d4 /mm/rmap.c
parent     458af5439fe7ae7d95ca14106844e61f0795166c
mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNPAGED logic with very
explicit support for a "remapped page range" aka VM_PFNMAP. It allows a
VM area to contain an arbitrary range of page table entries that the VM
never touches, and never considers to be normal pages.
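The test that separates the two kinds of ptes lives in a new helper,
vm_normal_page(), which this commit adds in mm/memory.c. The version below
is a simplified sketch of that test, not the committed code verbatim:
remap_pfn_range() records the first mapped pfn in vm_pgoff, so any pte that
still follows the linear pfn layout is recognized as a raw mapping with no
struct page behind it.

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (vma->vm_flags & VM_PFNMAP) {
		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

		/* Raw pfn mapping: no struct page, the VM keeps its
		 * hands off entirely. */
		if (pfn == vma->vm_pgoff + off)
			return NULL;
	}

	/* Sanity check: a pte whose pfn lies outside the mem_map
	 * is corrupt, so complain and bail out. */
	if (unlikely(!pfn_valid(pfn))) {
		print_bad_pte(vma, pte, addr);
		return NULL;
	}

	/* A normal page the VM is allowed to track and manipulate. */
	return pfn_to_page(pfn);
}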
Any user of "remap_pfn_range()" automatically gets this new
functionality, and doesn't even have to mark the pages reserved or
indeed mark them any other way. It just works. As a side effect, doing
mmap() on /dev/mem works for arbitrary ranges.
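In practice that keeps a character driver's mmap() hook a one-liner. The
sketch below is illustrative only: the device, its physical base address,
and the foo_* names are made up, but the remap_pfn_range() call is the
real interface.

/* Hypothetical driver: expose an assumed MMIO window at FOO_PHYS_BASE
 * to userspace.  No PageReserved marking of any kind is required. */
#define FOO_PHYS_BASE	0xfd000000UL

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Sets VM_PFNMAP on the vma and fills in the ptes; the VM
	 * treats the whole range as untouchable raw pfns from then on. */
	return remap_pfn_range(vma, vma->vm_start,
			       FOO_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}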
Sparc update from David in the next commit.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  14
1 file changed, 2 insertions(+), 12 deletions(-)
@@ -226,8 +226,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 /*
  * At what user virtual address is page expected in vma? checking that the
  * page matches the vma: currently only used on anon pages, by unuse_vma;
- * and by extraordinary checks on anon pages in VM_UNPAGED vmas, taking
- * care that an mmap of /dev/mem might window free and foreign pages.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
@@ -614,7 +612,6 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	struct page *page;
 	unsigned long address;
 	unsigned long end;
-	unsigned long pfn;
 
 	address = (vma->vm_start + cursor) & CLUSTER_MASK;
 	end = address + CLUSTER_SIZE;
@@ -643,15 +640,8 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	for (; address < end; pte++, address += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
-
-		pfn = pte_pfn(*pte);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, *pte, address);
-			continue;
-		}
-
-		page = pfn_to_page(pfn);
-		BUG_ON(PageAnon(page));
+		page = vm_normal_page(vma, address, *pte);
+		BUG_ON(!page || PageAnon(page));
 
 		if (ptep_clear_flush_young(vma, address, pte))
 			continue;
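The rmap side of the change is pure simplification: the open-coded
pfn_valid()/pfn_to_page() sequence in try_to_unmap_cluster() collapses into
a single vm_normal_page() call, and the strengthened BUG_ON() records the
invariant that a nonlinear file vma only ever maps normal, non-anonymous
pages at this point, so the helper must return a page here.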