author     Linus Torvalds <torvalds@g5.osdl.org>  2005-11-28 17:34:23 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-28 17:34:23 -0500
commit     6aab341e0a28aff100a09831c5300a2994b8b986
tree       1af3908275aa5e1b16e80efee554a9a7504c56d4  /mm/fremap.c
parent     458af5439fe7ae7d95ca14106844e61f0795166c
mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNPAGED logic with very explicit support for a "remapped page range" aka VM_PFNMAP. It allows a VM area to contain an arbitrary range of page table entries that the VM never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new functionality, and doesn't even have to mark the pages reserved or indeed mark them any other way. It just works. As a side effect, doing mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
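For context, the typical consumer of this change is a character-device driver whose mmap() handler maps device memory with remap_pfn_range(); after this commit such a mapping is tagged VM_PFNMAP, so the VM never tries to interpret its PTEs as normal, struct-page-backed pages. A minimal sketch follows; the device base address and the simple_remap_mmap() name are illustrative only, not part of this commit.

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative only: DEVICE_PHYS_BASE and simple_remap_mmap() are made-up
 * names.  The point is that remap_pfn_range() sets up a "remapped page
 * range" (VM_PFNMAP, formerly handled by the VM_UNPAGED logic), so the VM
 * leaves these page table entries alone. */
#define DEVICE_PHYS_BASE	0xfd000000UL

static int simple_remap_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Map the raw physical range straight into the caller's VMA. */
	if (remap_pfn_range(vma, vma->vm_start,
			    DEVICE_PHYS_BASE >> PAGE_SHIFT,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}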
Diffstat (limited to 'mm/fremap.c')
-rw-r--r--  mm/fremap.c  22
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/mm/fremap.c b/mm/fremap.c
index 007cbad9331..f851775e09c 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -27,24 +27,20 @@ static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page = NULL;
 
 	if (pte_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		flush_cache_page(vma, addr, pfn);
+		flush_cache_page(vma, addr, pte_pfn(pte));
 		pte = ptep_clear_flush(vma, addr, ptep);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, pte, addr);
-			goto out;
+		page = vm_normal_page(vma, addr, pte);
+		if (page) {
+			if (pte_dirty(pte))
+				set_page_dirty(page);
+			page_remove_rmap(page);
+			page_cache_release(page);
 		}
-		page = pfn_to_page(pfn);
-		if (pte_dirty(pte))
-			set_page_dirty(page);
-		page_remove_rmap(page);
-		page_cache_release(page);
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear(mm, addr, ptep);
 	}
-out:
 	return !!page;
 }
 
@@ -65,8 +61,6 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte_val;
 	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_UNPAGED);
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
@@ -122,8 +116,6 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte_val;
 	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_UNPAGED);
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
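The zap_pte() hunk above leans on the new vm_normal_page() helper that this commit introduces in mm/memory.c: instead of validating the PFN by hand and special-casing unpaged VMAs, zap_pte() simply asks whether the PTE maps a page the VM is supposed to manage, and only then does the dirty/rmap/refcount work. The following is a deliberately simplified sketch of that idea under my own name, not the actual helper from the commit, which handles more cases.

#include <linux/mm.h>

/* Simplified sketch of the idea behind vm_normal_page(); the real helper
 * in mm/memory.c is more involved.  For a VM_PFNMAP vma the PFN is a raw
 * remapped range with no struct page for the VM to manage, so NULL is
 * returned and callers like zap_pte() skip rmap, refcounting and dirty
 * tracking entirely. */
static struct page *vm_normal_page_sketch(struct vm_area_struct *vma,
					  unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/* Remapped pfn range: never treat these entries as normal pages. */
	if (vma->vm_flags & VM_PFNMAP)
		return NULL;

	/* Outside pfnmap vmas an invalid pfn indicates a corrupt pte. */
	if (unlikely(!pfn_valid(pfn)))
		return NULL;

	return pfn_to_page(pfn);
}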