author	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-28 17:34:23 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-28 17:34:23 -0500
commit	6aab341e0a28aff100a09831c5300a2994b8b986 (patch)
tree	1af3908275aa5e1b16e80efee554a9a7504c56d4 /mm/mempolicy.c
parent	458af5439fe7ae7d95ca14106844e61f0795166c (diff)
mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNPAGED logic with very explicit support for a "remapped page range" aka VM_PFNMAP. It allows a VM area to contain an arbitrary range of page table entries that the VM never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new functionality, and doesn't even have to mark the pages reserved or indeed mark them any other way. It just works. As a side effect, doing mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
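As a concrete illustration of the "it just works" claim, here is a minimal sketch of a driver mmap handler; the device and the name mydev_mmap are hypothetical and not part of this patch. The single remap_pfn_range() call is all a driver needs, and the resulting VMA is then treated as a remapped PFN range:

	#include <linux/fs.h>
	#include <linux/mm.h>

	/*
	 * Hypothetical character-device mmap handler.  remap_pfn_range()
	 * fills the VMA with raw PFN mappings; with this patch such a VMA
	 * is flagged VM_PFNMAP and its page table entries are never treated
	 * as normal pages by the VM -- no marking pages reserved required.
	 */
	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		/* vma->vm_pgoff carries the page frame number passed as the mmap() offset */
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       size, vma->vm_page_prot);
	}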
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	| 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5609a31bdf22..bec88c81244e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -189,17 +189,15 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
-		unsigned long pfn;
+		struct page *page;
 		unsigned int nid;
 
 		if (!pte_present(*pte))
 			continue;
-		pfn = pte_pfn(*pte);
-		if (!pfn_valid(pfn)) {
-			print_bad_pte(vma, *pte, addr);
+		page = vm_normal_page(vma, addr, *pte);
+		if (!page)
 			continue;
-		}
-		nid = pfn_to_nid(pfn);
+		nid = page_to_nid(page);
 		if (!node_isset(nid, *nodes))
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -269,8 +267,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 	first = find_vma(mm, start);
 	if (!first)
 		return ERR_PTR(-EFAULT);
-	if (first->vm_flags & VM_UNPAGED)
-		return ERR_PTR(-EACCES);
 	prev = NULL;
 	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
 		if (!vma->vm_next && vma->vm_end < end)
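The per-PTE test that replaces the removed VM_UNPAGED check in check_range() is vm_normal_page(). Its real implementation is added elsewhere in this patch (mm/memory.c) and is not shown on this page; the conceptual sketch below only illustrates why "if (!page) continue;" in the first hunk is enough to skip remapped ranges:

	/*
	 * Conceptual sketch only -- not the actual helper added by this patch.
	 * Inside a VM_PFNMAP area every PTE is a raw PFN mapping with no
	 * struct page the VM should touch, so callers such as
	 * check_pte_range() simply skip the entry when NULL comes back.
	 */
	static struct page *vm_normal_page_sketch(struct vm_area_struct *vma,
						  unsigned long addr, pte_t pte)
	{
		unsigned long pfn = pte_pfn(pte);

		if (vma->vm_flags & VM_PFNMAP)
			return NULL;	/* remapped page range: not a normal page */
		if (!pfn_valid(pfn))
			return NULL;	/* bogus PTE: nothing sane to hand back */
		return pfn_to_page(pfn);
	}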