author	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-28 17:34:23 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-28 17:34:23 -0500
commit	6aab341e0a28aff100a09831c5300a2994b8b986
tree	1af3908275aa5e1b16e80efee554a9a7504c56d4 /fs/proc
parent	458af5439fe7ae7d95ca14106844e61f0795166c
mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNMAPPED logic with very
explicit support for a "remapped page range" aka VM_PFNMAP. It allows a
VM area to contain an arbitrary range of page table entries that the VM
never touches, and never considers to be normal pages.
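To illustrate what "never considers to be normal pages" means, here is a minimal sketch, not the code added by this commit: the helper name pte_is_normal_page is invented, and the real logic also handles partially remapped areas. For a VM_PFNMAP area the pte carries a raw pfn installed by remap_pfn_range(), so generic VM code must not look up a struct page for it or touch refcounts/rmap.

```c
/*
 * Illustrative sketch only (hypothetical helper, not from this commit):
 * a VM_PFNMAP vma holds raw pfns with no struct page semantics, so the
 * VM simply leaves those entries alone.
 */
static inline int pte_is_normal_page(struct vm_area_struct *vma, pte_t pte)
{
	if (vma->vm_flags & VM_PFNMAP)
		return 0;			/* raw pfn mapping, skip it */
	return pfn_valid(pte_pfn(pte));		/* ordinary page otherwise */
}
```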
Any user of "remap_pfn_range()" automatically gets this new
functionality, and doesn't even have to mark the pages reserved or
indeed mark them any other way. It just works. As a side effect, doing
mmap() on /dev/mem works for arbitrary ranges.
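For context, a minimal sketch of a typical remap_pfn_range() caller; this is hypothetical driver code, not part of this commit, and my_dev_mmap and MY_PHYS_BASE are invented names. The driver's mmap handler simply remaps a physical region into the vma and, after this change, gets the VM_PFNMAP behaviour without marking the backing pages reserved.

```c
/* Hypothetical driver mmap handler; MY_PHYS_BASE is an assumed constant. */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Map the physical range; the vma becomes a VM_PFNMAP area. */
	return remap_pfn_range(vma, vma->vm_start,
			       MY_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
```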
Sparc update from David in the next commit.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--	fs/proc/task_mmu.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9ab97cef0daa..50bd5a8f0446 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -402,12 +402,11 @@ struct numa_maps {
 /*
  * Calculate numa node maps for a vma
  */
-static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
+static struct numa_maps *get_numa_maps(struct vm_area_struct *vma)
 {
+	int i;
 	struct page *page;
 	unsigned long vaddr;
-	struct mm_struct *mm = vma->vm_mm;
-	int i;
 	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
 
 	if (!md)
@@ -420,7 +419,7 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
 	md->node[i] =0;
 
 	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
-		page = follow_page(mm, vaddr, 0);
+		page = follow_page(vma, vaddr, 0);
 		if (page) {
 			int count = page_mapcount(page);
 