author    | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-28 17:34:23 -0500
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-28 17:34:23 -0500
commit    | 6aab341e0a28aff100a09831c5300a2994b8b986 (patch)
tree      | 1af3908275aa5e1b16e80efee554a9a7504c56d4 /include/linux/mm.h
parent    | 458af5439fe7ae7d95ca14106844e61f0795166c (diff)
mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNPAGED logic with very
explicit support for a "remapped page range" aka VM_PFNMAP. It allows a
VM area to contain an arbitrary range of page table entries that the VM
never touches, and never considers to be normal pages.
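As a rough illustration of what "never considers to be normal pages" means for code that walks page tables, here is a minimal sketch of a caller using the vm_normal_page() helper whose declaration appears in the diff below. The walker function itself and the mark_page_accessed() call are illustrative assumptions, not part of this patch:

/*
 * Illustrative sketch only: a hypothetical walker that skips the
 * "not a normal page" entries a VM_PFNMAP area may contain.
 */
#include <linux/mm.h>
#include <linux/swap.h>

static void example_touch_pte(struct vm_area_struct *vma,
			      unsigned long addr, pte_t pte)
{
	struct page *page;

	if (!pte_present(pte))
		return;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return;		/* pure PFN mapping: the VM leaves it alone */

	/* only "normal", struct-page-backed pages reach this point */
	mark_page_accessed(page);
}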
Any user of "remap_pfn_range()" automatically gets this new
functionality, and doesn't even have to mark the pages reserved or
indeed mark them any other way. It just works. As a side effect, doing
mmap() on /dev/mem works for arbitrary ranges.
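For example, a character-driver mmap() handler that already calls remap_pfn_range() picks up the new VM_PFNMAP behaviour without any source change. The sketch below shows the typical pattern; the mydev_* names and MYDEV_PHYS_BASE address are hypothetical, not part of this patch:

#include <linux/fs.h>
#include <linux/mm.h>

/* hypothetical physical region exported by the device */
#define MYDEV_PHYS_BASE	0x40000000UL

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (MYDEV_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff;

	/*
	 * Per this patch, remap_pfn_range() flags the vma as a pure PFN
	 * mapping itself, so the pages behind 'pfn' need no reserved bit
	 * or any other special marking from the driver.
	 */
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}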
Sparc update from David in the next commit.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 5
1 file changed, 3 insertions, 2 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f0cdfd18db55..6a75a7a78bf1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -145,7 +145,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
 #define VM_GROWSUP	0x00000200
 #define VM_SHM		0x00000000	/* Means nothing: delete it later */
-#define VM_UNPAGED	0x00000400	/* Pages managed without map count */
+#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
 #define VM_EXECUTABLE	0x00001000
@@ -664,6 +664,7 @@ struct zap_details {
 	unsigned long truncate_count;	/* Compare vm_truncate_count */
 };
 
+struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
 unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -953,7 +954,7 @@ unsigned long vmalloc_to_pfn(void *addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 		unsigned long pfn, unsigned long size, pgprot_t);
 
-struct page *follow_page(struct mm_struct *, unsigned long address,
+struct page *follow_page(struct vm_area_struct *, unsigned long address,
 		unsigned int foll_flags);
 #define FOLL_WRITE	0x01	/* check pte is writable */
 #define FOLL_TOUCH	0x02	/* mark page accessed */
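The definition of vm_normal_page() itself lives on the mm/memory.c side of this patch, which this mm.h-limited view does not show. The following is only a simplified sketch of its VM_PFNMAP case, assuming remap_pfn_range() records the base pfn in vma->vm_pgoff so the linearly remapped entries can be recognised; the sketch_normal_page name is illustrative, not the kernel's:

#include <linux/mm.h>

/*
 * Simplified sketch, not the real mm/memory.c code: in a VM_PFNMAP vma,
 * ptes that follow the linear pfn layout set up by remap_pfn_range()
 * have no struct page the VM should touch, so report "no normal page".
 */
static struct page *sketch_normal_page(struct vm_area_struct *vma,
				       unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (vma->vm_flags & VM_PFNMAP) {
		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

		if (pfn == vma->vm_pgoff + off)
			return NULL;	/* pure PFN mapping, no struct page */
	}

	return pfn_to_page(pfn);	/* an ordinary, VM-managed page */
}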