about summary refs log tree commit diff stats
path: root/include/linux/mm.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@g5.osdl.org>2005-11-28 17:34:23 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-28 17:34:23 -0500
commit6aab341e0a28aff100a09831c5300a2994b8b986 (patch)
tree1af3908275aa5e1b16e80efee554a9a7504c56d4 /include/linux/mm.h
parent458af5439fe7ae7d95ca14106844e61f0795166c (diff)
mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNPAGED logic with very explicit support for a "remapped page range" aka VM_PFNMAP. It allows a VM area to contain an arbitrary range of page table entries that the VM never touches, and never considers to be normal pages. Any user of "remap_pfn_range()" automatically gets this new functionality, and doesn't even have to mark the pages reserved or indeed mark them any other way. It just works. As a side effect, doing mmap() on /dev/mem works for arbitrary ranges. Sparc update from David in the next commit. Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--include/linux/mm.h5
1 file changed, 3 insertions, 2 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f0cdfd18db55..6a75a7a78bf1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -145,7 +145,7 @@ extern unsigned int kobjsize(const void *objp);
145#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ 145#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
146#define VM_GROWSUP 0x00000200 146#define VM_GROWSUP 0x00000200
147#define VM_SHM 0x00000000 /* Means nothing: delete it later */ 147#define VM_SHM 0x00000000 /* Means nothing: delete it later */
148#define VM_UNPAGED 0x00000400 /* Pages managed without map count */ 148#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
149#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ 149#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
150 150
151#define VM_EXECUTABLE 0x00001000 151#define VM_EXECUTABLE 0x00001000
@@ -664,6 +664,7 @@ struct zap_details {
664 unsigned long truncate_count; /* Compare vm_truncate_count */ 664 unsigned long truncate_count; /* Compare vm_truncate_count */
665}; 665};
666 666
667struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
667unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, 668unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
668 unsigned long size, struct zap_details *); 669 unsigned long size, struct zap_details *);
669unsigned long unmap_vmas(struct mmu_gather **tlb, 670unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -953,7 +954,7 @@ unsigned long vmalloc_to_pfn(void *addr);
953int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 954int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
954 unsigned long pfn, unsigned long size, pgprot_t); 955 unsigned long pfn, unsigned long size, pgprot_t);
955 956
956struct page *follow_page(struct mm_struct *, unsigned long address, 957struct page *follow_page(struct vm_area_struct *, unsigned long address,
957 unsigned int foll_flags); 958 unsigned int foll_flags);
958#define FOLL_WRITE 0x01 /* check pte is writable */ 959#define FOLL_WRITE 0x01 /* check pte is writable */
959#define FOLL_TOUCH 0x02 /* mark page accessed */ 960#define FOLL_TOUCH 0x02 /* mark page accessed */