Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 97
 1 file changed, 11 insertions(+), 86 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1692dd6cb91..7e87e1b1662 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -50,69 +50,6 @@ extern int sysctl_legacy_va_layout;
  * mmap() functions).
  */
 
-/*
- * This struct defines a memory VMM memory area. There is one of these
- * per VM-area/task. A VM area is any part of the process virtual memory
- * space that has a special rule for the page-fault handlers (ie a shared
- * library, the executable area etc).
- */
-struct vm_area_struct {
-	struct mm_struct * vm_mm;	/* The address space we belong to. */
-	unsigned long vm_start;		/* Our start address within vm_mm. */
-	unsigned long vm_end;		/* The first byte after our end address
-					   within vm_mm. */
-
-	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next;
-
-	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
-	unsigned long vm_flags;		/* Flags, listed below. */
-
-	struct rb_node vm_rb;
-
-	/*
-	 * For areas with an address space and backing store,
-	 * linkage into the address_space->i_mmap prio tree, or
-	 * linkage to the list of like vmas hanging off its node, or
-	 * linkage of vma in the address_space->i_mmap_nonlinear list.
-	 */
-	union {
-		struct {
-			struct list_head list;
-			void *parent;	/* aligns with prio_tree_node parent */
-			struct vm_area_struct *head;
-		} vm_set;
-
-		struct raw_prio_tree_node prio_tree_node;
-	} shared;
-
-	/*
-	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
-	 * list, after a COW of one of the file pages. A MAP_SHARED vma
-	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
-	 * or brk vma (with NULL file) can only be in an anon_vma list.
-	 */
-	struct list_head anon_vma_node;	/* Serialized by anon_vma->lock */
-	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
-
-	/* Function pointers to deal with this struct. */
-	struct vm_operations_struct * vm_ops;
-
-	/* Information about our backing store: */
-	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
-					   units, *not* PAGE_CACHE_SIZE */
-	struct file * vm_file;		/* File we map to (can be NULL). */
-	void * vm_private_data;		/* was vm_pte (shared mem) */
-	unsigned long vm_truncate_count;/* truncate_count or restart_addr */
-
-#ifndef CONFIG_MMU
-	atomic_t vm_usage;		/* refcount (VMAs shared if !MMU) */
-#endif
-#ifdef CONFIG_NUMA
-	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
-#endif
-};
-
 extern struct kmem_cache *vm_area_cachep;
 
 /*
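
This hunk drops struct vm_area_struct from mm.h (in this era the definition moves out of this header rather than going away). As a refresher on the fields above, a minimal sketch, not from this patch, of walking the per-task VMA list via the vm_next linkage; mm->mmap as the list head and mmap_sem locking are assumptions from contemporaneous kernels:

static unsigned long count_vmas_sketch(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	/* Caller must hold mm->mmap_sem for reading. */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		nr++;
	return nr;
}
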
@@ -631,10 +568,6 @@ static inline struct address_space *page_mapping(struct page *page)
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-	else if (unlikely(PageSlab(page)))
-		mapping = NULL;
-#endif
 	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
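
With the CONFIG_SLUB branch gone, page_mapping() reduces to the lines kept above; reassembled here for reference (the opening of the function is not shown in the hunk and is reconstructed from the era's source):

static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
}
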
@@ -715,9 +648,6 @@ static inline int page_mapped(struct page *page)
 extern void show_free_areas(void);
 
 #ifdef CONFIG_SHMEM
-int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
-struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
-					unsigned long addr);
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 #else
 static inline int shmem_lock(struct file *file, int lock,
@@ -725,18 +655,6 @@ static inline int shmem_lock(struct file *file, int lock,
 {
 	return 0;
 }
-
-static inline int shmem_set_policy(struct vm_area_struct *vma,
-				   struct mempolicy *new)
-{
-	return 0;
-}
-
-static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
-						 unsigned long addr)
-{
-	return NULL;
-}
 #endif
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
 
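
The shmem mempolicy hooks leave this header; shmem_file_setup() stays. A hedged sketch of its typical use, loosely modeled on shmem_zero_setup() in mm/shmem.c ("dev/zero" is the conventional name used there; vm_ops setup and some error handling trimmed):

static int zero_setup_sketch(struct vm_area_struct *vma)
{
	loff_t size = vma->vm_end - vma->vm_start;
	struct file *file;

	/* Create an unlinked tmpfs file sized to the mapping. */
	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);	/* drop the old backing file */
	vma->vm_file = file;
	return 0;
}
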
@@ -779,8 +697,6 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
-int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
-			unsigned long size, pgprot_t prot);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
 
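
zeromap_page_range() is retired here while unmap_mapping_range() remains. A sketch, not from this patch, of the surviving interface as truncation paths use it; the rounding idiom and holelen == 0 meaning "to end of file" follow the vmtruncate()-era conventions:

static void unmap_to_eof_sketch(struct inode *inode, loff_t newsize)
{
	/*
	 * Round up so a partially covered final page stays mapped;
	 * even_cows == 1 also drops private COW copies of file pages.
	 */
	unmap_mapping_range(inode->i_mapping, newsize + PAGE_SIZE - 1, 0, 1);
}
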
@@ -1106,8 +1022,6 @@ int write_one_page(struct page *page, int wait);
 /* readahead.c */
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
-#define VM_MAX_CACHE_HIT	256	/* max pages in a row in cache before
-					 * turning readahead off */
 
 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
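
VM_MAX_CACHE_HIT and its "stop readahead after a run of cached pages" heuristic go away. A hedged sketch of driving the remaining entry point, sizing the request from VM_MAX_READAHEAD (kilobytes, per the comment above):

static void readahead_sketch(struct file *filp, pgoff_t index)
{
	unsigned long nr = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;

	/* Best effort: readahead results and errors are advisory only. */
	do_page_cache_readahead(filp->f_mapping, filp, index, nr);
}
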
@@ -1218,5 +1132,16 @@ extern int randomize_va_space;
 
 const char * arch_vma_name(struct vm_area_struct *vma);
 
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+void *vmemmap_alloc_block(unsigned long size, int node);
+void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+int vmemmap_populate_basepages(struct page *start_page,
+						unsigned long pages, int node);
+int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
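
The added declarations are the sparse-vmemmap population hooks. A minimal sketch of the architecture-side wiring, assuming a hypothetical architecture with no huge-page vmemmap support that can defer entirely to the generic base-page walker declared above:

/* Hypothetical arch implementation: back the vmemmap with base pages only. */
int __meminit vmemmap_populate(struct page *start_page,
				unsigned long pages, int node)
{
	return vmemmap_populate_basepages(start_page, pages, node);
}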