Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  44
1 file changed, 36 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c1b7414c7bef..d6777060449f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -177,6 +177,9 @@ extern unsigned int kobjsize(const void *objp);
  */
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 
+/* This mask defines which mm->def_flags a process can inherit its parent */
+#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
+
 /*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
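
The mask is consumed at fork time: only the bits in VM_INIT_DEF_MASK survive from the parent's mm->def_flags into the child's, which lets a VM_NOHUGEPAGE default (set via the THP-disable prctl this series introduces) be inherited while other def_flags are not. A one-line sketch of that filtering, modeled on the mm initialization change elsewhere in the series:

	/* during mm setup for a new process (surrounding context assumed): */
	mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;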
@@ -210,6 +213,10 @@ struct vm_fault {
 					 * is set (which is also implied by
 					 * VM_FAULT_ERROR).
 					 */
+	/* for ->map_pages() only */
+	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
+					 * max_pgoff inclusive */
+	pte_t *pte;			/* pte entry associated with ->pgoff */
 };
 
 /*
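
The two new fields extend the fault descriptor for the ->map_pages() hook added just below: the fault path hands the handler the pte slot for ->pgoff and invites it to map already-cached pages for the whole [pgoff, max_pgoff] window in one call. A sketch of how the caller is expected to prime the structure (loosely modeled on this series' fault-around code; computing and clamping start_pgoff, nr_pages, and pte is assumed to happen in the surrounding fault logic):

	struct vm_fault vmf;

	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pgoff = start_pgoff;
	vmf.max_pgoff = start_pgoff + nr_pages - 1;	/* inclusive bound */
	vmf.pte = pte;					/* slot for ->pgoff */
	vmf.flags = flags;				/* propagated from the fault */
	vma->vm_ops->map_pages(vma, &vmf);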
@@ -221,6 +228,7 @@ struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
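
Filesystems opt into fault-around by pointing the new hook at the generic helper declared later in this diff; after this change a typical file-backed mapping's operations look like the following sketch (the struct name is illustrative, the three helpers are the generic ones):

	static const struct vm_operations_struct example_file_vm_ops = {
		.fault		= filemap_fault,
		.map_pages	= filemap_map_pages,	/* new: map extra cached pages around the fault */
		.page_mkwrite	= filemap_page_mkwrite,
	};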
@@ -362,6 +370,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 }
 #endif
 
+extern void kvfree(const void *addr);
+
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
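
kvfree() frees memory that may have come from either kmalloc() or vmalloc(), so callers that allocate with a slab-then-vmalloc fallback no longer need an is_vmalloc_addr() branch on the free side. A sketch of the allocation pattern it pairs with (the helper name is invented):

	/* Try the slab allocator first; fall back to vmalloc() for
	 * requests too large or fragmented for kmalloc(). Either result
	 * can later be released with a single kvfree(p). */
	static void *example_alloc(size_t len)
	{
		void *p = kmalloc(len, GFP_KERNEL | __GFP_NOWARN);

		return p ? p : vmalloc(len);
	}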
@@ -581,6 +591,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 		pte = pte_mkwrite(pte);
 	return pte;
 }
+
+void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+		struct page *page, pte_t *pte, bool write, bool anon);
 #endif
 
 /*
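
do_set_pte() exposes the final step of fault handling, installing a page into a pte, so that a ->map_pages() implementation can populate entries itself; write selects write-access handling and anon selects anonymous versus file rmap accounting. A heavily reduced sketch of the handler side (modeled on filemap_map_pages() further down; page cache lookup, locking, and uptodate checks are elided behind a hypothetical helper):

	pgoff_t off = vmf->pgoff;
	pte_t *pte = vmf->pte;		/* slot for vmf->pgoff */
	unsigned long addr = (unsigned long)vmf->virtual_address;
	struct page *page;

	for (; off <= vmf->max_pgoff; off++, pte++, addr += PAGE_SIZE) {
		if (!pte_none(*pte))
			continue;	/* slot already populated */
		page = grab_cached_page(mapping, off);	/* hypothetical helper */
		if (!page)
			continue;
		/* read-only file-backed page: write == false, anon == false */
		do_set_pte(vma, addr, page, pte, false, false);
	}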
@@ -684,7 +697,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_WIDTH) - 1)
+#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
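
The one-line fix matters on configurations where the NUMA cpupid does not fit into page->flags: there LAST_CPUPID_WIDTH is defined as 0 (the value is kept in a separate page->_last_cpupid field) while LAST_CPUPID_SHIFT still reflects the field's full size. The old WIDTH-based mask collapses to zero in exactly the configuration where it is applied to the out-of-line field (this rationale is inferred from the page-flags layout, not stated in the diff):

	/* With LAST_CPUPID_NOT_IN_PAGE_FLAGS:
	 *   LAST_CPUPID_WIDTH == 0  ->  ((1UL << 0) - 1)      == 0  (old, broken)
	 *   LAST_CPUPID_SHIFT >  0  ->  ((1UL << SHIFT) - 1)  != 0  (new, correct)
	 * so storing "cpupid & LAST_CPUPID_MASK" no longer zeroes the value.
	 */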
@@ -1041,6 +1054,14 @@ extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
 
 int shmem_zero_setup(struct vm_area_struct *);
+#ifdef CONFIG_SHMEM
+bool shmem_mapping(struct address_space *mapping);
+#else
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+	return false;
+}
+#endif
 
 extern int can_do_mlock(void);
 extern int user_shm_lock(size_t, struct user_struct *);
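
shmem_mapping() gives generic code a cheap predicate for "does this address_space belong to shmem/tmpfs", with a constant-false stub when CONFIG_SHMEM is off so callers need no #ifdefs. An illustrative (invented) call site; shmem pages are swap-backed rather than file-backed, which is typically why callers want to distinguish them:

	if (shmem_mapping(file->f_mapping))
		return 0;	/* e.g. skip file-readahead heuristics for tmpfs */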
@@ -1185,6 +1206,7 @@ void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
+int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
 /* Is the vma a continuation of the stack vma above it? */
 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
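
get_cmdline() copies a task's command line from its user address space into a kernel buffer and returns the number of bytes copied; the arguments arrive NUL-separated, as in /proc/<pid>/cmdline. A minimal sketch of a caller:

	char buf[256];
	int len = get_cmdline(task, buf, sizeof(buf));	/* bytes copied, not a string length */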
@@ -1487,9 +1509,15 @@ static inline void pgtable_page_dtor(struct page *page)
 
 #if USE_SPLIT_PMD_PTLOCKS
 
+static struct page *pmd_to_page(pmd_t *pmd)
+{
+	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+	return virt_to_page((void *)((unsigned long) pmd & mask));
+}
+
 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
-	return ptlock_ptr(virt_to_page(pmd));
+	return ptlock_ptr(pmd_to_page(pmd));
 }
 
 static inline bool pgtable_pmd_page_ctor(struct page *page)
@@ -1508,7 +1536,7 @@ static inline void pgtable_pmd_page_dtor(struct page *page)
 	ptlock_free(page);
 }
 
-#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
+#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
 
 #else
 
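
Both changes route through the new pmd_to_page(): a pmd_t * may point at any entry inside a PMD table, and the split lock and pmd_huge_pte slot must be looked up from the struct page of the table itself, so the pointer is first rounded down to the table's base. Worked mask arithmetic under common x86-64 assumptions (4 KiB pages, 8-byte pmd_t):

	/* PTRS_PER_PMD == 512:
	 *   mask       = ~(512 * 8 - 1) = ~0xfff
	 *   pmd        = base + 37 * 8  = base + 0x128   (entry 37, say)
	 *   pmd & mask = base                            (table start)
	 * so every entry of one table resolves to the same struct page,
	 * and therefore the same ptlock and the same pmd_huge_pte.
	 */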
@@ -1652,10 +1680,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn)
 #else
 /* please see mm/page_alloc.c */
 extern int __meminit early_pfn_to_nid(unsigned long pfn);
-#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 /* there is a per-arch backend function. */
 extern int __meminit __early_pfn_to_nid(unsigned long pfn);
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 #endif
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
@@ -1750,6 +1776,9 @@ extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
 
 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
+extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
+				   unsigned long addr, unsigned long len,
+				   unsigned long flags, struct page **pages);
 extern int install_special_mapping(struct mm_struct *mm,
 				   unsigned long addr, unsigned long len,
 				   unsigned long flags, struct page **pages);
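
The underscored variant differs from install_special_mapping() only in its return value: it hands back the newly created VMA (or an ERR_PTR) instead of flattening it to an int, for callers that need to retain the mapping they just installed; the vdso setup code is the sort of caller this series has in mind. Hedged usage sketch (addr, len, and pages assumed set up by the caller):

	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, len,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
				       pages);
	if (IS_ERR(vma))
		return PTR_ERR(vma);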
@@ -1817,9 +1846,11 @@ vm_unmapped_area(struct vm_unmapped_area_info *info)
 extern void truncate_inode_pages(struct address_space *, loff_t);
 extern void truncate_inode_pages_range(struct address_space *,
 				       loff_t lstart, loff_t lend);
+extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 /* mm/page-writeback.c */
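
truncate_inode_pages_final() is intended as the last truncate in an inode's life, called once the inode can no longer gain pages; filesystems use it where they previously called truncate_inode_pages(mapping, 0) at the end of eviction. A sketch of the expected call site (the function name is illustrative):

	static void example_evict_inode(struct inode *inode)
	{
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}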
@@ -1847,9 +1878,6 @@ void page_cache_async_readahead(struct address_space *mapping,
 			       unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
-unsigned long ra_submit(struct file_ra_state *ra,
-			struct address_space *mapping,
-			struct file *filp);
 
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);