Diffstat (limited to 'include/linux/mm.h')

 include/linux/mm.h | 66 +++++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 35 insertions(+), 31 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d006e93d5c93..24c395694f4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -25,6 +25,7 @@ extern unsigned long max_mapnr;
 #endif
 
 extern unsigned long num_physpages;
+extern unsigned long totalram_pages;
 extern void * high_memory;
 extern int page_cluster;
 
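With totalram_pages now exported through mm.h, code that previously re-derived "how much RAM is there" from num_physpages can read the authoritative counter directly. A minimal sketch of a consumer, sizing a hypothetical cache from it (the function name and the 1/256 ratio are ours, for illustration):

#include <linux/mm.h>

/* Size an illustrative cache at roughly 1/256th of total RAM.
 * totalram_pages is in pages, so no byte conversion is needed. */
static unsigned long example_cache_budget_pages(void)
{
	return totalram_pages >> 8;
}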
@@ -34,8 +35,6 @@ extern int sysctl_legacy_va_layout;
 #define sysctl_legacy_va_layout 0
 #endif
 
-extern unsigned long mmap_min_addr;
-
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -105,6 +104,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
+#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
 
 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
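VM_MERGEABLE takes the last free bit in vm_flags to mark vmas whose pages KSM may merge; userspace opts in per-range, typically via madvise(MADV_MERGEABLE). A hedged sketch of testing the flag from kernel code (function name is ours; the caller is assumed to hold mmap_sem for the owning mm):

#include <linux/mm.h>

/* Nonzero if KSM is allowed to consider this vma for merging. */
static int example_vma_is_mergeable(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & VM_MERGEABLE);
}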
@@ -285,6 +285,14 @@ static inline int is_vmalloc_addr(const void *x)
 	return 0;
 #endif
 }
+#ifdef CONFIG_MMU
+extern int is_vmalloc_or_module_addr(const void *x);
+#else
+static inline int is_vmalloc_or_module_addr(const void *x)
+{
+	return 0;
+}
+#endif
 
 static inline struct page *compound_head(struct page *page)
 {
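is_vmalloc_or_module_addr() extends the is_vmalloc_addr() test above to cover module space, which some architectures place outside the vmalloc range; the !CONFIG_MMU stub returns 0 because no such regions exist there. A sketch of the sort of guard a caller might want (example name is ours):

#include <linux/mm.h>

/* True when 'addr' is in the directly mapped region, i.e. safe to
 * translate with virt_to_page(); vmalloc/module space needs a
 * page-table walk instead. */
static int example_addr_is_linear(const void *addr)
{
	return !is_vmalloc_or_module_addr(addr);
}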
@@ -575,19 +583,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 }
 
 /*
- * If a hint addr is less than mmap_min_addr change hint to be as
- * low as possible but still greater than mmap_min_addr
- */
-static inline unsigned long round_hint_to_min(unsigned long hint)
-{
-	hint &= PAGE_MASK;
-	if (((void *)hint != NULL) &&
-	    (hint < mmap_min_addr))
-		return PAGE_ALIGN(mmap_min_addr);
-	return hint;
-}
-
-/*
  * Some inline functions in vmstat.h depend on page_zone()
  */
 #include <linux/vmstat.h>
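Neither mmap_min_addr nor round_hint_to_min() is dropped outright: this series relocates them beside the LSM code that enforces the minimum-mapping policy. For reference, the removed helper restated with comments (same logic as the deleted lines above):

/* Round a non-NULL mmap address hint up to mmap_min_addr, so the
 * hint stays legal; a NULL hint keeps its "no preference" meaning. */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;				/* drop sub-page bits */
	if (((void *)hint != NULL) && (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);	/* lowest allowed */
	return hint;
}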
@@ -700,11 +695,12 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_SIGBUS 0x0002
 #define VM_FAULT_MAJOR 0x0004
 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
+#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */
 
 #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
 
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
 
 /*
  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
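VM_FAULT_HWPOISON lets ->fault report that the page backing the fault has been hardware-poisoned, and folding it into VM_FAULT_ERROR means existing error paths already treat it as fatal. A sketch of the triage an architecture's fault handler can now do (structure is typical, not copied from any one arch; the signal details are a comment-level assumption):

#include <linux/mm.h>

/* Hypothetical dispatch on the result of handle_mm_fault(). */
static void example_fault_triage(int fault)
{
	if (!(fault & VM_FAULT_ERROR))
		return;		/* fault was handled */
	if (fault & VM_FAULT_OOM)
		;	/* invoke the OOM path */
	else if (fault & VM_FAULT_HWPOISON)
		;	/* SIGBUS with machine-check error code */
	else
		;	/* plain SIGBUS */
}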
@@ -715,17 +711,8 @@ extern void pagefault_out_of_memory(void);
 
 extern void show_free_areas(void);
 
-#ifdef CONFIG_SHMEM
-extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
-#else
-static inline int shmem_lock(struct file *file, int lock,
-			     struct user_struct *user)
-{
-	return 0;
-}
-#endif
+int shmem_lock(struct file *file, int lock, struct user_struct *user);
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
-
 int shmem_zero_setup(struct vm_area_struct *);
 
 #ifndef CONFIG_MMU
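shmem_lock() loses its CONFIG_SHMEM stub because mm/shmem.c now provides a working implementation in both configurations (the !SHMEM build falls back to a ramfs-style one); the redundant extern on a function declaration goes too. Typical caller shape, modelled loosely on SysV SHM_LOCK handling (sketch only, accounting and error paths elided):

#include <linux/mm.h>

/* Lock (pin) a shmem-backed file's pages in memory, charging the
 * locked amount against 'user'. Second argument: 1 = lock, 0 = unlock. */
static int example_lock_segment(struct file *shm_file, struct user_struct *user)
{
	return shmem_lock(shm_file, 1, user);
}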
@@ -805,8 +792,14 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 	unmap_mapping_range(mapping, holebegin, holelen, 0);
 }
 
-extern int vmtruncate(struct inode * inode, loff_t offset);
-extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
+extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern int vmtruncate(struct inode *inode, loff_t offset);
+extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
+
+int truncate_inode_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_page(struct address_space *mapping, struct page *page);
+
+int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
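truncate_pagecache() splits the page-cache half of vmtruncate() into a helper filesystems can call directly, while truncate_inode_page(), generic_error_remove_page() and invalidate_inode_page() expose single-page removal, the granularity the new memory-failure code works at. A hedged sketch of a setattr-style size change using the helper (locking and size validation elided):

#include <linux/fs.h>
#include <linux/mm.h>

/* Shrink or grow an inode: publish the new size, then drop and
 * unmap any now-stale pages past it. */
static void example_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	truncate_pagecache(inode, oldsize, newsize);
}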
@@ -826,10 +819,11 @@ extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long start, int len, int write, int force,
+			unsigned long start, int nr_pages, int write, int force,
 			struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
+struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
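The len → nr_pages rename records that get_user_pages() counts pages, not bytes, a recurring source of caller bugs; get_dump_page() is a new convenience for the ELF core-dump path, returning the page at an address or NULL where a hole would dump as zeroes anyway. A sketch of pinning a single page with the renamed parameter (caller is assumed to hold mmap_sem and to put_page() the result):

#include <linux/mm.h>
#include <linux/sched.h>

/* Pin one page of the current task's address space for reading. */
static struct page *example_pin_one_page(unsigned long addr)
{
	struct page *page;
	int ret;

	ret = get_user_pages(current, current->mm, addr & PAGE_MASK,
			     1 /* nr_pages */, 0 /* write */, 0 /* force */,
			     &page, NULL);
	return ret == 1 ? page : NULL;
}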
@@ -1073,6 +1067,8 @@ extern void setup_per_cpu_pageset(void);
 static inline void setup_per_cpu_pageset(void) {}
 #endif
 
+extern void zone_pcp_update(struct zone *zone);
+
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
 
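zone_pcp_update() recomputes a zone's per-cpu pageset batch and high marks from its current size, which matters once memory hotplug can change zone->present_pages at runtime. Sketch of the intended call site (hotplug locking elided; the field adjustment is shown only for orientation):

#include <linux/mmzone.h>
#include <linux/mm.h>

/* After onlining memory has grown the zone, re-derive the per-cpu
 * pageset sizing from the new zone size. */
static void example_zone_grew(struct zone *zone, unsigned long new_pages)
{
	zone->present_pages += new_pages;
	zone_pcp_update(zone);
}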
@@ -1241,7 +1237,8 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_WRITE 0x01 /* check pte is writable */
 #define FOLL_TOUCH 0x02 /* mark page accessed */
 #define FOLL_GET 0x04 /* do get_page on page */
-#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
+#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
+#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
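FOLL_ANON's implicit "hand back ZERO_PAGE for holes" behaviour is replaced by two explicit flags: FOLL_DUMP makes follow_page() fail on a hole instead of materializing zeroes (what get_dump_page() relies on), and FOLL_FORCE carries get_user_pages()'s force semantics into the flag word. A sketch of a dump-style lookup (caller assumed to hold mmap_sem; the helper name is ours):

#include <linux/err.h>
#include <linux/mm.h>

/* Fetch the page backing 'addr' for dumping; holes and errors both
 * come back NULL. FOLL_GET takes a reference the caller must drop
 * with put_page(). */
static struct page *example_lookup_for_dump(struct vm_area_struct *vma,
					    unsigned long addr)
{
	struct page *page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	if (!page || IS_ERR(page))
		return NULL;
	return page;
}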
@@ -1289,7 +1286,7 @@ int in_gate_area_no_task(unsigned long addr);
 #define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
 #endif /* __HAVE_ARCH_GATE_AREA */
 
-int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+int drop_caches_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages);
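The vanished struct file * argument follows a tree-wide sysctl cleanup: proc handlers no longer receive the file pointer, so every prototype, drop_caches_sysctl_handler() included, shrinks by one parameter. Any out-of-tree handler needs the same edit; sketched against the new shape (handler name is ours):

#include <linux/sysctl.h>

/* A proc handler written for the post-cleanup prototype; it simply
 * delegates to the stock integer handler. */
static int example_sysctl_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	return proc_dointvec(table, write, buffer, lenp, ppos);
}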
@@ -1318,5 +1315,12 @@ void vmemmap_populate_print_last(void);
 extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
 				size_t size);
 extern void refund_locked_memory(struct mm_struct *mm, size_t size);
+
+extern void memory_failure(unsigned long pfn, int trapno);
+extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int sysctl_memory_failure_early_kill;
+extern int sysctl_memory_failure_recovery;
+extern atomic_long_t mce_bad_pages;
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
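The closing block is the public face of the new HWPOISON subsystem: the machine-check handler hands a corrupted pfn to memory_failure() (or __memory_failure() when it already holds a page reference), mce_bad_pages counts offlined pages, and the two sysctls select early-kill versus late-kill policy for affected processes. Sketch of the reporting side (the MCE decoding that produces pfn and trapno is elided):

#include <linux/mm.h>

/* Hypothetical arch hook: hardware reported an uncorrectable error
 * in page frame 'pfn', delivered via trap 'trapno'. Recovery is
 * best-effort; clean pagecache can be dropped, dirty data is lost. */
static void example_report_poisoned_pfn(unsigned long pfn, int trapno)
{
	memory_failure(pfn, trapno);
}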