Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 45 +++++++++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 17b27cd269c4..d8738a464b94 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -6,6 +6,7 @@
 #ifdef __KERNEL__
 
 #include <linux/gfp.h>
+#include <linux/bug.h>
 #include <linux/list.h>
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
@@ -111,7 +112,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGEPAGE	0x01000000	/* MADV_HUGEPAGE marked this vma */
 #endif
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
-#define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
+#define VM_NODUMP	0x04000000	/* Do not include in the core dump */
 
 #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
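
Note: the VM_ALWAYSDUMP to VM_NODUMP change flips core-dump filtering from opt-in to opt-out. A minimal userspace sketch of how the flag gets set, assuming the MADV_DONTDUMP/MADV_DODUMP madvise pair added alongside it (Linux 3.4+ headers):

    #include <stddef.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1 << 20;
            void *secret = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (secret == MAP_FAILED)
                    return 1;
            /* Sets VM_NODUMP on the vma: the range is skipped when a
             * core file is written; MADV_DODUMP clears it again. */
            if (madvise(secret, len, MADV_DONTDUMP))
                    return 1;
            return 0;
    }
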
@@ -893,9 +894,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 
 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
-unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
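
Note: zap_page_range() and unmap_vmas() now return void; no caller used the old unsigned long result, so dropping it simplifies the interface. A sketch of the new call shape, with my_drv_unmap() hypothetical:

    /* Tear down all ptes covering a vma; there is no result to check. */
    static void my_drv_unmap(struct vm_area_struct *vma)
    {
            zap_page_range(vma, vma->vm_start,
                           vma->vm_end - vma->vm_start, NULL);
    }
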
@@ -953,7 +954,7 @@ extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 extern int vmtruncate(struct inode *inode, loff_t offset);
 extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
-
+void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
 int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 
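
Note: truncate_pagecache_range() lets a filesystem drop cached pages over an arbitrary [offset, end] byte range without touching i_size, which is what a hole-punch path needs. A sketch under that assumption, with myfs_punch_hole() and myfs_free_blocks() both hypothetical:

    static int myfs_punch_hole(struct inode *inode, loff_t start, loff_t len)
    {
            loff_t end = start + len - 1;

            /* Drop the cached pages first so stale data cannot be read
             * back from the page cache after the blocks are freed. */
            truncate_pagecache_range(inode, start, end);
            return myfs_free_blocks(inode, start, end);
    }
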
@@ -1040,6 +1041,9 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
 		!vma_growsup(vma->vm_next, addr);
 }
 
+extern pid_t
+vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
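
Note: vm_is_stack() returns the tid of the thread whose stack the vma is (0 if none); a non-zero in_group widens the search to the whole thread group. A sketch of the /proc/<pid>/maps-style labelling it appears intended for, with show_stack_label() hypothetical:

    static void show_stack_label(struct seq_file *m, struct task_struct *task,
                                 struct vm_area_struct *vma)
    {
            pid_t tid = vm_is_stack(task, vma, 1);

            if (tid)
                    seq_printf(m, "[stack:%d]", tid);
    }
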
@@ -1058,19 +1062,20 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 /*
  * per-process(per-mm_struct) statistics.
  */
-static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
-{
-	atomic_long_set(&mm->rss_stat.count[member], value);
-}
-
-#if defined(SPLIT_RSS_COUNTING)
-unsigned long get_mm_counter(struct mm_struct *mm, int member);
-#else
 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
 {
-	return atomic_long_read(&mm->rss_stat.count[member]);
-}
+	long val = atomic_long_read(&mm->rss_stat.count[member]);
+
+#ifdef SPLIT_RSS_COUNTING
+	/*
+	 * counter is updated in asynchronous manner and may go to minus.
+	 * But it's never be expected number for users.
+	 */
+	if (val < 0)
+		val = 0;
 #endif
+	return (unsigned long)val;
+}
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
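
Note: the clamp in the new get_mm_counter() exists because, under SPLIT_RSS_COUNTING, each task batches its RSS updates and folds them into mm->rss_stat only periodically, so a reader can observe a transiently negative sum. A worked example of what the clamp prevents on a 64-bit kernel (MM_ANONPAGES is one of the real counter indices):

    /* Thread A mapped 3 pages but has not folded its +3 delta yet,
     * while the shared counter already holds thread B's -3 unmap.
     * Without the clamp, (unsigned long)-3 would be reported as
     * 18446744073709551613 pages; with it, the reader sees 0. */
    unsigned long rss = get_mm_counter(mm, MM_ANONPAGES);
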
@@ -1127,9 +1132,9 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
 }
 
 #if defined(SPLIT_RSS_COUNTING)
-void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+void sync_mm_rss(struct mm_struct *mm);
 #else
-static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+static inline void sync_mm_rss(struct mm_struct *mm)
 {
 }
 #endif
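
Note: sync_mm_rss() drops its task argument because the pending per-task deltas live in current, so the function only ever made sense when called for current. The call shape change, as a one-line sketch:

    sync_mm_rss(mm);        /* was: sync_mm_rss(current, mm) */
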
@@ -1253,6 +1258,8 @@ static inline void pgtable_page_dtor(struct page *page)
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
+extern void free_initmem(void);
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
  * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
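
Note: free_initmem() gains a common prototype here so generic code no longer depends on per-arch declarations. A sketch of the boot-time pattern the prototype serves, with release_init_memory() hypothetical:

    static void release_init_memory(void)
    {
            /* Arch-provided: returns the __init text/data pages to the
             * page allocator once boot no longer needs them. */
            free_initmem();
    }
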
@@ -1291,8 +1298,6 @@ extern void get_pfn_range_for_nid(unsigned int nid,
 extern unsigned long find_min_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
-int add_from_early_node_map(struct range *range, int az,
-				   int nr_range, int nid);
 extern void sparse_memory_present_with_active_regions(int nid);
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
@@ -1598,9 +1603,9 @@ void vmemmap_populate_print_last(void);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
+	MF_ACTION_REQUIRED = 1 << 1,
 };
-extern void memory_failure(unsigned long pfn, int trapno);
-extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int memory_failure(unsigned long pfn, int trapno, int flags);
 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
 extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
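
Note: __memory_failure() is folded into memory_failure(), which now carries the flags argument and returns an int result, and the new MF_ACTION_REQUIRED flag lets a machine-check handler indicate that the fault is synchronous and must be recovered before returning to the task. A sketch of such a caller, with handle_user_mce() hypothetical:

    static void handle_user_mce(unsigned long pfn, int trapno)
    {
            /* The task is about to consume the poisoned page, so
             * recovery is mandatory rather than best-effort. */
            if (memory_failure(pfn, trapno, MF_ACTION_REQUIRED))
                    pr_err("recovery of pfn %#lx failed\n", pfn);
    }
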