Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h | 40 +++++++++++++++++++++-------------------
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 17b27cd269c4..a6fabdfd34c5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -111,7 +111,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGEPAGE	0x01000000	/* MADV_HUGEPAGE marked this vma */
 #endif
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
-#define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
+#define VM_NODUMP	0x04000000	/* Do not include in the core dump */
 
 #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
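
This hunk inverts the dump semantics: VM_ALWAYSDUMP opted a VMA into core dumps, while VM_NODUMP opts one out. A minimal sketch of how such a flag might be driven from an madvise()-style handler follows; the MADV_DONTDUMP/MADV_DODUMP names and the locking context are assumptions, not part of this diff:

	/*
	 * Hypothetical madvise-style handler toggling VM_NODUMP.
	 * Assumes mmap_sem is held for write, as vm_flags updates require.
	 */
	static int madvise_dump_flags(struct vm_area_struct *vma, int behavior)
	{
		if (behavior == MADV_DONTDUMP)
			vma->vm_flags |= VM_NODUMP;	/* exclude from core dumps */
		else if (behavior == MADV_DODUMP)
			vma->vm_flags &= ~VM_NODUMP;	/* restore default dumping */
		return 0;
	}
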
@@ -893,9 +893,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 
 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
-unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
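
zap_page_range() and unmap_vmas() now return void; their unsigned long return values had no remaining users. A hedged sketch of a caller after this change (the helper name and NULL details argument are illustrative):

	/* Sketch: tear down every page mapped by one VMA. */
	static void drop_vma_pages(struct vm_area_struct *vma)
	{
		/* A NULL zap_details means "no special filtering". */
		zap_page_range(vma, vma->vm_start,
			       vma->vm_end - vma->vm_start, NULL);
	}
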
@@ -1040,6 +1040,9 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
 		!vma_growsup(vma->vm_next, addr);
 }
 
+extern pid_t
+vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
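
The new vm_is_stack() helper reports whether a VMA serves as a stack, returning the tid of the owning thread (0 if none); with in_group set it also checks the other threads in the group. A sketch of the kind of /proc maps consumer this enables; the function name and seq_file plumbing here are assumptions:

	/* Sketch: emit a "[stack:<tid>]" label for thread stacks. */
	static void print_stack_label(struct seq_file *m,
				      struct task_struct *task,
				      struct vm_area_struct *vma)
	{
		pid_t tid = vm_is_stack(task, vma, 1);

		if (tid)
			seq_printf(m, "[stack:%d]", tid);
	}
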
@@ -1058,19 +1061,20 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 /*
  * per-process(per-mm_struct) statistics.
  */
-static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
-{
-	atomic_long_set(&mm->rss_stat.count[member], value);
-}
-
-#if defined(SPLIT_RSS_COUNTING)
-unsigned long get_mm_counter(struct mm_struct *mm, int member);
-#else
 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
 {
-	return atomic_long_read(&mm->rss_stat.count[member]);
-}
+	long val = atomic_long_read(&mm->rss_stat.count[member]);
+
+#ifdef SPLIT_RSS_COUNTING
+	/*
+	 * counter is updated in asynchronous manner and may go to minus.
+	 * But it's never be expected number for users.
+	 */
+	if (val < 0)
+		val = 0;
 #endif
+	return (unsigned long)val;
+}
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
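
The rationale for the clamp: under SPLIT_RSS_COUNTING each task caches counter deltas and folds them into mm->rss_stat only periodically, so a concurrent read can see a transiently negative sum, which would wrap to a huge unsigned value. A small illustration of a reader that now stays sane; the helper name and KiB conversion are illustrative:

	/* Sketch: RSS in KiB; a momentarily negative raw counter now
	 * reads as 0 instead of wrapping to ~ULONG_MAX pages. */
	static unsigned long sketch_rss_kb(struct mm_struct *mm)
	{
		unsigned long pages = get_mm_counter(mm, MM_ANONPAGES) +
				      get_mm_counter(mm, MM_FILEPAGES);

		return pages << (PAGE_SHIFT - 10);
	}
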
@@ -1127,9 +1131,9 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
 }
 
 #if defined(SPLIT_RSS_COUNTING)
-void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+void sync_mm_rss(struct mm_struct *mm);
 #else
-static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+static inline void sync_mm_rss(struct mm_struct *mm)
 {
 }
 #endif
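
sync_mm_rss() drops its task argument: it can only ever fold the calling task's cached deltas, so the parameter was misleading. A sketch of the call pattern at a sync point (the helper name and placement are illustrative):

	/* Sketch: flush current's cached RSS deltas into mm->rss_stat,
	 * e.g. before mm teardown. No-op without SPLIT_RSS_COUNTING. */
	static void sketch_flush_rss(struct mm_struct *mm)
	{
		sync_mm_rss(mm);	/* implicitly acts on current */
	}
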
@@ -1291,8 +1295,6 @@ extern void get_pfn_range_for_nid(unsigned int nid,
 extern unsigned long find_min_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
-int add_from_early_node_map(struct range *range, int az,
-				   int nr_range, int nid);
 extern void sparse_memory_present_with_active_regions(int nid);
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
@@ -1598,9 +1600,9 @@ void vmemmap_populate_print_last(void);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
+	MF_ACTION_REQUIRED = 1 << 1,
 };
-extern void memory_failure(unsigned long pfn, int trapno);
-extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int memory_failure(unsigned long pfn, int trapno, int flags);
 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
 extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
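
__memory_failure() is folded into memory_failure(), which now takes the flags argument and returns an error code, and the new MF_ACTION_REQUIRED flag marks faults that must be handled before the offending thread returns to userspace. A hedged sketch of a machine-check-style caller; the function name and error handling are illustrative:

	/* Sketch: report a hardware-poisoned page from an MCE path. */
	static void sketch_handle_poison(unsigned long pfn, int trapno)
	{
		int ret = memory_failure(pfn, trapno, MF_ACTION_REQUIRED);

		if (ret)
			pr_err("memory_failure(%#lx) failed: %d\n", pfn, ret);
	}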