Diffstat (limited to 'include/linux/mm.h')

-rw-r--r--  include/linux/mm.h | 130

1 file changed, 125 insertions(+), 5 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 60c467bfbabd..e70f21beb4b4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,6 +12,7 @@
 #include <linux/prio_tree.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
+#include <linux/range.h>
 
 struct mempolicy;
 struct anon_vma;
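
The new <linux/range.h> include supplies the struct range type taken by add_from_early_node_map() further down in this header. For orientation, the type is little more than a pair of u64 bounds; the sketch below is paraphrased from the same patch series and is not part of this diff:

    /* Paraphrased from include/linux/range.h; not part of this diff. */
    struct range {
            u64 start;
            u64 end;
    };
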
@@ -265,6 +266,8 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
+extern int page_is_ram(unsigned long pfn);
+
 /* Support for virtually mapped pages */
 struct page *vmalloc_to_page(const void *addr);
 unsigned long vmalloc_to_pfn(const void *addr);
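
page_is_ram() reports whether a page frame number is backed by System RAM; declaring it here makes a previously architecture-private helper usable from generic code. A minimal sketch of a caller, with the policy function invented purely for illustration:

    /* Hypothetical caller: refuse to remap frames that are real RAM. */
    static int can_remap_pfn(unsigned long pfn)
    {
            return !page_is_ram(pfn);       /* holes and device memory only */
    }
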
@@ -867,6 +870,114 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages);
+/*
+ * per-process(per-mm_struct) statistics.
+ */
+#if defined(SPLIT_RSS_COUNTING)
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_set(&mm->rss_stat.count[member], value);
+}
+
+unsigned long get_mm_counter(struct mm_struct *mm, int member);
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_add(value, &mm->rss_stat.count[member]);
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_inc(&mm->rss_stat.count[member]);
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_dec(&mm->rss_stat.count[member]);
+}
+
+#else /* !USE_SPLIT_PTLOCKS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] = value;
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+	return mm->rss_stat.count[member];
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] += value;
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]++;
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]--;
+}
+
+#endif /* !USE_SPLIT_PTLOCKS */
+
+static inline unsigned long get_mm_rss(struct mm_struct *mm)
+{
+	return get_mm_counter(mm, MM_FILEPAGES) +
+		get_mm_counter(mm, MM_ANONPAGES);
+}
+
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
+
+static inline void update_hiwater_rss(struct mm_struct *mm)
+{
+	unsigned long _rss = get_mm_rss(mm);
+
+	if ((mm)->hiwater_rss < _rss)
+		(mm)->hiwater_rss = _rss;
+}
+
+static inline void update_hiwater_vm(struct mm_struct *mm)
+{
+	if (mm->hiwater_vm < mm->total_vm)
+		mm->hiwater_vm = mm->total_vm;
+}
+
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+					 struct mm_struct *mm)
+{
+	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+	if (*maxrss < hiwater_rss)
+		*maxrss = hiwater_rss;
+}
+
+#if defined(SPLIT_RSS_COUNTING)
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+#else
+static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+{
+}
+#endif
 
 /*
  * A callback you can register to apply pressure to ageable caches.
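
The split in this hunk is the interesting part: with SPLIT_RSS_COUNTING the rss_stat counters are atomic_long_t values updated without mm->page_table_lock, and the task argument to sync_mm_rss() suggests per-task batching of updates that is flushed back into the mm periodically, so get_mm_counter() may briefly lag; without it the counters are plain longs serialized by page_table_lock. Either way the calling convention is identical. A hedged sketch of typical accounting, modeled on fault-path usage and not taken from this diff:

    /* Illustrative only: account one new anonymous page for 'mm',
     * keep the RSS high-water mark current, and read the total back. */
    static unsigned long account_new_anon_page(struct mm_struct *mm)
    {
            inc_mm_counter(mm, MM_ANONPAGES);
            update_hiwater_rss(mm);
            return get_mm_rss(mm);  /* MM_FILEPAGES + MM_ANONPAGES */
    }
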
@@ -1047,6 +1158,10 @@ extern void get_pfn_range_for_nid(unsigned int nid,
 extern unsigned long find_min_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
+int add_from_early_node_map(struct range *range, int az,
+				   int nr_range, int nid);
+void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
+				 u64 goal, u64 limit);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
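
These two additions round out the early_node_map interface used when bootmem is bypassed: judging by the signatures, add_from_early_node_map() feeds a node's early memory ranges into a caller-supplied struct range array (az being the array size), while __alloc_memory_core_early() hands out boot memory for a node within the [goal, limit) address window. A hedged sketch of the allocator's calling convention, with the window chosen purely for illustration:

    /* Illustrative only: try for node-local boot memory below 4 GiB,
     * then retry with no upper bound. Returns NULL on failure. */
    void *buf = __alloc_memory_core_early(nid, size, SMP_CACHE_BYTES,
                                          0, 1ULL << 32);
    if (!buf)
            buf = __alloc_memory_core_early(nid, size, SMP_CACHE_BYTES,
                                            0, -1ULL);
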
@@ -1079,11 +1194,7 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
 
-#ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
 
 extern void zone_pcp_update(struct zone *zone);
 
@@ -1111,7 +1222,7 @@ static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
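
vma_adjust() now returns int instead of void, i.e. it can fail partway through and callers must check the result rather than assume success. A minimal sketch of the new convention; the surrounding values are illustrative:

    /* Illustrative only: propagate failure (typically -ENOMEM). */
    int err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, NULL);
    if (err)
            return err;
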
@@ -1319,12 +1430,19 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
 
+void sparse_mem_maps_populate_node(struct page **map_map,
+				   unsigned long pnum_begin,
+				   unsigned long pnum_end,
+				   unsigned long map_count,
+				   int nodeid);
+
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(struct page *start_page,
 				unsigned long pages, int node);
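
sparse_mem_maps_populate_node() batches mem_map setup for a whole node: one call covers the sections in [pnum_begin, pnum_end), depositing each section's map in map_map, which the pnum parameters suggest is indexed by section number; vmemmap_alloc_block_buf(), judging by its name, allocates vmemmap blocks that may come from a preallocated per-node buffer. A hedged sketch of the batch call, assuming that indexing:

    /* Illustrative only: populate all present sections of one node,
     * then consult the per-section maps. */
    sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                  map_count, nodeid);
    for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
            if (!present_section_nr(pnum))
                    continue;
            /* map_map[pnum] is the section's struct page array (or NULL). */
    }
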
@@ -1347,5 +1465,7 @@ extern void shake_page(struct page *p, int access);
 extern atomic_long_t mce_bad_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
+extern void dump_page(struct page *page);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
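
Finally, dump_page() gives generic code a way to print a page's diagnostic state when something looks wrong. A hedged sketch of the kind of sanity check that might use it; the check itself is invented for illustration:

    /* Illustrative only: report a page whose state looks corrupted. */
    if (unlikely(page_mapcount(page) < 0)) {
            dump_page(page);
            BUG();
    }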