Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	121
1 file changed, 48 insertions(+), 73 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6507dde38b16..8eb969ebf904 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -153,6 +153,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
 #define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
 #define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
+#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
 
 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
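
For context: FAULT_FLAG_KILLABLE marks a fault taken in a region where the task may be killed by SIGKILL, letting the fault path give up instead of retrying. A minimal sketch of an arch fault handler passing it, assuming the usual handler-local mm/vma/address variables (the call site itself is not part of this patch):

    /* Sketch: pass the new flag so a SIGKILLed task stops retrying. */
    unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
    int fault = handle_mm_fault(mm, vma, address, flags);

    if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
    	return;	/* task is being killed; don't retry the fault */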
@@ -604,10 +605,6 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define NODE_NOT_IN_PAGE_FLAGS
 #endif
 
-#ifndef PFN_SECTION_SHIFT
-#define PFN_SECTION_SHIFT 0
-#endif
-
 /*
  * Define the bit shifts to access each section.  For non-existent
  * sections we define the shift as 0; that plus a 0 mask ensures
@@ -681,6 +678,12 @@ static inline struct zone *page_zone(struct page *page)
 }
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+static inline void set_page_section(struct page *page, unsigned long section)
+{
+	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+}
+
 static inline unsigned long page_to_section(struct page *page)
 {
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
@@ -699,18 +702,14 @@ static inline void set_page_node(struct page *page, unsigned long node)
 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
 }
 
-static inline void set_page_section(struct page *page, unsigned long section)
-{
-	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
-	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
-}
-
 static inline void set_page_links(struct page *page, enum zone_type zone,
 	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
 	set_page_node(page, node);
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 	set_page_section(page, pfn_to_section_nr(pfn));
+#endif
 }
 
 /*
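
Moving set_page_section() next to page_to_section() keeps both halves of the section encoding under the same SPARSEMEM-without-VMEMMAP guard, and set_page_links() now only touches the section bits when that config actually stores them in page->flags. A sketch of the round trip under that configuration:

    /* Sketch (assumes CONFIG_SPARSEMEM && !CONFIG_SPARSEMEM_VMEMMAP). */
    unsigned long sec = pfn_to_section_nr(pfn);

    set_page_section(page, sec);
    VM_BUG_ON(page_to_section(page) != sec);	/* must read back unchanged */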
@@ -862,26 +861,18 @@ extern void pagefault_out_of_memory(void);
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
 /*
- * Flags passed to show_mem() and __show_free_areas() to suppress output in
+ * Flags passed to show_mem() and show_free_areas() to suppress output in
  * various contexts.
  */
 #define SHOW_MEM_FILTER_NODES	(0x0001u)	/* filter disallowed nodes */
 
-extern void show_free_areas(void);
-extern void __show_free_areas(unsigned int flags);
+extern void show_free_areas(unsigned int flags);
+extern bool skip_free_areas_node(unsigned int flags, int nid);
 
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
 int shmem_zero_setup(struct vm_area_struct *);
 
-#ifndef CONFIG_MMU
-extern unsigned long shmem_get_unmapped_area(struct file *file,
-					     unsigned long addr,
-					     unsigned long len,
-					     unsigned long pgoff,
-					     unsigned long flags);
-#endif
-
 extern int can_do_mlock(void);
 extern int user_shm_lock(size_t, struct user_struct *);
 extern void user_shm_unlock(size_t, struct user_struct *);
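
With the flags argument folded into show_free_areas() itself, callers pick the filtering behaviour directly; illustrative calls (not from this patch):

    show_free_areas(0);				/* unfiltered dump */
    show_free_areas(SHOW_MEM_FILTER_NODES);	/* skip nodes the caller may not use */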
@@ -894,8 +885,6 @@ struct zap_details {
 	struct address_space *check_mapping;	/* Check page->mapping if set */
 	pgoff_t	first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
-	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
-	unsigned long truncate_count;		/* Compare vm_truncate_count */
 };
 
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -905,7 +894,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather **tlb,
+unsigned long unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
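
unmap_vmas() now takes a plain struct mmu_gather pointer, matching the move from per-CPU mmu_gather state to a caller-owned, on-stack instance. A hedged sketch of the new calling convention, assuming the tlb_gather_mmu()/tlb_finish_mmu() pairing used by this series:

    struct mmu_gather tlb;
    unsigned long nr_accounted = 0;

    tlb_gather_mmu(&tlb, mm, 0);		/* 0: not a full-mm teardown */
    unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
    tlb_finish_mmu(&tlb, start, end);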
@@ -1056,65 +1045,35 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 /*
  * per-process(per-mm_struct) statistics.
  */
-#if defined(SPLIT_RSS_COUNTING)
-/*
- * The mm counters are not protected by its page_table_lock,
- * so must be incremented atomically.
- */
 static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
 {
 	atomic_long_set(&mm->rss_stat.count[member], value);
 }
 
+#if defined(SPLIT_RSS_COUNTING)
 unsigned long get_mm_counter(struct mm_struct *mm, int member);
-
-static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
-{
-	atomic_long_add(value, &mm->rss_stat.count[member]);
-}
-
-static inline void inc_mm_counter(struct mm_struct *mm, int member)
-{
-	atomic_long_inc(&mm->rss_stat.count[member]);
-}
-
-static inline void dec_mm_counter(struct mm_struct *mm, int member)
-{
-	atomic_long_dec(&mm->rss_stat.count[member]);
-}
-
-#else  /* !USE_SPLIT_PTLOCKS */
-/*
- * The mm counters are protected by its page_table_lock,
- * so can be incremented directly.
- */
-static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
-{
-	mm->rss_stat.count[member] = value;
-}
-
+#else
 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
 {
-	return mm->rss_stat.count[member];
+	return atomic_long_read(&mm->rss_stat.count[member]);
 }
+#endif
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
-	mm->rss_stat.count[member] += value;
+	atomic_long_add(value, &mm->rss_stat.count[member]);
 }
 
 static inline void inc_mm_counter(struct mm_struct *mm, int member)
 {
-	mm->rss_stat.count[member]++;
+	atomic_long_inc(&mm->rss_stat.count[member]);
 }
 
 static inline void dec_mm_counter(struct mm_struct *mm, int member)
 {
-	mm->rss_stat.count[member]--;
+	atomic_long_dec(&mm->rss_stat.count[member]);
 }
 
-#endif /* !USE_SPLIT_PTLOCKS */
-
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
 {
 	return get_mm_counter(mm, MM_FILEPAGES) +
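
After deleting the non-atomic variant, the counters always live in atomic_long_t and only get_mm_counter() still differs under SPLIT_RSS_COUNTING (where per-task deltas must be folded in). Call sites are unchanged; an illustrative one, not from this patch:

    inc_mm_counter(mm, MM_ANONPAGES);	/* account a newly mapped anon page */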
@@ -1163,13 +1122,24 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
 #endif
 
 /*
+ * This struct is used to pass information from page reclaim to the shrinkers.
+ * We consolidate the values for easier extention later.
+ */
+struct shrink_control {
+	gfp_t gfp_mask;
+
+	/* How many slab objects shrinker() should scan and try to reclaim */
+	unsigned long nr_to_scan;
+};
+
+/*
  * A callback you can register to apply pressure to ageable caches.
  *
- * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
- * look through the least-recently-used 'nr_to_scan' entries and
- * attempt to free them up.  It should return the number of objects
- * which remain in the cache.  If it returns -1, it means it cannot do
- * any scanning at this time (eg. there is a risk of deadlock).
+ * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
+ * and a 'gfpmask'.  It should look through the least-recently-used
+ * 'nr_to_scan' entries and attempt to free them up.  It should return
+ * the number of objects which remain in the cache.  If it returns -1, it means
+ * it cannot do any scanning at this time (eg. there is a risk of deadlock).
  *
  * The 'gfpmask' refers to the allocation we are currently trying to
  * fulfil.
@@ -1178,7 +1148,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
  * querying the cache size, so a fastpath for that case is appropriate.
  */
 struct shrinker {
-	int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
+	int (*shrink)(struct shrinker *, struct shrink_control *sc);
 	int seeks;	/* seeks to recreate an obj */
 
 	/* These are for internal use */
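
A shrinker now receives all reclaim parameters through shrink_control. A minimal sketch against the new prototype; the cache and its my_cache_scan()/my_cache_count() helpers are hypothetical:

    /* Hedged sketch of a shrinker using the new prototype. */
    static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
    {
    	if (sc->nr_to_scan)
    		my_cache_scan(sc->nr_to_scan, sc->gfp_mask);	/* hypothetical */
    	return my_cache_count();				/* hypothetical */
    }

    static struct shrinker my_cache_shrinker = {
    	.shrink	= my_cache_shrink,
    	.seeks	= DEFAULT_SEEKS,
    };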
@@ -1380,7 +1350,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
 extern void setup_per_zone_wmarks(void);
-extern void calculate_zone_inactive_ratio(struct zone *zone);
+extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(unsigned int flags);
@@ -1388,6 +1358,8 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
 
+extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+
 extern void setup_per_cpu_pageset(void);
 
 extern void zone_pcp_update(struct zone *zone);
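
warn_alloc_failed() centralizes the allocation-failure warning; failure paths of this era can pass NULL as fmt when they have nothing to append. Illustrative call (an assumption, not part of this patch):

    warn_alloc_failed(gfp_mask, order, NULL);	/* standard message only */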
@@ -1517,15 +1489,17 @@ unsigned long ra_submit(struct file_ra_state *ra,
 			   struct address_space *mapping,
 			   struct file *filp);
 
-/* Do stack extension */
+/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+
+/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
+extern int expand_downwards(struct vm_area_struct *vma,
+		unsigned long address);
 #if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
   #define expand_upwards(vma, address) do { } while (0)
 #endif
-extern int expand_stack_downwards(struct vm_area_struct *vma,
-				  unsigned long address);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
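
expand_stack() remains the generic entry point; expand_downwards() is exported for the places where even a CONFIG_STACK_GROWSUP kernel must grow a mapping downwards. A hedged sketch of a caller:

    /* Sketch: grow a GROWSDOWN vma to cover a faulting address. */
    if ((vma->vm_flags & VM_GROWSDOWN) && expand_downwards(vma, address))
    	return -ENOMEM;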
@@ -1627,8 +1601,9 @@ int in_gate_area_no_mm(unsigned long addr);
 
 int drop_caches_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages);
+unsigned long shrink_slab(struct shrink_control *shrink,
+			  unsigned long nr_pages_scanned,
+			  unsigned long lru_pages);
 
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
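
shrink_slab() keeps scan pressure as explicit arguments while shrink_control carries the GFP context. A sketch of a reclaim-side call; gfp_mask, nr_scanned and lru_pages are assumed caller state:

    struct shrink_control shrink = {
    	.gfp_mask = gfp_mask,
    };

    shrink_slab(&shrink, nr_scanned, lru_pages);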