author    Michal Marek <mmarek@suse.cz>  2010-10-27 18:15:57 -0400
committer Michal Marek <mmarek@suse.cz>  2010-10-27 18:15:57 -0400
commit    b74b953b998bcc2db91b694446f3a2619ec32de6 (patch)
tree      6ce24caabd730f6ae9287ed0676ec32e6ff31e9d /include/linux/mm.h
parent    abb438526201c6a79949ad45375c051b6681c253 (diff)
parent    f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff)
Merge commit 'v2.6.36' into kbuild/misc
Update the branch so that a recent change to scripts/basic/docproc.c (commit eda603f) can be fixed.
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	178
1 file changed, 164 insertions, 14 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 60c467bfbabd..74949fbef8c6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,13 +12,14 @@
 #include <linux/prio_tree.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
+#include <linux/range.h>
+#include <linux/pfn.h>
 
 struct mempolicy;
 struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -77,7 +78,11 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE	0x00000080
 
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 #define VM_GROWSUP	0x00000200
+#else
+#define VM_GROWSUP	0x00000000
+#endif
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
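The VM_GROWSUP hunk is worth a note: defining the flag to 0 on architectures without an upward-growing stack lets callers test it unconditionally while the compiler discards the dead branch. A standalone sketch of the pattern (names here are illustrative, not kernel code):

/*
 * Standalone sketch (not kernel code): a feature flag defined to 0 on
 * configurations that lack the feature lets callers keep one
 * unconditional test; the compiler folds the zero mask away.
 */
#include <stdio.h>

#define HAVE_UPWARD_STACK 0	/* stand-in for CONFIG_STACK_GROWSUP/IA64 */

#if HAVE_UPWARD_STACK
#define VM_GROWSUP	0x00000200
#else
#define VM_GROWSUP	0x00000000
#endif

static int may_expand_up(unsigned long vm_flags)
{
	return (vm_flags & VM_GROWSUP) != 0;	/* constant 0 when unsupported */
}

int main(void)
{
	printf("expandable up: %d\n", may_expand_up(0x00000200));
	return 0;
}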
@@ -106,6 +111,9 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 
+/* Bits set in the VMA until the stack is in its final location */
+#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
+
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 #endif
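VM_STACK_INCOMPLETE_SETUP overloads two readahead hints that are meaningless on a stack VMA to flag a stack whose final position is still being decided during exec. A standalone sketch of the mark-then-clear idea (flag values copied from this header; the struct is illustrative):

/*
 * Standalone sketch: mark a stack VMA with two bits that are
 * meaningless for a stack, then clear them once it is in place.
 */
#include <assert.h>

#define VM_SEQ_READ	0x00008000
#define VM_RAND_READ	0x00010000
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

struct vma { unsigned long vm_flags; };

int main(void)
{
	struct vma stack = { .vm_flags = VM_STACK_INCOMPLETE_SETUP };

	assert(stack.vm_flags & VM_STACK_INCOMPLETE_SETUP);	/* still moving */
	stack.vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;		/* final spot */
	assert(!(stack.vm_flags & VM_STACK_INCOMPLETE_SETUP));
	return 0;
}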
@@ -265,6 +273,8 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
+extern int page_is_ram(unsigned long pfn);
+
 /* Support for virtually mapped pages */
 struct page *vmalloc_to_page(const void *addr);
 unsigned long vmalloc_to_pfn(const void *addr);
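page_is_ram() gains a generic declaration here. A hedged kernel-context sketch of a caller (map_pfn_if_ram is a hypothetical name):

/*
 * Hedged kernel-context sketch: refuse to touch a PFN that is not
 * backed by RAM (a hole or device memory). page_address() is only
 * meaningful here for lowmem pages.
 */
#include <linux/mm.h>

static void *map_pfn_if_ram(unsigned long pfn)
{
	if (!page_is_ram(pfn))
		return NULL;
	return page_address(pfn_to_page(pfn));
}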
@@ -332,6 +342,7 @@ void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function. Provide a
@@ -589,7 +600,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 
 static __always_inline void *lowmem_page_address(struct page *page)
 {
-	return __va(page_to_pfn(page) << PAGE_SHIFT);
+	return __va(PFN_PHYS(page_to_pfn(page)));
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
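The lowmem_page_address() change swaps a bare unsigned long shift for PFN_PHYS(), which widens the PFN to a physical-address type before shifting; on configurations where physical addresses are wider than unsigned long, the old expression could truncate. A standalone demonstration (kernel_ulong and phys_addr mimic unsigned long and phys_addr_t on a 32-bit build with 64-bit physical addresses):

/*
 * Standalone demonstration of the truncation PFN_PHYS() avoids.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint32_t kernel_ulong;
typedef uint64_t phys_addr;

#define PFN_PHYS(pfn)	((phys_addr)(pfn) << PAGE_SHIFT)

int main(void)
{
	kernel_ulong pfn = 0x100000;	/* page sitting at physical 4 GiB */

	/* shifting in the narrow type wraps to 0 */
	printf("narrow: 0x%x\n", (unsigned)(pfn << PAGE_SHIFT));
	/* widening first preserves the address */
	printf("wide:   0x%llx\n", (unsigned long long)PFN_PHYS(pfn));
	return 0;
}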
@@ -780,8 +791,8 @@ struct mm_walk {
 	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
-	int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
-			     struct mm_walk *);
+	int (*hugetlb_entry)(pte_t *, unsigned long,
+			     unsigned long, unsigned long, struct mm_walk *);
 	struct mm_struct *mm;
 	void *private;
 };
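The hugetlb_entry callback gains a fourth scalar argument; in this release the second parameter appears to be the huge-page mask, ahead of the address range, letting a walker recover the huge page size. A hedged sketch of a conforming callback (count_huge and its use of walk->private are hypothetical):

/*
 * Hedged kernel-context sketch of a walker callback with the widened
 * signature; the hmask parameter is an assumption based on the
 * pagemap walker in this release.
 */
#include <linux/mm.h>

static int count_huge(pte_t *pte, unsigned long hmask,
		      unsigned long addr, unsigned long end,
		      struct mm_walk *walk)
{
	unsigned long *pages = walk->private;

	if (!pte_none(*pte))
		*pages += (end - addr) >> PAGE_SHIFT;
	return 0;
}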
@@ -808,6 +819,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern void truncate_setsize(struct inode *inode, loff_t newsize);
 extern int vmtruncate(struct inode *inode, loff_t offset);
 extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
 
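truncate_setsize() pairs the i_size update with the page-cache truncation that filesystems previously open-coded around truncate_pagecache(). A hedged sketch of a filesystem resize path using it (simplefs_setattr_size is a hypothetical name):

/*
 * Hedged sketch: update i_size and drop now out-of-range page-cache
 * pages in one call, then let the filesystem free on-disk blocks.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static int simplefs_setattr_size(struct inode *inode, loff_t newsize)
{
	int error = inode_newsize_ok(inode, newsize);

	if (error)
		return error;
	truncate_setsize(inode, newsize);
	/* ...filesystem then frees on-disk blocks past newsize... */
	return 0;
}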
@@ -852,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
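vma_stack_continue() reports whether the VMA ending at addr is a downward-growing stack, i.e. whether the mapping below is a continuation of it. A hedged sketch of the guard-page test this enables, modeled on the mlock path (is_stack_guard_page is a hypothetical name):

/*
 * Hedged kernel-context sketch: the first page of a grows-down vma is
 * a guard page unless the vma just above is the same stack mid-setup.
 */
#include <linux/mm.h>

static int is_stack_guard_page(struct vm_area_struct *vma,
			       struct vm_area_struct *prev,
			       unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		addr == vma->vm_start &&
		!vma_stack_continue(prev, addr);
}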
@@ -867,6 +885,114 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages);
+/*
+ * per-process(per-mm_struct) statistics.
+ */
+#if defined(SPLIT_RSS_COUNTING)
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_set(&mm->rss_stat.count[member], value);
+}
+
+unsigned long get_mm_counter(struct mm_struct *mm, int member);
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_add(value, &mm->rss_stat.count[member]);
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_inc(&mm->rss_stat.count[member]);
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_dec(&mm->rss_stat.count[member]);
+}
+
+#else  /* !USE_SPLIT_PTLOCKS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] = value;
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+	return mm->rss_stat.count[member];
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] += value;
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]++;
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]--;
+}
+
+#endif /* !USE_SPLIT_PTLOCKS */
+
+static inline unsigned long get_mm_rss(struct mm_struct *mm)
+{
+	return get_mm_counter(mm, MM_FILEPAGES) +
+		get_mm_counter(mm, MM_ANONPAGES);
+}
+
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
+
+static inline void update_hiwater_rss(struct mm_struct *mm)
+{
+	unsigned long _rss = get_mm_rss(mm);
+
+	if ((mm)->hiwater_rss < _rss)
+		(mm)->hiwater_rss = _rss;
+}
+
+static inline void update_hiwater_vm(struct mm_struct *mm)
+{
+	if (mm->hiwater_vm < mm->total_vm)
+		mm->hiwater_vm = mm->total_vm;
+}
+
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+					 struct mm_struct *mm)
+{
+	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+	if (*maxrss < hiwater_rss)
+		*maxrss = hiwater_rss;
+}
+
+#if defined(SPLIT_RSS_COUNTING)
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+#else
+static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+{
+}
+#endif
 
 /*
  * A callback you can register to apply pressure to ageable caches.
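The bulk of the additions are the per-mm RSS counters, now carried here with two implementations: atomic ops when SPLIT_RSS_COUNTING batches per-task deltas outside page_table_lock, plain arithmetic otherwise. A hedged kernel-context sketch of typical accounting (account_new_anon_page is hypothetical; it compiles against either implementation):

/*
 * Hedged sketch: account one anonymous page and refresh the RSS
 * high-water mark; the caller is assumed to hold whatever lock the
 * configuration requires.
 */
#include <linux/mm.h>
#include <linux/mm_types.h>

static void account_new_anon_page(struct mm_struct *mm)
{
	inc_mm_counter(mm, MM_ANONPAGES);
	update_hiwater_rss(mm);
}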
@@ -884,7 +1010,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * querying the cache size, so a fastpath for that case is appropriate.
  */
 struct shrinker {
-	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+	int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
 	int seeks;	/* seeks to recreate an obj */
 
 	/* These are for internal use */
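The shrink callback now receives its struct shrinker, so one function can service several registered instances via container_of(). A hedged sketch (my_cache and its fields are hypothetical):

/*
 * Hedged kernel-context sketch of a shrinker using the widened
 * callback: container_of() recovers the enclosing object.
 */
#include <linux/kernel.h>
#include <linux/mm.h>

struct my_cache {
	struct shrinker shrinker;
	int nr_objects;
};

static int my_cache_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
{
	struct my_cache *cache = container_of(s, struct my_cache, shrinker);

	if (nr_to_scan) {
		/* ...drop up to nr_to_scan objects, honouring gfp_mask... */
	}
	return cache->nr_objects;	/* report remaining cache size */
}

Registration is unchanged: set .shrink = my_cache_shrink and .seeks (e.g. DEFAULT_SEEKS), then call register_shrinker(&cache->shrinker).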
@@ -1047,6 +1173,10 @@ extern void get_pfn_range_for_nid(unsigned int nid,
 extern unsigned long find_min_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
+int add_from_early_node_map(struct range *range, int az,
+				   int nr_range, int nid);
+void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
+				 u64 goal, u64 limit);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
@@ -1079,11 +1209,7 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
 
-#ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
 
 extern void zone_pcp_update(struct zone *zone);
 
@@ -1111,7 +1237,7 @@ static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
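vma_adjust() now returns int, so callers must propagate failure (its anon_vma bookkeeping can allocate). A minimal hedged sketch (shrink_vma_tail is hypothetical and assumes mmap_sem is held for writing):

/*
 * Hedged kernel-context sketch: forward the new error return instead
 * of assuming the adjustment always succeeds.
 */
#include <linux/mm.h>

static int shrink_vma_tail(struct vm_area_struct *vma, unsigned long new_end)
{
	return vma_adjust(vma, vma->vm_start, new_end, vma->vm_pgoff, NULL);
}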
@@ -1214,8 +1340,10 @@ unsigned long ra_submit(struct file_ra_state *ra,
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#ifdef CONFIG_IA64
+#if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+  #define expand_upwards(vma, address) do { } while (0)
 #endif
 extern int expand_stack_downwards(struct vm_area_struct *vma,
 				  unsigned long address);
@@ -1241,7 +1369,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
+#ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
+#else
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return __pgprot(0);
+}
+#endif
+
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 			unsigned long pfn, unsigned long size, pgprot_t);
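The !CONFIG_MMU stub keeps vm_get_page_prot() callers compiling on nommu builds, where protections are not encoded in page tables. A hedged sketch of a caller (map_one_pfn is hypothetical):

/*
 * Hedged kernel-context sketch: translate VMA flags into a pgprot for
 * a PFN mapping; on nommu the stub above returns an empty pgprot.
 */
#include <linux/mm.h>

static int map_one_pfn(struct vm_area_struct *vma, unsigned long addr,
		       unsigned long pfn)
{
	pgprot_t prot = vm_get_page_prot(vma->vm_flags);

	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE, prot);
}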
@@ -1319,21 +1455,25 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
 
+void sparse_mem_maps_populate_node(struct page **map_map,
+				   unsigned long pnum_begin,
+				   unsigned long pnum_end,
+				   unsigned long map_count,
+				   int nodeid);
+
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(struct page *start_page,
 						unsigned long pages, int node);
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				 size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
@@ -1346,6 +1486,16 @@ extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
 extern atomic_long_t mce_bad_pages;
 extern int soft_offline_page(struct page *page, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+int is_hwpoison_address(unsigned long addr);
+#else
+static inline int is_hwpoison_address(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
+extern void dump_page(struct page *page);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
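Finally, dump_page() gives mm code and drivers a one-shot diagnostic dump of a struct page when a sanity check trips, and is_hwpoison_address() folds to 0 unless CONFIG_MEMORY_FAILURE is set. A hedged kernel-context sketch (check_expected_free is hypothetical):

/*
 * Hedged sketch: print the page's state if it is unexpectedly still
 * referenced.
 */
#include <linux/mm.h>

static void check_expected_free(struct page *page)
{
	if (page_count(page) != 0)
		dump_page(page);
}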