| field | value | date |
|---|---|---|
| author | Michal Marek <mmarek@suse.cz> | 2010-10-27 18:15:57 -0400 |
| committer | Michal Marek <mmarek@suse.cz> | 2010-10-27 18:15:57 -0400 |
| commit | b74b953b998bcc2db91b694446f3a2619ec32de6 (patch) | |
| tree | 6ce24caabd730f6ae9287ed0676ec32e6ff31e9d /include/linux/mm.h | |
| parent | abb438526201c6a79949ad45375c051b6681c253 (diff) | |
| parent | f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff) | |
Merge commit 'v2.6.36' into kbuild/misc
Update to be able to fix a recent change to scripts/basic/docproc.c
(commit eda603f).
Diffstat (limited to 'include/linux/mm.h')

| -rw-r--r-- | include/linux/mm.h | 178 |

1 file changed, 164 insertions(+), 14 deletions(-)
```diff
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 60c467bfbabd..74949fbef8c6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,13 +12,14 @@
 #include <linux/prio_tree.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
+#include <linux/range.h>
+#include <linux/pfn.h>
 
 struct mempolicy;
 struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -77,7 +78,11 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE	0x00000080
 
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 #define VM_GROWSUP	0x00000200
+#else
+#define VM_GROWSUP	0x00000000
+#endif
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
```
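The new `#else` branch makes `VM_GROWSUP` a compile-time constant 0 on architectures without upward-growing stacks, so code can test it with a plain `#if` or a flag test instead of `#ifdef CONFIG_IA64`; the `expand_upwards()` hunk further down relies on exactly this. A minimal sketch of the idiom (hypothetical function, not part of the patch):

```c
/* Hypothetical illustration: with VM_GROWSUP defined as 0 on most
 * architectures, both the preprocessor test and the flag test below
 * constant-fold to 0 where upward growth is impossible. */
static inline int vma_grows_up(struct vm_area_struct *vma)
{
#if VM_GROWSUP
	return !!(vma->vm_flags & VM_GROWSUP);
#else
	return 0;
#endif
}
```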
```diff
@@ -106,6 +111,9 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 
+/* Bits set in the VMA until the stack is in its final location */
+#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
+
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 #endif
@@ -265,6 +273,8 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
+extern int page_is_ram(unsigned long pfn);
+
 /* Support for virtually mapped pages */
 struct page *vmalloc_to_page(const void *addr);
 unsigned long vmalloc_to_pfn(const void *addr);
@@ -332,6 +342,7 @@ void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function. Provide a
@@ -589,7 +600,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 
 static __always_inline void *lowmem_page_address(struct page *page)
 {
-	return __va(page_to_pfn(page) << PAGE_SHIFT);
+	return __va(PFN_PHYS(page_to_pfn(page)));
}
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
```
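`PFN_PHYS()` comes from the newly included `<linux/pfn.h>` and widens the PFN to `phys_addr_t` before shifting, whereas the old expression shifted an `unsigned long` and could truncate on configurations whose physical addresses are wider than a long. Roughly (the macro body is paraphrased from `<linux/pfn.h>` of this era):

```c
/* Paraphrased from <linux/pfn.h>: the cast happens before the shift. */
#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)

/* Example on a 32-bit kernel with a 64-bit phys_addr_t and pfn = 0x100000
 * (the 4 GiB boundary, with PAGE_SHIFT = 12):
 *   pfn << PAGE_SHIFT  -> 0x0 (the shift wraps in 32 bits)
 *   PFN_PHYS(pfn)      -> 0x100000000ULL (correct)
 */
```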
```diff
@@ -780,8 +791,8 @@ struct mm_walk {
 	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
-	int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
-			     struct mm_walk *);
+	int (*hugetlb_entry)(pte_t *, unsigned long,
+			     unsigned long, unsigned long, struct mm_walk *);
 	struct mm_struct *mm;
 	void *private;
 };
```
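The extra `unsigned long` gives `hugetlb_entry` callbacks four scalar arguments; per the pagemap PFN-calculation fix that motivated this change, the new one carries the huge-page mask so a walker can align addresses itself. A hedged sketch of a conforming callback (names are illustrative):

```c
/* Illustrative walker callback matching the new prototype; hmask is
 * assumed to be the huge-page mask passed in by walk_page_range(). */
static int sketch_hugetlb_entry(pte_t *pte, unsigned long hmask,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	unsigned long hpage_start = addr & hmask;	/* huge-page-aligned base */

	/* ... examine *pte for the range [hpage_start, end) ... */
	return 0;	/* nonzero would abort the walk */
}
```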
```diff
@@ -808,6 +819,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern void truncate_setsize(struct inode *inode, loff_t newsize);
 extern int vmtruncate(struct inode *inode, loff_t offset);
 extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
 
```
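`truncate_setsize()` bundles the common "update `i_size`, then drop the now-stale pagecache" sequence that filesystems previously open-coded around `vmtruncate()`. A hedged sketch of the intended call site (illustrative filesystem code, not from the patch):

```c
/* Illustrative setattr-style helper: shrink the in-core inode first,
 * then let the filesystem reclaim on-disk blocks past the new EOF. */
static int sketchfs_shrink(struct inode *inode, loff_t newsize)
{
	truncate_setsize(inode, newsize);	/* i_size + pagecache */
	/* ... filesystem-specific block freeing would go here ... */
	return 0;
}
```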
```diff
@@ -852,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
```
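`vma_stack_continue()` takes the VMA below a stack and the stack's start address, and reports whether that lower VMA is the same grow-down stack continuing at that address. The stack-guard-page code uses this to avoid mis-handling the guard page; a hedged sketch of the idiom (illustrative, not the actual callers):

```c
/* Illustrative use: when displaying a grow-down stack, hide its guard
 * page unless the vma immediately below is the same stack continuing. */
static unsigned long display_start(struct vm_area_struct *prev,
				   struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_GROWSDOWN) &&
	    !vma_stack_continue(prev, vma->vm_start))
		return vma->vm_start + PAGE_SIZE;	/* skip the guard page */
	return vma->vm_start;
}
```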
```diff
@@ -867,6 +885,114 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages);
+/*
+ * per-process(per-mm_struct) statistics.
+ */
+#if defined(SPLIT_RSS_COUNTING)
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_set(&mm->rss_stat.count[member], value);
+}
+
+unsigned long get_mm_counter(struct mm_struct *mm, int member);
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_add(value, &mm->rss_stat.count[member]);
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_inc(&mm->rss_stat.count[member]);
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_dec(&mm->rss_stat.count[member]);
+}
+
+#else  /* !USE_SPLIT_PTLOCKS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] = value;
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+	return mm->rss_stat.count[member];
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] += value;
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]++;
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]--;
+}
+
+#endif /* !USE_SPLIT_PTLOCKS */
+
+static inline unsigned long get_mm_rss(struct mm_struct *mm)
+{
+	return get_mm_counter(mm, MM_FILEPAGES) +
+		get_mm_counter(mm, MM_ANONPAGES);
+}
+
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
+
+static inline void update_hiwater_rss(struct mm_struct *mm)
+{
+	unsigned long _rss = get_mm_rss(mm);
+
+	if ((mm)->hiwater_rss < _rss)
+		(mm)->hiwater_rss = _rss;
+}
+
+static inline void update_hiwater_vm(struct mm_struct *mm)
+{
+	if (mm->hiwater_vm < mm->total_vm)
+		mm->hiwater_vm = mm->total_vm;
+}
+
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+					 struct mm_struct *mm)
+{
+	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+	if (*maxrss < hiwater_rss)
+		*maxrss = hiwater_rss;
+}
+
+#if defined(SPLIT_RSS_COUNTING)
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+#else
+static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+{
+}
+#endif
 
 /*
  * A callback you can register to apply pressure to ageable caches.
```
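Under `SPLIT_RSS_COUNTING` the counters are atomics because fault paths update them without `page_table_lock`; otherwise plain increments suffice. Either way, callers see one API. A hedged usage sketch (the accounting site is illustrative):

```c
/* Illustrative accounting site: a COW-style fault replacing a file
 * page with an anonymous page, then refreshing the RSS high-water mark. */
static void sketch_account_cow(struct mm_struct *mm)
{
	dec_mm_counter(mm, MM_FILEPAGES);
	inc_mm_counter(mm, MM_ANONPAGES);
	update_hiwater_rss(mm);		/* total RSS may have peaked */
}
```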
```diff
@@ -884,7 +1010,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * querying the cache size, so a fastpath for that case is appropriate.
  */
 struct shrinker {
-	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+	int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
 	int seeks;	/* seeks to recreate an obj */
 
 	/* These are for internal use */
```
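Passing the `struct shrinker *` to the callback lets one function serve several shrinker instances: the callback recovers its private state with `container_of()`. A hedged sketch under the protocol of this era, where `nr_to_scan == 0` asks only for the cache size (names are illustrative):

```c
/* Illustrative shrinker using the new first argument. */
struct sketch_cache {
	struct shrinker shrinker;
	int nr_objects;
};

static int sketch_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
{
	struct sketch_cache *cache =
		container_of(s, struct sketch_cache, shrinker);

	if (nr_to_scan) {
		/* ... free up to nr_to_scan objects here ... */
	}
	return cache->nr_objects;	/* report remaining cache size */
}
```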
```diff
@@ -1047,6 +1173,10 @@ extern void get_pfn_range_for_nid(unsigned int nid,
 extern unsigned long find_min_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
+int add_from_early_node_map(struct range *range, int az,
+					int nr_range, int nid);
+void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
+				u64 goal, u64 limit);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
@@ -1079,11 +1209,7 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
 
-#ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
 
 extern void zone_pcp_update(struct zone *zone);
 
@@ -1111,7 +1237,7 @@ static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
@@ -1214,8 +1340,10 @@ unsigned long ra_submit(struct file_ra_state *ra,
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#ifdef CONFIG_IA64
+#if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+#define expand_upwards(vma, address) do { } while (0)
 #endif
 extern int expand_stack_downwards(struct vm_area_struct *vma,
 				  unsigned long address);
@@ -1241,7 +1369,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
+#ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
+#else
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return __pgprot(0);
+}
+#endif
+
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 			unsigned long pfn, unsigned long size, pgprot_t);
@@ -1319,21 +1455,25 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
 
+void sparse_mem_maps_populate_node(struct page **map_map,
+				   unsigned long pnum_begin,
+				   unsigned long pnum_end,
+				   unsigned long map_count,
+				   int nodeid);
+
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(struct page *start_page,
 						unsigned long pages, int node);
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
@@ -1346,6 +1486,16 @@ extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
 extern atomic_long_t mce_bad_pages;
 extern int soft_offline_page(struct page *page, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+int is_hwpoison_address(unsigned long addr);
+#else
+static inline int is_hwpoison_address(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
+extern void dump_page(struct page *page);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
```
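The `is_hwpoison_address()` stub follows the header's usual pattern: a real declaration under `CONFIG_MEMORY_FAILURE` and a `static inline` returning 0 otherwise, so callers need no `#ifdef`s of their own. A hedged caller sketch (the error choice is hypothetical):

```c
/* Illustrative caller: compiles identically with or without
 * CONFIG_MEMORY_FAILURE thanks to the static-inline stub. */
static int sketch_check_addr(unsigned long addr)
{
	if (is_hwpoison_address(addr))
		return -EFAULT;	/* hypothetical error choice */
	return 0;
}
```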
