Diffstat (limited to 'mm/internal.h')

-rw-r--r--	mm/internal.h	35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb8..6a697bb97fc5 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -50,6 +50,9 @@ extern void putback_lru_page(struct page *page);
  */
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
+#ifdef CONFIG_MEMORY_FAILURE
+extern bool is_free_buddy_page(struct page *page);
+#endif
 
 
 /*
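The hunk above exposes is_free_buddy_page() to the memory-failure code, which needs to recognize a poisoned page that is already sitting free in the buddy allocator. A minimal sketch of the intended call pattern, with a hypothetical handler (only is_free_buddy_page() itself comes from this patch):

#include "internal.h"

/* Hypothetical memory-failure step: a page already free in the buddy
 * allocator has no mappings, so no process needs to be killed. */
static int example_handle_poison(struct page *page)
{
	if (is_free_buddy_page(page))
		return 0;		/* already free: nothing to unmap */
	return -EBUSY;			/* fall through to full recovery */
}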
@@ -63,7 +66,7 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
-#ifdef CONFIG_HAVE_MLOCK
+#ifdef CONFIG_MMU
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -72,22 +75,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 {
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
-#endif
 
 /*
- * unevictable_migrate_page() called only from migrate_page_copy() to
- * migrate unevictable flag to new page.
- * Note that the old page has been isolated from the LRU lists at this
- * point so we don't need to worry about LRU statistics.
- */
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-	if (TestClearPageUnevictable(old))
-		SetPageUnevictable(new);
-}
-
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-/*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
  * If so, mark page as mlocked.
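Per the surviving comment, is_mlocked_vma() is called from the fault path via page_evictable() to catch a new page being mapped into a VM_LOCKED vma. A sketch of that caller shape, with an assumed name (not the real page_evictable()):

/* Assumed caller shape: a page faulted into a VM_LOCKED vma is marked
 * mlocked up front, so reclaim never has to rediscover the fact. */
static int example_page_evictable(struct page *page,
				  struct vm_area_struct *vma)
{
	if (vma && is_mlocked_vma(vma, page))
		return 0;	/* not evictable: belongs on the unevictable LRU */
	return 1;
}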
@@ -107,9 +96,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
 }
 
 /*
- * must be called with vma's mmap_sem held for read, and page locked.
+ * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
+extern void munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked(). This can be useful in a situation where
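The widened comment now admits callers holding mmap_sem for write as well as read. Read as a caller skeleton (names assumed; the lock pairing is the point):

/* Sketch of the documented contract: mmap_sem held for read (or
 * write) and the page locked across the call. */
static void example_mlock_one(struct mm_struct *mm, struct page *page)
{
	down_read(&mm->mmap_sem);
	lock_page(page);
	mlock_vma_page(page);		/* munlock_vma_page() is symmetric */
	unlock_page(page);
	up_read(&mm->mmap_sem);
}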
@@ -144,7 +134,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -153,7 +143,7 @@ static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
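Because the !CONFIG_MMU branch supplies empty inline stubs, common code such as the migration path can call these helpers unconditionally. A sketch of a migration copy step with assumed names (not the real migrate_page_copy()):

/* Illustrative copy step: PG_mlocked moves with the page, and the
 * helper compiles away to nothing on !CONFIG_MMU builds. */
static void example_migrate_copy(struct page *newpage, struct page *page)
{
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	mlock_migrate_page(newpage, page);	/* no-op without an MMU */
}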
@@ -260,3 +250,12 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 #define ZONE_RECLAIM_SOME 0
 #define ZONE_RECLAIM_SUCCESS 1
 #endif
+
+extern int hwpoison_filter(struct page *p);
+
+extern u32 hwpoison_filter_dev_major;
+extern u32 hwpoison_filter_dev_minor;
+extern u64 hwpoison_filter_flags_mask;
+extern u64 hwpoison_filter_flags_value;
+extern u64 hwpoison_filter_memcg;
+extern u32 hwpoison_filter_enable;
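The new externs publish the hwpoison filter: hwpoison_filter() is the hook, and the u32/u64 knobs restrict injection by backing device, page-flag mask/value, and memcg, with hwpoison_filter_enable as the master switch. A hedged sketch of an injection path consulting it (the surrounding function is an assumption; only hwpoison_filter() comes from this patch):

/* Hypothetical injector step: a nonzero return from hwpoison_filter()
 * means the page falls outside the configured filters and is skipped. */
static int example_inject(struct page *p)
{
	if (hwpoison_filter(p))
		return -EBUSY;	/* filtered: leave this page alone */
	/* ...proceed to mark the page as hwpoisoned... */
	return 0;
}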