author    Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /mm/internal.h
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'mm/internal.h')
-rw-r--r--  mm/internal.h  96
1 files changed, 15 insertions, 81 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 9ba21100ebf..2189af49178 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -92,11 +92,6 @@ extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
 
 /*
- * in mm/rmap.c:
- */
-extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
-
-/*
  * in mm/page_alloc.c
  */
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
@@ -105,46 +100,6 @@ extern void prep_compound_page(struct page *page, unsigned long order);
 extern bool is_free_buddy_page(struct page *page);
 #endif
 
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-
-/*
- * in mm/compaction.c
- */
-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
-	struct list_head freepages;	/* List of free pages to migrate to */
-	struct list_head migratepages;	/* List of pages being migrated */
-	unsigned long nr_freepages;	/* Number of isolated free pages */
-	unsigned long nr_migratepages;	/* Number of pages to migrate */
-	unsigned long free_pfn;		/* isolate_freepages search base */
-	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	bool sync;			/* Synchronous migration */
-	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
-	bool finished_update_free;	/* True when the zone cached pfns are
-					 * no longer being updated
-					 */
-	bool finished_update_migrate;
-
-	int order;			/* order a direct compactor needs */
-	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
-	struct zone *zone;
-	bool contended;			/* True if a lock was contended */
-};
-
-unsigned long
-isolate_freepages_range(struct compact_control *cc,
-			unsigned long start_pfn, unsigned long end_pfn);
-unsigned long
-isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-			unsigned long low_pfn, unsigned long end_pfn, bool unevictable);
-
-#endif
 
 /*
  * function for dealing with page's order in buddy system.
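The compact_control comment removed in the hunk above describes memory compaction's two converging scanners: migrate_pfn walks forward from the start of the zone collecting movable pages, free_pfn walks backward from the end collecting free pages, and the run completes when free_pfn <= migrate_pfn. The following is only a minimal userspace sketch of that converging-scanner idea; the zone bounds and the pfn_is_movable/pfn_is_free helpers are hypothetical stand-ins, not kernel APIs.

/*
 * Sketch: two scanners converge across a zone, moving movable pages
 * toward free slots near the end, until the scanners meet.
 */
#include <stdbool.h>
#include <stdio.h>

#define ZONE_START_PFN 0UL
#define ZONE_END_PFN   1024UL

/* Stand-ins for "is this pfn a movable page / a free page" checks. */
static bool pfn_is_movable(unsigned long pfn) { return (pfn % 3) == 0; }
static bool pfn_is_free(unsigned long pfn)    { return (pfn % 5) == 0; }

int main(void)
{
	unsigned long migrate_pfn = ZONE_START_PFN;   /* scans upward   */
	unsigned long free_pfn    = ZONE_END_PFN - 1; /* scans downward */
	unsigned long moved = 0;

	while (migrate_pfn < free_pfn) {
		/* Advance the migrate scanner to the next movable page. */
		while (migrate_pfn < free_pfn && !pfn_is_movable(migrate_pfn))
			migrate_pfn++;
		/* Advance the free scanner (downward) to the next free page. */
		while (free_pfn > migrate_pfn && !pfn_is_free(free_pfn))
			free_pfn--;
		if (migrate_pfn >= free_pfn)
			break;
		/* "Migrate" the movable page into the free slot near the end. */
		moved++;
		migrate_pfn++;
		free_pfn--;
	}
	printf("compaction run complete after moving %lu pages\n", moved);
	return 0;
}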
@@ -172,11 +127,11 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma. If it is, mark page as mlocked.
+ * Called only in fault path via page_evictable() for a new page
+ * to determine if it's being mapped into a LOCKED vma.
+ * If so, mark page as mlocked.
  */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				    struct page *page)
+static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
 {
 	VM_BUG_ON(PageLRU(page));
 
@@ -184,8 +139,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 		return 0;
 
 	if (!TestSetPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
+		inc_zone_page_state(page, NR_MLOCK);
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
 	return 1;
@@ -206,7 +160,12 @@ extern void munlock_vma_page(struct page *page);
  * If called for a page that is still mapped by mlocked vmas, all we do
  * is revert to lazy LRU behaviour -- semantics are not broken.
  */
-extern void clear_page_mlock(struct page *page);
+extern void __clear_page_mlock(struct page *page);
+static inline void clear_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page)))
+		__clear_page_mlock(page);
+}
 
 /*
  * mlock_migrate_page - called only from migrate_page_copy() to
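The hunk above replaces the plain extern declaration with a common kernel pattern: a cheap inline test-and-clear in the header, with the heavier LRU work pushed into an out-of-line __clear_page_mlock() that only runs when the bit was actually set. Below is a minimal userspace sketch of that fast-path/slow-path split, assuming hypothetical names (fake_page, FLAG_MLOCKED, slow_clear_mlock); it is not the kernel implementation.

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_MLOCKED 0x1u

struct fake_page {
	atomic_uint flags;
};

/* Out-of-line slow path: only reached when the bit was set. */
static void slow_clear_mlock(struct fake_page *page)
{
	printf("moving page %p back to an evictable LRU list\n", (void *)page);
}

/* Inline fast path: atomically clear the bit and test its old value. */
static inline void clear_page_mlock(struct fake_page *page)
{
	unsigned int old = atomic_fetch_and(&page->flags, ~FLAG_MLOCKED);

	if (old & FLAG_MLOCKED)
		slow_clear_mlock(page);
}

int main(void)
{
	struct fake_page page = { .flags = FLAG_MLOCKED };

	clear_page_mlock(&page); /* takes the slow path once */
	clear_page_mlock(&page); /* second call is a cheap no-op */
	return 0;
}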
@@ -216,24 +175,21 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 {
 	if (TestClearPageMlocked(page)) {
 		unsigned long flags;
-		int nr_pages = hpage_nr_pages(page);
 
 		local_irq_save(flags);
-		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+		__dec_zone_page_state(page, NR_MLOCK);
 		SetPageMlocked(newpage);
-		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
+		__inc_zone_page_state(newpage, NR_MLOCK);
 		local_irq_restore(flags);
 	}
 }
 
-extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern unsigned long vma_address(struct page *page,
 				struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
+static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
 }
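mlock_migrate_page() in the hunk above transfers the mlocked state and its NR_MLOCK zone counter from the old page to the new one during migration (with interrupts disabled around the counter updates in the real code). A minimal sketch of that transfer, using hypothetical fake_page/fake_zone stand-ins and omitting the interrupt handling, might look like this:

#include <stdbool.h>
#include <stdio.h>

struct fake_zone {
	long nr_mlock;		/* stand-in for the NR_MLOCK zone counter */
};

struct fake_page {
	bool mlocked;
	struct fake_zone *zone;
};

static void mlock_migrate_page(struct fake_page *newpage, struct fake_page *page)
{
	/* Test-and-clear on the old page so the state moves exactly once. */
	if (page->mlocked) {
		page->mlocked = false;
		page->zone->nr_mlock--;

		newpage->mlocked = true;
		newpage->zone->nr_mlock++;
	}
}

int main(void)
{
	struct fake_zone zone = { .nr_mlock = 1 };
	struct fake_page old = { .mlocked = true,  .zone = &zone };
	struct fake_page new = { .mlocked = false, .zone = &zone };

	mlock_migrate_page(&new, &old);
	printf("old=%d new=%d nr_mlock=%ld\n", old.mlocked, new.mlocked, zone.nr_mlock);
	return 0;
}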
@@ -343,6 +299,7 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 #define ZONE_RECLAIM_FULL	-1
 #define ZONE_RECLAIM_SOME	0
 #define ZONE_RECLAIM_SUCCESS	1
+#endif
 
 extern int hwpoison_filter(struct page *p);
 
@@ -352,26 +309,3 @@ extern u64 hwpoison_filter_flags_mask;
 extern u64 hwpoison_filter_flags_value;
 extern u64 hwpoison_filter_memcg;
 extern u32 hwpoison_filter_enable;
-
-extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
-        unsigned long, unsigned long,
-        unsigned long, unsigned long);
-
-extern void set_pageblock_order(void);
-unsigned long reclaim_clean_pages_from_list(struct zone *zone,
-					    struct list_head *page_list);
-/* The ALLOC_WMARK bits are used as an index to zone->watermark */
-#define ALLOC_WMARK_MIN		WMARK_MIN
-#define ALLOC_WMARK_LOW		WMARK_LOW
-#define ALLOC_WMARK_HIGH	WMARK_HIGH
-#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
-
-/* Mask to get the watermark bits */
-#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
-
-#define ALLOC_HARDER		0x10 /* try to alloc harder */
-#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
-#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
-
-#endif	/* __MM_INTERNAL_H */
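The ALLOC_* definitions removed in the final hunk encode the watermark selection in the low bits of alloc_flags: ALLOC_WMARK_MIN/LOW/HIGH are indices into the per-zone watermark array, ALLOC_NO_WATERMARKS (0x04) sits just above them, and ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS - 1) extracts the index. A minimal sketch of that masking, using a hypothetical fake_zone in place of struct zone, is:

#include <stdio.h>

enum { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04	/* don't check watermarks at all */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS - 1)
#define ALLOC_HARDER		0x10	/* other flags live above the mask */

struct fake_zone {
	unsigned long watermark[NR_WMARK];
};

static unsigned long wmark_pages(const struct fake_zone *z, int alloc_flags)
{
	/* The low two bits of alloc_flags pick the watermark to check. */
	return z->watermark[alloc_flags & ALLOC_WMARK_MASK];
}

int main(void)
{
	struct fake_zone zone = { .watermark = { 128, 256, 384 } };
	int alloc_flags = ALLOC_WMARK_LOW | ALLOC_HARDER;

	/* The high bits don't disturb the watermark index. */
	printf("checking against watermark %lu\n", wmark_pages(&zone, alloc_flags));
	return 0;
}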