Diffstat (limited to 'include/linux/swap.h')
-rw-r--r--  include/linux/swap.h  |  69
1 file changed, 66 insertions(+), 3 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de40f169a4e..a3af95b2cb6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -7,6 +7,7 @@
 #include <linux/list.h>
 #include <linux/memcontrol.h>
 #include <linux/sched.h>
+#include <linux/node.h>
 
 #include <asm/atomic.h>
 #include <asm/page.h>
@@ -171,8 +172,10 @@ extern unsigned int nr_free_pagecache_pages(void);
 
 
 /* linux/mm/swap.c */
-extern void lru_cache_add(struct page *);
-extern void lru_cache_add_active(struct page *);
+extern void __lru_cache_add(struct page *, enum lru_list lru);
+extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void lru_cache_add_active_or_unevictable(struct page *,
+					struct vm_area_struct *);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
@@ -180,12 +183,38 @@ extern int lru_add_drain_all(void);
 extern void rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
+extern void add_page_to_unevictable_list(struct page *page);
+
+/**
+ * lru_cache_add: add a page to the page lists
+ * @page: the page to add
+ */
+static inline void lru_cache_add_anon(struct page *page)
+{
+	__lru_cache_add(page, LRU_INACTIVE_ANON);
+}
+
+static inline void lru_cache_add_active_anon(struct page *page)
+{
+	__lru_cache_add(page, LRU_ACTIVE_ANON);
+}
+
+static inline void lru_cache_add_file(struct page *page)
+{
+	__lru_cache_add(page, LRU_INACTIVE_FILE);
+}
+
+static inline void lru_cache_add_active_file(struct page *page)
+{
+	__lru_cache_add(page, LRU_ACTIVE_FILE);
+}
+
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
 							gfp_t gfp_mask);
-extern int __isolate_lru_page(struct page *page, int mode);
+extern int __isolate_lru_page(struct page *page, int mode, int file);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
@@ -204,6 +233,34 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern void scan_mapping_unevictable_pages(struct address_space *);
+
+extern unsigned long scan_unevictable_pages;
+extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
+					void __user *, size_t *, loff_t *);
+extern int scan_unevictable_register_node(struct node *node);
+extern void scan_unevictable_unregister_node(struct node *node);
+#else
+static inline int page_evictable(struct page *page,
+					struct vm_area_struct *vma)
+{
+	return 1;
+}
+
+static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
+
+static inline int scan_unevictable_register_node(struct node *node)
+{
+	return 0;
+}
+
+static inline void scan_unevictable_unregister_node(struct node *node) { }
+#endif
+
 extern int kswapd_run(int nid);
 
 #ifdef CONFIG_MMU
@@ -251,6 +308,7 @@ extern sector_t swapdev_block(int, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int can_share_swap_page(struct page *);
 extern int remove_exclusive_swap_page(struct page *);
+extern int remove_exclusive_swap_page_ref(struct page *);
 struct backing_dev_info;
 
 /* linux/mm/thrash.c */
@@ -339,6 +397,11 @@ static inline int remove_exclusive_swap_page(struct page *p)
 	return 0;
 }
 
+static inline int remove_exclusive_swap_page_ref(struct page *page)
+{
+	return 0;
+}
+
 static inline swp_entry_t get_swap_page(void)
 {
 	swp_entry_t entry;
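
Note: the core interface change above is the replacement of the old lru_cache_add()/lru_cache_add_active() pair with a single __lru_cache_add(page, lru) entry point plus thin per-list inline wrappers for the split anon/file LRU lists. The standalone sketch below is illustrative only, not kernel code: struct page, the enum values, and the printf (which stands in for the real per-CPU pagevec batching) are assumptions made just to show the dispatch pattern the new wrappers implement.

/* Standalone model of the new single-entry-point + wrapper pattern. */
#include <stdio.h>

enum lru_list {
	LRU_INACTIVE_ANON,
	LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE,
	LRU_ACTIVE_FILE,
	NR_LRU_LISTS
};

struct page { unsigned long flags; };	/* placeholder, not the kernel's struct page */

/* One common entry point selects the target list from the enum argument. */
static void __lru_cache_add(struct page *page, enum lru_list lru)
{
	static const char *const names[NR_LRU_LISTS] = {
		"inactive_anon", "active_anon", "inactive_file", "active_file",
	};
	printf("page %p -> %s list\n", (void *)page, names[lru]);
}

/* Per-list wrappers mirroring the new inlines added to swap.h. */
static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}

int main(void)
{
	struct page anon_page = { 0 }, file_page = { 0 };

	lru_cache_add_anon(&anon_page);	/* anonymous page goes to the anon list */
	lru_cache_add_file(&file_page);	/* page-cache page goes to the file list */
	return 0;
}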