Diffstat (limited to 'include/linux/swap.h')
-rw-r--r--  include/linux/swap.h | 45
1 file changed, 26 insertions(+), 19 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7cdd63366f88..a273468f8285 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -155,6 +155,15 @@ enum {
 #define SWAP_CLUSTER_MAX 32
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
+/*
+ * Ratio between the present memory in the zone and the "gap" that
+ * we're allowing kswapd to shrink in addition to the per-zone high
+ * wmark, even for zones that already have the high wmark satisfied,
+ * in order to provide better per-zone lru behavior. We are ok to
+ * spend not more than 1% of the memory for this zone balancing "gap".
+ */
+#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
+
 #define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
 #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
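The new KSWAPD_ZONE_BALANCE_GAP_RATIO constant caps the extra "gap" kswapd may reclaim above a zone's high watermark at 1/100 of the zone's present pages. The constant's consumer sits outside this header (in mm/vmscan.c's kswapd path, not shown in this diff); the sketch below is only an illustration of the arithmetic, assuming a round-up division and a clamp against the zone's low watermark, both of which are assumptions here rather than part of the hunk above.

/*
 * Hedged sketch (not the in-tree code): compute a balancing gap from
 * KSWAPD_ZONE_BALANCE_GAP_RATIO.  The division rounds up so small
 * zones still get a non-zero gap, and the result is clamped so the
 * gap never exceeds the zone's low watermark.
 */
static unsigned long zone_balance_gap(unsigned long present_pages,
				      unsigned long low_wmark)
{
	unsigned long gap;

	gap = (present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
	      KSWAPD_ZONE_BALANCE_GAP_RATIO;

	return gap < low_wmark ? gap : low_wmark;	/* min(gap, low_wmark) */
}

For a 1 GiB zone with 4 KiB pages (262144 pages), this yields a gap of 2622 pages, roughly 10 MiB, i.e. about 1% of the zone, matching the comment's "not more than 1%" budget.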
@@ -208,11 +217,14 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void lru_add_page_tail(struct zone* zone,
+				struct page *page, struct page *page_tail);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
 extern int lru_add_drain_all(void);
 extern void rotate_reclaimable_page(struct page *page);
+extern void deactivate_page(struct page *page);
 extern void swap_setup(void);
 
 extern void add_page_to_unevictable_list(struct page *page);
@@ -245,7 +257,8 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
 extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 						gfp_t gfp_mask, bool noswap,
 						unsigned int swappiness,
-						struct zone *zone);
+						struct zone *zone,
+						unsigned long *nr_scanned);
 extern int __isolate_lru_page(struct page *page, int mode, int file);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
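The extra nr_scanned out-parameter on mem_cgroup_shrink_node_zone() lets a caller see how many pages reclaim scanned, not only how many it freed. A hedged caller-side sketch follows, assuming a soft-limit-style loop (presumably in mm/memcontrol.c, not part of this diff); memcg, gfp_mask, zone, total_scanned and nr_reclaimed are stand-ins for whatever the real caller tracks, and vm_swappiness is used here only as a placeholder swappiness value.

/* Sketch only: one per-zone memcg reclaim pass that records scan activity. */
unsigned long nr_scanned = 0;
unsigned long reclaimed;

reclaimed = mem_cgroup_shrink_node_zone(memcg, gfp_mask, false,
					vm_swappiness, zone, &nr_scanned);
*total_scanned += nr_scanned;
nr_reclaimed += reclaimed;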
@@ -271,24 +284,22 @@ extern void scan_mapping_unevictable_pages(struct address_space *);
 extern unsigned long scan_unevictable_pages;
 extern int scan_unevictable_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
+#ifdef CONFIG_NUMA
 extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
+#else
+static inline int scan_unevictable_register_node(struct node *node)
+{
+	return 0;
+}
+static inline void scan_unevictable_unregister_node(struct node *node)
+{
+}
+#endif
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
 
-#ifdef CONFIG_MMU
-/* linux/mm/shmem.c */
-extern int shmem_unuse(swp_entry_t entry, struct page *page);
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
-					struct page **pagep, swp_entry_t *ent);
-#endif
-
-extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
-
 #ifdef CONFIG_SWAP
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
@@ -337,6 +348,7 @@ struct backing_dev_info;
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
 extern void __put_swap_token(struct mm_struct *);
+extern void disable_swap_token(struct mem_cgroup *memcg);
 
 static inline int has_swap_token(struct mm_struct *mm)
 {
@@ -349,11 +361,6 @@ static inline void put_swap_token(struct mm_struct *mm)
 	__put_swap_token(mm);
 }
 
-static inline void disable_swap_token(void)
-{
-	put_swap_token(swap_token_mm);
-}
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 extern void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
@@ -479,7 +486,7 @@ static inline int has_swap_token(struct mm_struct *mm)
 	return 0;
 }
 
-static inline void disable_swap_token(void)
+static inline void disable_swap_token(struct mem_cgroup *memcg)
 {
 }
 
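disable_swap_token() changes from a trivial inline that always dropped the global swap token into an out-of-line function taking the mem_cgroup under reclaim, so memcg-limited reclaim need not clobber a token held by a task in an unrelated cgroup. The real definition is not part of this header diff; the sketch below only illustrates that intended semantics, and mm_matches_memcg() is a hypothetical helper standing in for whatever ownership check the in-tree code performs.

/*
 * Hedged sketch of the memcg-aware behaviour: global reclaim
 * (memcg == NULL) always drops the token; memcg reclaim drops it only
 * when the current token holder belongs to the cgroup being reclaimed.
 */
void disable_swap_token(struct mem_cgroup *memcg)
{
	if (!memcg || mm_matches_memcg(swap_token_mm, memcg))
		put_swap_token(swap_token_mm);
}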