about summary refs log tree commit diff stats
path: root/include/linux/swap.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/swap.h')
-rw-r--r--  include/linux/swap.h  49
1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 68df9c17fbbb..2818a123f3ea 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -8,7 +8,7 @@
8#include <linux/memcontrol.h> 8#include <linux/memcontrol.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/node.h> 10#include <linux/node.h>
11 11#include <linux/fs.h>
12#include <linux/atomic.h> 12#include <linux/atomic.h>
13#include <asm/page.h> 13#include <asm/page.h>
14 14
@@ -156,7 +156,7 @@ enum {
156 SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ 156 SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
157}; 157};
158 158
159#define SWAP_CLUSTER_MAX 32 159#define SWAP_CLUSTER_MAX 32UL
160#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX 160#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
161 161
162/* 162/*
@@ -202,6 +202,18 @@ struct swap_info_struct {
202 unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ 202 unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
203 atomic_t frontswap_pages; /* frontswap pages in-use counter */ 203 atomic_t frontswap_pages; /* frontswap pages in-use counter */
204#endif 204#endif
205 spinlock_t lock; /*
206 * protect map scan related fields like
207 * swap_map, lowest_bit, highest_bit,
208 * inuse_pages, cluster_next,
209 * cluster_nr, lowest_alloc and
210 * highest_alloc. other fields are only
211 * changed at swapon/swapoff, so are
212 * protected by swap_lock. changing
213 * flags need hold this lock and
214 * swap_lock. If both locks need hold,
215 * hold swap_lock first.
216 */
205}; 217};
206 218
207struct swap_list_t { 219struct swap_list_t {
@@ -209,15 +221,12 @@ struct swap_list_t {
209 int next; /* swapfile to be used next */ 221 int next; /* swapfile to be used next */
210}; 222};
211 223
212/* Swap 50% full? Release swapcache more aggressively.. */
213#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
214
215/* linux/mm/page_alloc.c */ 224/* linux/mm/page_alloc.c */
216extern unsigned long totalram_pages; 225extern unsigned long totalram_pages;
217extern unsigned long totalreserve_pages; 226extern unsigned long totalreserve_pages;
218extern unsigned long dirty_balance_reserve; 227extern unsigned long dirty_balance_reserve;
219extern unsigned int nr_free_buffer_pages(void); 228extern unsigned long nr_free_buffer_pages(void);
220extern unsigned int nr_free_pagecache_pages(void); 229extern unsigned long nr_free_pagecache_pages(void);
221 230
222/* Definition of global_page_state not available yet */ 231/* Definition of global_page_state not available yet */
223#define nr_free_pages() global_page_state(NR_FREE_PAGES) 232#define nr_free_pages() global_page_state(NR_FREE_PAGES)
@@ -266,7 +275,7 @@ extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
266extern unsigned long shrink_all_memory(unsigned long nr_pages); 275extern unsigned long shrink_all_memory(unsigned long nr_pages);
267extern int vm_swappiness; 276extern int vm_swappiness;
268extern int remove_mapping(struct address_space *mapping, struct page *page); 277extern int remove_mapping(struct address_space *mapping, struct page *page);
269extern long vm_total_pages; 278extern unsigned long vm_total_pages;
270 279
271#ifdef CONFIG_NUMA 280#ifdef CONFIG_NUMA
272extern int zone_reclaim_mode; 281extern int zone_reclaim_mode;
@@ -330,8 +339,9 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
330 sector_t *); 339 sector_t *);
331 340
332/* linux/mm/swap_state.c */ 341/* linux/mm/swap_state.c */
333extern struct address_space swapper_space; 342extern struct address_space swapper_spaces[];
334#define total_swapcache_pages swapper_space.nrpages 343#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
344extern unsigned long total_swapcache_pages(void);
335extern void show_swap_cache_info(void); 345extern void show_swap_cache_info(void);
336extern int add_to_swap(struct page *); 346extern int add_to_swap(struct page *);
337extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); 347extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
@@ -346,8 +356,20 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
346 struct vm_area_struct *vma, unsigned long addr); 356 struct vm_area_struct *vma, unsigned long addr);
347 357
348/* linux/mm/swapfile.c */ 358/* linux/mm/swapfile.c */
349extern long nr_swap_pages; 359extern atomic_long_t nr_swap_pages;
350extern long total_swap_pages; 360extern long total_swap_pages;
361
362/* Swap 50% full? Release swapcache more aggressively.. */
363static inline bool vm_swap_full(void)
364{
365 return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
366}
367
368static inline long get_nr_swap_pages(void)
369{
370 return atomic_long_read(&nr_swap_pages);
371}
372
351extern void si_swapinfo(struct sysinfo *); 373extern void si_swapinfo(struct sysinfo *);
352extern swp_entry_t get_swap_page(void); 374extern swp_entry_t get_swap_page(void);
353extern swp_entry_t get_swap_page_of_type(int); 375extern swp_entry_t get_swap_page_of_type(int);
@@ -380,9 +402,10 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
380 402
381#else /* CONFIG_SWAP */ 403#else /* CONFIG_SWAP */
382 404
383#define nr_swap_pages 0L 405#define get_nr_swap_pages() 0L
384#define total_swap_pages 0L 406#define total_swap_pages 0L
385#define total_swapcache_pages 0UL 407#define total_swapcache_pages() 0UL
408#define vm_swap_full() 0
386 409
387#define si_swapinfo(val) \ 410#define si_swapinfo(val) \
388 do { (val)->freeswap = (val)->totalswap = 0; } while (0) 411 do { (val)->freeswap = (val)->totalswap = 0; } while (0)