path: root/include/linux/swap.h
Diffstat (limited to 'include/linux/swap.h')
-rw-r--r--	include/linux/swap.h | 32 +++++++++++++++++++++++++++-----
 1 file changed, 27 insertions(+), 5 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 235c039892ee..a3e22d357e91 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -202,6 +202,18 @@ struct swap_info_struct {
 	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
 	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
 #endif
+	spinlock_t lock;		/*
+					 * protects map-scan related fields:
+					 * swap_map, lowest_bit, highest_bit,
+					 * inuse_pages, cluster_next,
+					 * cluster_nr, lowest_alloc and
+					 * highest_alloc. Other fields are only
+					 * changed at swapon/swapoff, so are
+					 * protected by swap_lock. Changing
+					 * flags requires holding this lock
+					 * and swap_lock; when both are
+					 * needed, take swap_lock first.
+					 */
 };
 
 struct swap_list_t {
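
The comment added above pins down a lock ordering: the new per-partition si->lock nests inside the existing global swap_lock. A minimal sketch of the rule for a flags update, assuming a swap_info_struct pointer si (the surrounding code is illustrative only, not part of this patch):

	/* Both locks are needed to change si->flags; per the comment,
	 * the global swap_lock must be taken first. */
	spin_lock(&swap_lock);
	spin_lock(&si->lock);
	si->flags |= SWP_WRITEOK;	/* example flag update */
	spin_unlock(&si->lock);
	spin_unlock(&swap_lock);
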
@@ -209,9 +221,6 @@ struct swap_list_t {
 	int next;	/* swapfile to be used next */
 };
 
-/* Swap 50% full? Release swapcache more aggressively.. */
-#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
-
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
@@ -347,8 +356,20 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
 
 /* linux/mm/swapfile.c */
-extern long nr_swap_pages;
+extern atomic_long_t nr_swap_pages;
 extern long total_swap_pages;
+
+/* Swap 50% full? Release swapcache more aggressively.. */
+static inline bool vm_swap_full(void)
+{
+	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
+}
+
+static inline long get_nr_swap_pages(void)
+{
+	return atomic_long_read(&nr_swap_pages);
+}
+
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
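
Since nr_swap_pages is now an atomic_long_t, a direct read such as nr_swap_pages > 0 no longer compiles; callers go through the accessor instead. A sketch of a caller-side conversion (the call site shown is hypothetical):

	/* Before this patch: plain read of the long counter. */
	if (nr_swap_pages > 0)
		entry = get_swap_page();

	/* After: read through the new atomic accessor. */
	if (get_nr_swap_pages() > 0)
		entry = get_swap_page();

The 50%-full heuristic keeps its old meaning; vm_swap_full() moves below the variable it reads because, unlike the former macro, a static inline function needs the declaration of nr_swap_pages in scope.
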
@@ -381,9 +402,10 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 
 #else /* CONFIG_SWAP */
 
-#define nr_swap_pages			0L
+#define get_nr_swap_pages()		0L
 #define total_swap_pages		0L
 #define total_swapcache_pages()		0UL
+#define vm_swap_full()			0
 
 #define si_swapinfo(val) \
 	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
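
The !CONFIG_SWAP stubs mirror the new helpers so generic code can call them without #ifdef guards; with swap disabled they fold to compile-time constants and dead branches drop out. A sketch under that assumption (the function name is hypothetical):

	/* Builds both ways: with CONFIG_SWAP=n this reduces to
	 * 0 || (0L == 0) and the compiler eliminates the branch. */
	static inline bool example_swap_exhausted(void)
	{
		return vm_swap_full() || get_nr_swap_pages() == 0;
	}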