 include/linux/swap.h |   19
 mm/memory.c          |   45
 mm/shmem.c           |    6
 mm/swap_state.c      |   47
 4 files changed, 57 insertions(+), 60 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4f3838adbb30..9fa1aef1b82c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -158,9 +158,6 @@ struct swap_list_t {
 /* Swap 50% full? Release swapcache more aggressively.. */
 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
 
-/* linux/mm/memory.c */
-extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
-
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
@@ -230,9 +227,12 @@ extern int move_from_swap_cache(struct page *, unsigned long,
 					struct address_space *);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
-extern struct page * lookup_swap_cache(swp_entry_t);
-extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *vma,
-			unsigned long addr);
+extern struct page *lookup_swap_cache(swp_entry_t);
+extern struct page *read_swap_cache_async(swp_entry_t,
+			struct vm_area_struct *vma, unsigned long addr);
+extern struct page *swapin_readahead(swp_entry_t,
+			struct vm_area_struct *vma, unsigned long addr);
+
 /* linux/mm/swapfile.c */
 extern long total_swap_pages;
 extern unsigned int nr_swapfiles;
@@ -306,7 +306,7 @@ static inline void swap_free(swp_entry_t swp)
 {
 }
 
-static inline struct page *read_swap_cache_async(swp_entry_t swp,
+static inline struct page *swapin_readahead(swp_entry_t swp,
 			struct vm_area_struct *vma, unsigned long addr)
 {
 	return NULL;
@@ -317,11 +317,6 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
 	return NULL;
 }
 
-static inline int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
-{
-	return 0;
-}
-
 #define can_share_swap_page(p) (page_mapcount(p) == 1)
 
 static inline int move_to_swap_cache(struct page *page, swp_entry_t entry)
diff --git a/mm/memory.c b/mm/memory.c
index 1d803c2d0184..ccc9403d5352 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1980,48 +1980,6 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 	return 0;
 }
 
-/**
- * swapin_readahead - swap in pages in hope we need them soon
- * @entry: swap entry of this memory
- * @addr: address to start
- * @vma: user vma this addresses belong to
- *
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time. We also make sure to queue
- * the 'original' request together with the readahead ones...
- *
- * This has been extended to use the NUMA policies from the mm triggering
- * the readahead.
- *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
- */
-void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
-{
-	int nr_pages;
-	struct page *page;
-	unsigned long offset;
-	unsigned long end_offset;
-
-	/*
-	 * Get starting offset for readaround, and number of pages to read.
-	 * Adjust starting address by readbehind (for NUMA interleave case)?
-	 * No, it's very unlikely that swap layout would follow vma layout,
-	 * more likely that neighbouring swap pages came from the same node:
-	 * so use the same "addr" to choose the same node for each swap read.
-	 */
-	nr_pages = valid_swaphandles(entry, &offset);
-	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
-		/* Ok, do the async read-ahead now */
-		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
-						vma, addr);
-		if (!page)
-			break;
-		page_cache_release(page);
-	}
-	lru_add_drain();	/* Push any new pages onto the LRU now */
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2049,8 +2007,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		grab_swap_token(); /* Contend for token _before_ read-in */
-		swapin_readahead(entry, address, vma);
-		page = read_swap_cache_async(entry, vma, address);
+		page = swapin_readahead(entry, vma, address);
 		if (!page) {
 			/*
 			 * Back out if somebody else faulted in this pte
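
Not part of the patch: a minimal before/after sketch of the caller pattern this hunk changes in do_swap_page(), using only the calls shown above (locking and error handling omitted).

	/* Before: readahead queued the cluster, then the target page was read separately. */
	swapin_readahead(entry, address, vma);			/* returned void */
	page = read_swap_cache_async(entry, vma, address);

	/* After: one call queues the readahead cluster and returns the target page. */
	page = swapin_readahead(entry, vma, address);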
diff --git a/mm/shmem.c b/mm/shmem.c
index 88c6685f16b7..3a22a8f79331 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1036,8 +1036,7 @@ static struct page *shmem_swapin(struct shmem_inode_info *info,
 	pvma.vm_pgoff = idx;
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
-	swapin_readahead(entry, 0, &pvma);
-	page = read_swap_cache_async(entry, &pvma, 0);
+	page = swapin_readahead(entry, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
@@ -1067,8 +1066,7 @@ static inline int shmem_parse_mpol(char *value, int *policy,
 static inline struct page *
 shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
 {
-	swapin_readahead(entry, 0, NULL);
-	return read_swap_cache_async(entry, NULL, 0);
+	return swapin_readahead(entry, NULL, 0);
 }
 
 static inline struct page *
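
Not part of the patch: a rough sketch of the pseudo-vma trick visible in the NUMA shmem_swapin() hunk above. The field assignments mirror the context lines in that hunk; the throwaway vma exists only to carry the shmem mapping's shared mempolicy into swapin_readahead().

	struct vm_area_struct pvma;			/* never inserted into any mm */

	pvma.vm_pgoff = idx;				/* page index within the shmem object */
	pvma.vm_ops = NULL;				/* not a real mapping */
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	page = swapin_readahead(entry, &pvma, 0);	/* addr 0: node choice comes from pvma's policy */
	mpol_free(pvma.vm_policy);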
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b52635601dfe..668a80422630 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
+#include <linux/swapops.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -368,3 +369,49 @@ struct page *read_swap_cache_async(swp_entry_t entry,
 	page_cache_release(new_page);
 	return found_page;
 }
+
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @vma: user vma this address belongs to
+ * @addr: target address for mempolicy
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time. We also make sure to queue
+ * the 'original' request together with the readahead ones...
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ */
+struct page *swapin_readahead(swp_entry_t entry,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	int nr_pages;
+	struct page *page;
+	unsigned long offset;
+	unsigned long end_offset;
+
+	/*
+	 * Get starting offset for readaround, and number of pages to read.
+	 * Adjust starting address by readbehind (for NUMA interleave case)?
+	 * No, it's very unlikely that swap layout would follow vma layout,
+	 * more likely that neighbouring swap pages came from the same node:
+	 * so use the same "addr" to choose the same node for each swap read.
+	 */
+	nr_pages = valid_swaphandles(entry, &offset);
+	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
+		/* Ok, do the async read-ahead now */
+		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+						vma, addr);
+		if (!page)
+			break;
+		page_cache_release(page);
+	}
+	lru_add_drain();	/* Push any new pages onto the LRU now */
+	return read_swap_cache_async(entry, vma, addr);
+}
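
Not part of the patch: an illustrative, standalone sketch of the "aligned block of (1 << page_cluster) entries" idea described in the comment above. The real window is computed by valid_swaphandles() in mm/swapfile.c, which also checks the swap map under swap_lock; this only shows the alignment arithmetic.

/*
 * Illustrative only: round the faulting swap offset down to a
 * (1 << page_cluster) boundary so the whole cluster, including the
 * target entry, is read with one sequential sweep.
 */
static unsigned long readahead_window(unsigned long offset, int page_cluster,
					unsigned long max_offset,
					unsigned long *start)
{
	unsigned long mask = (1UL << page_cluster) - 1;
	unsigned long end;

	*start = offset & ~mask;		/* aligned start of the cluster */
	end = *start + mask + 1;		/* one past the cluster's last entry */
	if (end > max_offset)			/* don't run past the end of the swap area */
		end = max_offset;
	return end - *start;			/* number of entries to read */
}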