about summary refs log tree commit diff stats
path: root/mm/swap_state.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--mm/swap_state.c23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index fd2f21e1c60a..85245fdec8d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -523,7 +523,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * This has been extended to use the NUMA policies from the mm triggering
  * the readahead.
  *
- * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
+ * Caller must hold read mmap_sem if vmf->vma is not NULL.
  */
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 		struct vm_fault *vmf)
@@ -543,6 +543,13 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	if (!mask)
 		goto skip;
 
+	/* Test swap type to make sure the dereference is safe */
+	if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
+		struct inode *inode = si->swap_file->f_mapping->host;
+		if (inode_read_congested(inode))
+			goto skip;
+	}
+
 	do_poll = false;
 	/* Read a page_cluster sized and aligned cluster around offset. */
 	start_offset = offset & ~mask;
@@ -691,6 +698,20 @@ static void swap_ra_info(struct vm_fault *vmf,
 	pte_unmap(orig_pte);
 }
 
+/**
+ * swap_vma_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read in a few pages whoes
+ * virtual addresses are around the fault address in the same vma.
+ *
+ * Caller must hold read mmap_sem if vmf->vma is not NULL.
+ *
+ */
 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 		struct vm_fault *vmf)