 include/linux/swap.h | 27
 mm/memory.c          | 11
 mm/shmem.c           |  5
 mm/swap_state.c      | 48
 4 files changed, 53 insertions(+), 38 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index fa92177d863e..2417d288e016 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -400,7 +400,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
 #define SWAP_ADDRESS_SPACE_SHIFT	14
 #define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
 extern struct address_space *swapper_spaces[];
-extern bool swap_vma_readahead;
 #define swap_address_space(entry)			    \
 	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
 		>> SWAP_ADDRESS_SPACE_SHIFT])
@@ -422,10 +421,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
 extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr,
 			bool *new_page_allocated);
-extern struct page *swapin_readahead(swp_entry_t, gfp_t,
-			struct vm_area_struct *vma, unsigned long addr);
-extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
+extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+			struct vm_fault *vmf);
+extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
 			struct vm_fault *vmf);
 
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
@@ -433,11 +432,6 @@ extern long total_swap_pages;
 extern atomic_t nr_rotate_swap;
 extern bool has_usable_swap(void);
 
-static inline bool swap_use_vma_readahead(void)
-{
-	return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
-}
-
 /* Swap 50% full? Release swapcache more aggressively.. */
 static inline bool vm_swap_full(void)
 {
@@ -533,19 +527,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp)
 {
 }
 
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+			gfp_t gfp_mask, struct vm_fault *vmf)
 {
 	return NULL;
 }
 
-static inline bool swap_use_vma_readahead(void)
-{
-	return false;
-}
-
-static inline struct page *do_swap_page_readahead(swp_entry_t fentry,
-			gfp_t gfp_mask, struct vm_fault *vmf)
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+			struct vm_fault *vmf)
 {
 	return NULL;
 }
diff --git a/mm/memory.c b/mm/memory.c
index bc1ccff79538..01f5464e0fd2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2927,7 +2927,8 @@ int do_swap_page(struct vm_fault *vmf)
 		if (si->flags & SWP_SYNCHRONOUS_IO &&
 				__swap_count(si, entry) == 1) {
 			/* skip swapcache */
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
+							vmf->address);
 			if (page) {
 				__SetPageLocked(page);
 				__SetPageSwapBacked(page);
@@ -2936,12 +2937,8 @@ int do_swap_page(struct vm_fault *vmf)
 				swap_readpage(page, true);
 			}
 		} else {
-			if (swap_use_vma_readahead())
-				page = do_swap_page_readahead(entry,
-					GFP_HIGHUSER_MOVABLE, vmf);
-			else
-				page = swapin_readahead(entry,
-					GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+						vmf);
 			swapcache = page;
 		}
 
diff --git a/mm/shmem.c b/mm/shmem.c
index b85919243399..4424fc0c33aa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1422,9 +1422,12 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
 {
 	struct vm_area_struct pvma;
 	struct page *page;
+	struct vm_fault vmf;
 
 	shmem_pseudo_vma_init(&pvma, info, index);
-	page = swapin_readahead(swap, gfp, &pvma, 0);
+	vmf.vma = &pvma;
+	vmf.address = 0;
+	page = swap_cluster_readahead(swap, gfp, &vmf);
 	shmem_pseudo_vma_destroy(&pvma);
 
 	return page;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index db5da2baafb1..b97da97b6846 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -38,7 +38,7 @@ static const struct address_space_operations swap_aops = {
 
 struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
-bool swap_vma_readahead __read_mostly = true;
+bool enable_vma_readahead __read_mostly = true;
 
 #define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 #define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
@@ -322,6 +322,11 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
 	release_pages(pagep, nr);
 }
 
+static inline bool swap_use_vma_readahead(void)
+{
+	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
+}
+
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel
@@ -544,11 +549,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 }
 
 /**
- * swapin_readahead - swap in pages in hope we need them soon
+ * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
- * @vma: user vma this address belongs to
- * @addr: target address for mempolicy
+ * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
@@ -560,10 +564,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
 */
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+				struct vm_fault *vmf)
 {
 	struct page *page;
 	unsigned long entry_offset = swp_offset(entry);
@@ -573,6 +577,8 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	struct swap_info_struct *si = swp_swap_info(entry);
 	struct blk_plug plug;
 	bool do_poll = true, page_allocated;
+	struct vm_area_struct *vma = vmf->vma;
+	unsigned long addr = vmf->address;
 
 	mask = swapin_nr_pages(offset) - 1;
 	if (!mask)
@@ -727,7 +733,7 @@ static void swap_ra_info(struct vm_fault *vmf,
 	pte_unmap(orig_pte);
 }
 
-struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
+struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 				    struct vm_fault *vmf)
 {
 	struct blk_plug plug;
@@ -774,20 +780,40 @@ skip:
 					ra_info.win == 1);
 }
 
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * It's a main entry function for swap readahead. By the configuration,
+ * it will read ahead blocks by cluster-based(ie, physical disk based)
+ * or vma-based(ie, virtual address based on faulty address) readahead.
+ */
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+				struct vm_fault *vmf)
+{
+	return swap_use_vma_readahead() ?
+		swap_vma_readahead(entry, gfp_mask, vmf) :
+		swap_cluster_readahead(entry, gfp_mask, vmf);
+}
+
 #ifdef CONFIG_SYSFS
 static ssize_t vma_ra_enabled_show(struct kobject *kobj,
 				   struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false");
+	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
 }
 static ssize_t vma_ra_enabled_store(struct kobject *kobj,
 				    struct kobj_attribute *attr,
 				    const char *buf, size_t count)
 {
 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
-		swap_vma_readahead = true;
+		enable_vma_readahead = true;
 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
-		swap_vma_readahead = false;
+		enable_vma_readahead = false;
 	else
 		return -EINVAL;
 
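
The control flow the patch establishes is a single swapin_readahead() entry point that picks VMA-based or cluster-based readahead depending on a runtime flag and on whether any rotational swap device is in use. The following stand-alone C sketch only illustrates that dispatch pattern; the stub functions and variables below are illustrative stand-ins, not kernel interfaces.

/* Illustrative sketch of the dispatch pattern used by swapin_readahead();
 * all names below are stand-ins, not kernel APIs. */
#include <stdbool.h>
#include <stdio.h>

static bool enable_vma_readahead = true;   /* mirrors the sysfs-toggled flag */
static int nr_rotate_swap;                 /* nonzero when a rotational swap device exists */

static bool swap_use_vma_readahead(void)
{
	return enable_vma_readahead && nr_rotate_swap == 0;
}

static void swap_vma_readahead(unsigned long addr)
{
	printf("vma-based readahead around fault address %#lx\n", addr);
}

static void swap_cluster_readahead(unsigned long offset)
{
	printf("cluster-based readahead around swap offset %#lx\n", offset);
}

static void swapin_readahead(unsigned long addr, unsigned long offset)
{
	if (swap_use_vma_readahead())
		swap_vma_readahead(addr);
	else
		swap_cluster_readahead(offset);
}

int main(void)
{
	swapin_readahead(0x7f0000001000UL, 42);   /* takes the VMA-based path */
	nr_rotate_swap = 1;                       /* e.g. swap lives on a spinning disk */
	swapin_readahead(0x7f0000001000UL, 42);   /* falls back to the cluster-based path */
	return 0;
}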