author     Jani Nikula <jani.nikula@intel.com>  2018-11-20 06:14:08 -0500
committer  Jani Nikula <jani.nikula@intel.com>  2018-11-20 06:14:08 -0500
commit     2ac5e38ea4203852d6e99edd3cf11f044b0a409f
tree       1ef02da98d56309368ad2b6a4e492bafe5bb4faf /mm/vmscan.c
parent     f48cc647f3e196a3179d695d3c2d56c13e9dec98
parent     9235dd441af43599b9cdcce599a3da4083fcad3c
Merge drm/drm-next into drm-intel-next-queued
Pull in v4.20-rc3 via drm-next.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  48
1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0dbc493026a2..24ab1f7394ab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -50,6 +50,7 @@
 #include <linux/prefetch.h>
 #include <linux/printk.h>
 #include <linux/dax.h>
+#include <linux/psi.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -474,9 +475,18 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
 
 	total_scan = nr;
-	delta = freeable >> priority;
-	delta *= 4;
-	do_div(delta, shrinker->seeks);
+	if (shrinker->seeks) {
+		delta = freeable >> priority;
+		delta *= 4;
+		do_div(delta, shrinker->seeks);
+	} else {
+		/*
+		 * These objects don't require any IO to create. Trim
+		 * them aggressively under memory pressure to keep
+		 * them from causing refetches in the IO caches.
+		 */
+		delta = freeable / 2;
+	}
 
 	/*
 	 * Make sure we apply some minimal pressure on default priority
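The new else branch halves a zero-seek cache on every pass, a much steeper rate than the seeks-scaled formula. A minimal userspace sketch of the two delta computations, with plain 64-bit division standing in for the kernel's do_div() and the constants mirroring DEF_PRIORITY (12) and DEFAULT_SEEKS (2):

#include <stdio.h>

/* Sketch of the two scan-delta formulas in do_shrink_slab() above;
 * plain division stands in for the kernel's do_div() helper. */
static unsigned long long scan_delta(unsigned long long freeable,
				     int priority, int seeks)
{
	if (seeks) {			/* objects cost IO to recreate */
		unsigned long long delta = freeable >> priority;
		return delta * 4 / seeks;
	}
	return freeable / 2;		/* zero-seek: trim aggressively */
}

int main(void)
{
	/* 1M freeable objects at default priority */
	printf("seeks=2: %llu\n", scan_delta(1ULL << 20, 12, 2)); /* 512 */
	printf("seeks=0: %llu\n", scan_delta(1ULL << 20, 12, 0)); /* 524288 */
	return 0;
}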
@@ -581,8 +591,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 				      struct mem_cgroup *memcg, int priority)
 {
 	struct memcg_shrinker_map *map;
-	unsigned long freed = 0;
-	int ret, i;
+	unsigned long ret, freed = 0;
+	int i;
 
 	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
 		return 0;
@@ -678,9 +688,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 				 struct mem_cgroup *memcg,
 				 int priority)
 {
+	unsigned long ret, freed = 0;
 	struct shrinker *shrinker;
-	unsigned long freed = 0;
-	int ret;
 
 	if (!mem_cgroup_is_root(memcg))
 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
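Both shrink_slab hunks make the same fix: do_shrink_slab() returns an unsigned long, so funnelling its result through an int ret before adding it to freed could truncate large counts. A small sketch of the failure mode (the freed count is hypothetical, and a 64-bit unsigned long is assumed):

#include <stdio.h>

int main(void)
{
	/* hypothetical: a shrinker reports 5 billion freed objects */
	unsigned long freed_by_shrinker = 5000000000UL;

	int ret_old = freed_by_shrinker;		/* old code: truncates */
	unsigned long ret_new = freed_by_shrinker;	/* new code: intact */

	printf("int ret: %d\n", ret_old);		/* mangled/negative */
	printf("unsigned long ret: %lu\n", ret_new);	/* 5000000000 */
	return 0;
}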
@@ -743,12 +752,12 @@ static inline int is_page_cache_freeable(struct page *page)
 {
 	/*
 	 * A freeable page cache page is referenced only by the caller
-	 * that isolated the page, the page cache radix tree and
-	 * optional buffer heads at page->private.
+	 * that isolated the page, the page cache and optional buffer
+	 * heads at page->private.
 	 */
-	int radix_pins = PageTransHuge(page) && PageSwapCache(page) ?
+	int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
 		HPAGE_PMD_NR : 1;
-	return page_count(page) - page_has_private(page) == 1 + radix_pins;
+	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
 }
 
 static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
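The rename from radix_pins to page_cache_pins follows the page cache's move from a radix tree to the XArray; the arithmetic itself is unchanged. As a worked example, for a regular page with buffer heads the isolating caller, the page cache entry, and page->private each hold one reference, so page_count() == 3, page_has_private() == 1, and 3 - 1 == 1 + 1 holds. A scalar sketch of the check, with plain integers replacing the struct page queries and 512 assumed for HPAGE_PMD_NR (as on x86-64):

#include <stdbool.h>
#include <stdio.h>

/* Scalar stand-in for is_page_cache_freeable() above. */
static bool cache_page_freeable(int page_count, int has_private,
				bool thp_in_swapcache)
{
	int page_cache_pins = thp_in_swapcache ? 512 : 1;

	return page_count - has_private == 1 + page_cache_pins;
}

int main(void)
{
	/* isolator + page cache + buffer heads: freeable */
	printf("%d\n", cache_page_freeable(3, 1, false));	/* 1 */
	/* one extra reference holder: not freeable */
	printf("%d\n", cache_page_freeable(4, 1, false));	/* 0 */
	return 0;
}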
@@ -924,7 +933,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		mem_cgroup_swapout(page, swap);
-		__delete_from_swap_cache(page);
+		__delete_from_swap_cache(page, swap);
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 		put_swap_page(page, swap);
 	} else {
@@ -2147,6 +2156,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 		}
 
 		ClearPageActive(page);	/* we are de-activating */
+		SetPageWorkingset(page);
 		list_add(&page->lru, &l_inactive);
 	}
 
@@ -2458,9 +2468,11 @@ out:
 			/*
 			 * Scan types proportional to swappiness and
 			 * their relative recent reclaim efficiency.
+			 * Make sure we don't miss the last page
+			 * because of a round-off error.
 			 */
-			scan = div64_u64(scan * fraction[file],
-					 denominator);
+			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
+						  denominator);
 			break;
 		case SCAN_FILE:
 		case SCAN_ANON:
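Truncating division returns zero pages to scan whenever scan * fraction[file] < denominator, silently dropping the tail of the list; rounding up guarantees at least one page survives the proportional split. A sketch of the difference, with (n + d - 1) / d standing in for the kernel's DIV64_U64_ROUND_UP():

#include <stdio.h>

static unsigned long long div_trunc(unsigned long long n, unsigned long long d)
{
	return n / d;			/* old div64_u64() behaviour */
}

static unsigned long long div_round_up(unsigned long long n, unsigned long long d)
{
	return (n + d - 1) / d;		/* DIV64_U64_ROUND_UP() behaviour */
}

int main(void)
{
	/* hypothetical split: 1 page to scan, fraction 1, denominator 3 */
	unsigned long long scan = 1, fraction = 1, denominator = 3;

	printf("trunc:    %llu\n", div_trunc(scan * fraction, denominator));	/* 0 */
	printf("round-up: %llu\n", div_round_up(scan * fraction, denominator));	/* 1 */
	return 0;
}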
@@ -3304,6 +3316,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
+	unsigned long pflags;
 	int nid;
 	unsigned int noreclaim_flag;
 	struct scan_control sc = {
@@ -3332,9 +3345,13 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 					    sc.gfp_mask,
 					    sc.reclaim_idx);
 
+	psi_memstall_enter(&pflags);
 	noreclaim_flag = memalloc_noreclaim_save();
+
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
 	memalloc_noreclaim_restore(noreclaim_flag);
+	psi_memstall_leave(&pflags);
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
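psi_memstall_enter() and psi_memstall_leave() bracket the reclaim work so the time spent is charged to the memory pressure-stall accounting behind the new <linux/psi.h> include. A userspace sketch of the same bracketing pattern; stall_begin()/stall_end() are hypothetical stand-ins for the kernel API, and pflags mimics the caller-provided state word that lets the pair nest safely:

#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins for psi_memstall_enter()/psi_memstall_leave(). */
static void stall_begin(unsigned long *pflags, struct timespec *start)
{
	*pflags = 1;	/* pretend per-task state is saved here */
	clock_gettime(CLOCK_MONOTONIC, start);
}

static void stall_end(unsigned long *pflags, const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	printf("stalled %ld ns (saved flags %lu)\n",
	       (now.tv_sec - start->tv_sec) * 1000000000L +
	       (now.tv_nsec - start->tv_nsec), *pflags);
}

int main(void)
{
	unsigned long pflags;
	struct timespec start;

	stall_begin(&pflags, &start);
	/* ... reclaim work would run here ... */
	stall_end(&pflags, &start);
	return 0;
}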
@@ -3499,6 +3516,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 	int i;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
+	unsigned long pflags;
 	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -3509,6 +3527,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		.may_swap = 1,
 	};
 
+	psi_memstall_enter(&pflags);
 	__fs_reclaim_acquire();
 
 	count_vm_event(PAGEOUTRUN);
@@ -3610,6 +3629,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 out:
 	snapshot_refaults(NULL, pgdat);
 	__fs_reclaim_release();
+	psi_memstall_leave(&pflags);
 	/*
 	 * Return the order kswapd stopped reclaiming at as
 	 * prepare_kswapd_sleep() takes it into account. If another caller