 mm/vmscan.c | 99 ++++++++++++++++++++++----------------------
 1 file changed, 46 insertions(+), 53 deletions(-)
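
Note on reading the initializer hunks below: in C, members omitted from a designated initializer are implicitly zero-initialized, which is why explicit lines such as .order = 0, .priority = 0, .nr_scanned = 0, .target_mem_cgroup = NULL and .nodemask = NULL can simply be dropped. A minimal standalone sketch of that rule (the struct and field names here are hypothetical, not part of the patch):

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-in for struct scan_control; illustration only. */
struct ctl {
	unsigned long nr_to_reclaim;
	int order;
	void *target_mem_cgroup;
	unsigned int may_swap:1;
};

int main(void)
{
	/* Members not named below are zero-initialized by the compiler. */
	struct ctl sc = {
		.nr_to_reclaim = 32,
		.may_swap = 1,
	};

	assert(sc.order == 0);
	assert(sc.target_mem_cgroup == NULL);
	return 0;
}
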
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c28b8981e56a..81dd858b9d17 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -59,35 +59,20 @@
 #include <trace/events/vmscan.h>
 
 struct scan_control {
-	/* Incremented by the number of inactive pages that were scanned */
-	unsigned long nr_scanned;
-
-	/* Number of pages freed so far during a call to shrink_zones() */
-	unsigned long nr_reclaimed;
-
-	/* One of the zones is ready for compaction */
-	int compaction_ready;
-
 	/* How many pages shrink_list() should reclaim */
 	unsigned long nr_to_reclaim;
 
-	unsigned long hibernation_mode;
-
 	/* This context's GFP mask */
 	gfp_t gfp_mask;
 
-	int may_writepage;
-
-	/* Can mapped pages be reclaimed? */
-	int may_unmap;
-
-	/* Can pages be swapped as part of reclaim? */
-	int may_swap;
-
+	/* Allocation order */
 	int order;
 
-	/* Scan (total_size >> priority) pages at once */
-	int priority;
+	/*
+	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
+	 * are scanned.
+	 */
+	nodemask_t *nodemask;
 
 	/*
 	 * The memory cgroup that hit its limit and as a result is the
@@ -95,11 +80,27 @@ struct scan_control {
 	 */
 	struct mem_cgroup *target_mem_cgroup;
 
-	/*
-	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
-	 * are scanned.
-	 */
-	nodemask_t *nodemask;
+	/* Scan (total_size >> priority) pages at once */
+	int priority;
+
+	unsigned int may_writepage:1;
+
+	/* Can mapped pages be reclaimed? */
+	unsigned int may_unmap:1;
+
+	/* Can pages be swapped as part of reclaim? */
+	unsigned int may_swap:1;
+
+	unsigned int hibernation_mode:1;
+
+	/* One of the zones is ready for compaction */
+	unsigned int compaction_ready:1;
+
+	/* Incremented by the number of inactive pages that were scanned */
+	unsigned long nr_scanned;
+
+	/* Number of pages freed so far during a call to shrink_zones() */
+	unsigned long nr_reclaimed;
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
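
The struct hunk above also turns the boolean knobs (may_writepage, may_unmap, may_swap, hibernation_mode, compaction_ready) from plain int/unsigned long into single-bit bitfields. A 1-bit unsigned field can only hold 0 or 1, which matches how the callers set them (e.g. .may_swap = !noswap), and the five flags now share one word instead of five. A rough sketch of the size effect, using hypothetical structs (the exact packing is ABI-specific):

#include <stdio.h>

/* Illustration only: five int flags vs. five single-bit fields. */
struct flags_wide {
	int may_writepage, may_unmap, may_swap, hibernation_mode, compaction_ready;
};

struct flags_packed {
	unsigned int may_writepage:1;
	unsigned int may_unmap:1;
	unsigned int may_swap:1;
	unsigned int hibernation_mode:1;
	unsigned int compaction_ready:1;
};

int main(void)
{
	/* Commonly prints "20 4", though sizes depend on the ABI. */
	printf("%zu %zu\n", sizeof(struct flags_wide), sizeof(struct flags_packed));
	return 0;
}
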
@@ -2668,15 +2669,14 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 {
 	unsigned long nr_reclaimed;
 	struct scan_control sc = {
+		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
+		.order = order,
+		.nodemask = nodemask,
+		.priority = DEF_PRIORITY,
 		.may_writepage = !laptop_mode,
-		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.may_unmap = 1,
 		.may_swap = 1,
-		.order = order,
-		.priority = DEF_PRIORITY,
-		.target_mem_cgroup = NULL,
-		.nodemask = nodemask,
 	};
 
 	/*
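
This hunk and the ones that follow mostly reorder the designated initializers to match the new member order in struct scan_control and drop explicit zero/NULL assignments. Designator order carries no semantic weight in C99, so the resulting objects are unchanged; a tiny sketch with a hypothetical struct:

struct pair { int a; int b; };

/* x and y are identical; designators may appear in any order. */
struct pair x = { .a = 1, .b = 2 };
struct pair y = { .b = 2, .a = 1 };
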
@@ -2706,14 +2706,11 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 						unsigned long *nr_scanned)
 {
 	struct scan_control sc = {
-		.nr_scanned = 0,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
+		.target_mem_cgroup = memcg,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = !noswap,
-		.order = 0,
-		.priority = 0,
-		.target_mem_cgroup = memcg,
 	};
 	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 	int swappiness = mem_cgroup_swappiness(memcg);
@@ -2748,16 +2745,14 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 	unsigned long nr_reclaimed;
 	int nid;
 	struct scan_control sc = {
-		.may_writepage = !laptop_mode,
-		.may_unmap = 1,
-		.may_swap = !noswap,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
-		.order = 0,
-		.priority = DEF_PRIORITY,
-		.target_mem_cgroup = memcg,
-		.nodemask = NULL, /* we don't care the placement */
 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
+		.target_mem_cgroup = memcg,
+		.priority = DEF_PRIORITY,
+		.may_writepage = !laptop_mode,
+		.may_unmap = 1,
+		.may_swap = !noswap,
 	};
 
 	/*
@@ -3015,12 +3010,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 	unsigned long nr_soft_scanned;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
+		.order = order,
 		.priority = DEF_PRIORITY,
+		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = 1,
-		.may_writepage = !laptop_mode,
-		.order = order,
-		.target_mem_cgroup = NULL,
 	};
 	count_vm_event(PAGEOUTRUN);
 
@@ -3401,14 +3395,13 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 {
 	struct reclaim_state reclaim_state;
 	struct scan_control sc = {
+		.nr_to_reclaim = nr_to_reclaim,
 		.gfp_mask = GFP_HIGHUSER_MOVABLE,
-		.may_swap = 1,
-		.may_unmap = 1,
+		.priority = DEF_PRIORITY,
 		.may_writepage = 1,
-		.nr_to_reclaim = nr_to_reclaim,
+		.may_unmap = 1,
+		.may_swap = 1,
 		.hibernation_mode = 1,
-		.order = 0,
-		.priority = DEF_PRIORITY,
 	};
 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
@@ -3588,13 +3581,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
 	struct scan_control sc = {
-		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-		.may_swap = 1,
 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
 		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
 		.order = order,
 		.priority = ZONE_RECLAIM_PRIORITY,
+		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
+		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+		.may_swap = 1,
 	};
 	struct shrink_control shrink = {
 		.gfp_mask = sc.gfp_mask,