author    Johannes Weiner <hannes@cmpxchg.org>    2018-10-26 18:06:04 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-10-26 19:26:32 -0400
commit    1899ad18c6072d689896badafb81267b0a1092a4
tree      d960f2886025787cdbd5fa12fb64e7f52a6e4053
parent    95f9ab2d596e8cbb388315e78c82b9a131bf2928
mm: workingset: tell cache transitions from workingset thrashing
Refaults happen during transitions between workingsets as well as in-place thrashing. Knowing the difference between the two has a range of applications, including measuring the impact of memory shortage on system performance, as well as the ability to balance pressure more intelligently between the filesystem cache and the swap-backed workingset.

During workingset transitions, inactive cache refaults and pushes out established active cache. When that active cache isn't stale, however, and also ends up refaulting, that's bona fide thrashing.

Introduce a new page flag that tells on eviction whether the page has been active or not in its lifetime. This bit is then stored in the shadow entry, to classify refaults as transitioning or thrashing.

How many page->flags does this leave us with on 32-bit?

	20 bits are always page flags
	21 if you have an MMU
	23 with the zone bits for DMA, Normal, HighMem, Movable
	29 with the sparsemem section bits
	30 if PAE is enabled
	31 with this patch.

So on 32-bit PAE, that leaves 1 bit for distinguishing two NUMA nodes. If that's not enough, the system can switch to discontigmem and re-gain the 6 or 7 sparsemem section bits.

Link: http://lkml.kernel.org/r/20180828172258.3185-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Daniel Drake <drake@endlessm.com>
Tested-by: Suren Baghdasaryan <surenb@google.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Johannes Weiner <jweiner@fb.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Enderborg <peter.enderborg@sony.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
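To make the shadow-entry packing concrete, here is a minimal user-space sketch mirroring the pack_shadow()/unpack_shadow() changes below. The shift widths are illustrative stand-ins for the kernel's configuration-dependent values, and the bucket_order granularity is left out:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative widths; the kernel derives these from its config */
#define NODES_SHIFT			6
#define MEM_CGROUP_ID_SHIFT		16
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2UL

static unsigned long pack_shadow(int memcgid, int nid,
				 unsigned long eviction, bool workingset)
{
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | nid;
	eviction = (eviction << 1) | workingset;	/* the new bit */
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
	return eviction | RADIX_TREE_EXCEPTIONAL_ENTRY;
}

static void unpack_shadow(unsigned long entry, int *memcgid, int *nid,
			  unsigned long *eviction, bool *workingset)
{
	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	*workingset = entry & 1;		/* recovered on refault */
	entry >>= 1;
	*nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	*memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	*eviction = entry >> MEM_CGROUP_ID_SHIFT;
}

int main(void)
{
	unsigned long eviction;
	bool workingset;
	int memcgid, nid;

	unpack_shadow(pack_shadow(42, 1, 123456, true),
		      &memcgid, &nid, &eviction, &workingset);
	assert(memcgid == 42 && nid == 1 && eviction == 123456 && workingset);
	printf("workingset bit survives the round trip: %d\n", workingset);
	return 0;
}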
-rw-r--r--  include/linux/mmzone.h          |  1
-rw-r--r--  include/linux/page-flags.h      |  5
-rw-r--r--  include/linux/swap.h            |  2
-rw-r--r--  include/trace/events/mmflags.h  |  1
-rw-r--r--  mm/filemap.c                    |  9
-rw-r--r--  mm/huge_memory.c                |  1
-rw-r--r--  mm/migrate.c                    |  2
-rw-r--r--  mm/swap_state.c                 |  1
-rw-r--r--  mm/vmscan.c                     |  1
-rw-r--r--  mm/vmstat.c                     |  1
-rw-r--r--  mm/workingset.c                 | 95

11 files changed, 77 insertions, 42 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7bbeba21f6a3..ba51d5bf7af1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -163,6 +163,7 @@ enum node_stat_item {
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_REFAULT,
 	WORKINGSET_ACTIVATE,
+	WORKINGSET_RESTORE,
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_MAPPED,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 74bee8cecf4c..4d99504f6496 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -69,13 +69,14 @@
  */
 enum pageflags {
 	PG_locked,		/* Page is locked. Don't touch. */
-	PG_error,
 	PG_referenced,
 	PG_uptodate,
 	PG_dirty,
 	PG_lru,
 	PG_active,
+	PG_workingset,
 	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
+	PG_error,
 	PG_slab,
 	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use*/
 	PG_arch_1,
@@ -280,6 +281,8 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
 PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
 PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
 	TESTCLEARFLAG(Active, active, PF_HEAD)
+PAGEFLAG(Workingset, workingset, PF_HEAD)
+	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
 __PAGEFLAG(Slab, slab, PF_NO_TAIL)
 __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8e2c11e692ba..b93740d72e78 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -296,7 +296,7 @@ struct vma_swap_readahead {
 
 /* linux/mm/workingset.c */
 void *workingset_eviction(struct address_space *mapping, struct page *page);
-bool workingset_refault(void *shadow);
+void workingset_refault(struct page *page, void *shadow);
 void workingset_activation(struct page *page);
 
 /* Do not use directly, use workingset_lookup_update */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a81cffb76d89..a1675d43777e 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -88,6 +88,7 @@
 	{1UL << PG_dirty,		"dirty"		},		\
 	{1UL << PG_lru,			"lru"		},		\
 	{1UL << PG_active,		"active"	},		\
+	{1UL << PG_workingset,		"workingset"	},		\
 	{1UL << PG_slab,		"slab"		},		\
 	{1UL << PG_owner_priv_1,	"owner_priv_1"	},		\
 	{1UL << PG_arch_1,		"arch_1"	},		\
diff --git a/mm/filemap.c b/mm/filemap.c
index de6fed2a0815..7997adce5a29 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -915,12 +915,9 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		 * data from the working set, only to cache data that will
 		 * get overwritten with something else, is a waste of memory.
 		 */
-		if (!(gfp_mask & __GFP_WRITE) &&
-		    shadow && workingset_refault(shadow)) {
-			SetPageActive(page);
-			workingset_activation(page);
-		} else
-			ClearPageActive(page);
+		WARN_ON_ONCE(PageActive(page));
+		if (!(gfp_mask & __GFP_WRITE) && shadow)
+			workingset_refault(page, shadow);
 		lru_cache_add(page);
 	}
 	return ret;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index deed97fba979..8ea1b36bd452 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2369,6 +2369,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
 			 (1L << PG_mlocked) |
 			 (1L << PG_uptodate) |
 			 (1L << PG_active) |
+			 (1L << PG_workingset) |
 			 (1L << PG_locked) |
 			 (1L << PG_unevictable) |
 			 (1L << PG_dirty)));
diff --git a/mm/migrate.c b/mm/migrate.c
index 84381b55b2bd..1ea27b343ccd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -685,6 +685,8 @@ void migrate_page_states(struct page *newpage, struct page *page)
 		SetPageActive(newpage);
 	} else if (TestClearPageUnevictable(page))
 		SetPageUnevictable(newpage);
+	if (PageWorkingset(page))
+		SetPageWorkingset(newpage);
 	if (PageChecked(page))
 		SetPageChecked(newpage);
 	if (PageMappedToDisk(page))
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ecee9c6c4cc1..0d6a7f268d2e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -448,6 +448,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			/*
 			 * Initiate read into locked page and return.
 			 */
+			SetPageWorkingset(new_page);
 			lru_cache_add_anon(new_page);
 			*new_page_allocated = true;
 			return new_page;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 961401c46334..87e9fef341d2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2145,6 +2145,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 		}
 
 		ClearPageActive(page);	/* we are de-activating */
+		SetPageWorkingset(page);
 		list_add(&page->lru, &l_inactive);
 	}
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 2cec2fa4c8ae..d918f6192d15 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1145,6 +1145,7 @@ const char * const vmstat_text[] = {
1145 "nr_isolated_file", 1145 "nr_isolated_file",
1146 "workingset_refault", 1146 "workingset_refault",
1147 "workingset_activate", 1147 "workingset_activate",
1148 "workingset_restore",
1148 "workingset_nodereclaim", 1149 "workingset_nodereclaim",
1149 "nr_anon_pages", 1150 "nr_anon_pages",
1150 "nr_mapped", 1151 "nr_mapped",
diff --git a/mm/workingset.c b/mm/workingset.c
index 7d5fa0dd2b38..99b7f7c09b13 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -121,7 +121,7 @@
  * the only thing eating into inactive list space is active pages.
  *
  *
- *		Activating refaulting pages
+ *		Refaulting inactive pages
  *
  * All that is known about the active list is that the pages have been
  * accessed more than once in the past.  This means that at any given
@@ -134,6 +134,10 @@
  * used less frequently than the refaulting page - or even not used at
  * all anymore.
  *
+ * That means if inactive cache is refaulting with a suitable refault
+ * distance, we assume the cache workingset is transitioning and put
+ * pressure on the current active list.
+ *
  * If this is wrong and demotion kicks in, the pages which are truly
  * used more frequently will be reactivated while the less frequently
  * used once will be evicted from memory.
@@ -141,6 +145,14 @@
  * But if this is right, the stale pages will be pushed out of memory
  * and the used pages get to stay in cache.
  *
+ *		Refaulting active pages
+ *
+ * If on the other hand the refaulting pages have recently been
+ * deactivated, it means that the active list is no longer protecting
+ * actively used cache from reclaim. The cache is NOT transitioning to
+ * a different workingset; the existing workingset is thrashing in the
+ * space allocated to the page cache.
+ *
  *
  *		Implementation
  *
@@ -156,8 +168,7 @@
  */
 
 #define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY +	\
-			 NODES_SHIFT +	\
-			 MEM_CGROUP_ID_SHIFT)
+			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
 #define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
 
 /*
@@ -170,23 +181,28 @@
  */
 static unsigned int bucket_order __read_mostly;
 
-static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
+static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
+			 bool workingset)
 {
 	eviction >>= bucket_order;
 	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
 	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
+	eviction = (eviction << 1) | workingset;
 	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
 
 	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 }
 
 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
-			  unsigned long *evictionp)
+			  unsigned long *evictionp, bool *workingsetp)
 {
 	unsigned long entry = (unsigned long)shadow;
 	int memcgid, nid;
+	bool workingset;
 
 	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
+	workingset = entry & 1;
+	entry >>= 1;
 	nid = entry & ((1UL << NODES_SHIFT) - 1);
 	entry >>= NODES_SHIFT;
 	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
@@ -195,6 +211,7 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
 	*memcgidp = memcgid;
 	*pgdat = NODE_DATA(nid);
 	*evictionp = entry << bucket_order;
+	*workingsetp = workingset;
 }
 
 /**
@@ -207,8 +224,8 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
  */
 void *workingset_eviction(struct address_space *mapping, struct page *page)
 {
-	struct mem_cgroup *memcg = page_memcg(page);
 	struct pglist_data *pgdat = page_pgdat(page);
+	struct mem_cgroup *memcg = page_memcg(page);
 	int memcgid = mem_cgroup_id(memcg);
 	unsigned long eviction;
 	struct lruvec *lruvec;
@@ -220,30 +237,30 @@ void *workingset_eviction(struct address_space *mapping, struct page *page)
 
 	lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	eviction = atomic_long_inc_return(&lruvec->inactive_age);
-	return pack_shadow(memcgid, pgdat, eviction);
+	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
 }
 
 /**
  * workingset_refault - evaluate the refault of a previously evicted page
+ * @page: the freshly allocated replacement page
  * @shadow: shadow entry of the evicted page
  *
  * Calculates and evaluates the refault distance of the previously
  * evicted page in the context of the node it was allocated in.
- *
- * Returns %true if the page should be activated, %false otherwise.
  */
-bool workingset_refault(void *shadow)
+void workingset_refault(struct page *page, void *shadow)
 {
 	unsigned long refault_distance;
+	struct pglist_data *pgdat;
 	unsigned long active_file;
 	struct mem_cgroup *memcg;
 	unsigned long eviction;
 	struct lruvec *lruvec;
 	unsigned long refault;
-	struct pglist_data *pgdat;
+	bool workingset;
 	int memcgid;
 
-	unpack_shadow(shadow, &memcgid, &pgdat, &eviction);
+	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
 
 	rcu_read_lock();
 	/*
@@ -263,41 +280,51 @@ bool workingset_refault(void *shadow)
 	 * configurations instead.
 	 */
 	memcg = mem_cgroup_from_id(memcgid);
-	if (!mem_cgroup_disabled() && !memcg) {
-		rcu_read_unlock();
-		return false;
-	}
+	if (!mem_cgroup_disabled() && !memcg)
+		goto out;
 	lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	refault = atomic_long_read(&lruvec->inactive_age);
 	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
 
 	/*
-	 * The unsigned subtraction here gives an accurate distance
-	 * across inactive_age overflows in most cases.
+	 * Calculate the refault distance
 	 *
-	 * There is a special case: usually, shadow entries have a
-	 * short lifetime and are either refaulted or reclaimed along
-	 * with the inode before they get too old.  But it is not
-	 * impossible for the inactive_age to lap a shadow entry in
-	 * the field, which can then can result in a false small
-	 * refault distance, leading to a false activation should this
-	 * old entry actually refault again.  However, earlier kernels
-	 * used to deactivate unconditionally with *every* reclaim
-	 * invocation for the longest time, so the occasional
-	 * inappropriate activation leading to pressure on the active
-	 * list is not a problem.
+	 * The unsigned subtraction here gives an accurate distance
+	 * across inactive_age overflows in most cases. There is a
+	 * special case: usually, shadow entries have a short lifetime
+	 * and are either refaulted or reclaimed along with the inode
+	 * before they get too old. But it is not impossible for the
+	 * inactive_age to lap a shadow entry in the field, which can
+	 * then result in a false small refault distance, leading to a
+	 * false activation should this old entry actually refault
+	 * again. However, earlier kernels used to deactivate
+	 * unconditionally with *every* reclaim invocation for the
+	 * longest time, so the occasional inappropriate activation
+	 * leading to pressure on the active list is not a problem.
 	 */
 	refault_distance = (refault - eviction) & EVICTION_MASK;
 
 	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);
 
-	if (refault_distance <= active_file) {
-		inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
-		rcu_read_unlock();
-		return true;
+	/*
+	 * Compare the distance to the existing workingset size. We
+	 * don't act on pages that couldn't stay resident even if all
+	 * the memory was available to the page cache.
+	 */
+	if (refault_distance > active_file)
+		goto out;
+
+	SetPageActive(page);
+	atomic_long_inc(&lruvec->inactive_age);
+	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
+
+	/* Page was active prior to eviction */
+	if (workingset) {
+		SetPageWorkingset(page);
+		inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
 	}
+out:
 	rcu_read_unlock();
-	return false;
 }
 
 /**
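As an aside on the refault-distance comment above: a tiny user-space demonstration of why the unsigned subtraction stays accurate across an inactive_age wrap (a sketch; it ignores the EVICTION_MASK truncation, which only matters once the counter laps an old shadow entry):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long eviction = ~0UL - 5;	/* age just before the wrap */
	unsigned long refault = 10;		/* age read after the wrap */

	/* Modular arithmetic: 6 increments to wrap, 10 more afterwards */
	unsigned long distance = refault - eviction;

	assert(distance == 16);
	printf("refault distance across overflow: %lu\n", distance);
	return 0;
}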