diff options
author | Johannes Weiner <hannes@cmpxchg.org> | 2016-03-15 17:57:10 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-15 19:55:16 -0400 |
commit | 162453bfbdf4c0f58cb3058aad9ad8cda1044cda (patch) | |
tree | 43be985615141172792acb585d80bf6651cdc399 /mm | |
parent | 689c94f03ae251181725fe853b4ffc870f05c7fe (diff) |
mm: workingset: separate shadow unpacking and refault calculation
Per-cgroup thrash detection will need to derive a live memcg from the
eviction cookie, and doing that inside unpack_shadow() will get nasty
with the reference handling spread over two functions.
In preparation, make unpack_shadow() clearly about extracting static
data, and let workingset_refault() do all the higher-level handling.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/workingset.c | 56 |
1 files changed, 28 insertions, 28 deletions
diff --git a/mm/workingset.c b/mm/workingset.c index 3ef92f6e41fe..f874b2c663e3 100644 --- a/mm/workingset.c +++ b/mm/workingset.c | |||
@@ -165,13 +165,10 @@ static void *pack_shadow(unsigned long eviction, struct zone *zone) | |||
165 | return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY); | 165 | return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY); |
166 | } | 166 | } |
167 | 167 | ||
168 | static void unpack_shadow(void *shadow, | 168 | static void unpack_shadow(void *shadow, struct zone **zonep, |
169 | struct zone **zone, | 169 | unsigned long *evictionp) |
170 | unsigned long *distance) | ||
171 | { | 170 | { |
172 | unsigned long entry = (unsigned long)shadow; | 171 | unsigned long entry = (unsigned long)shadow; |
173 | unsigned long eviction; | ||
174 | unsigned long refault; | ||
175 | int zid, nid; | 172 | int zid, nid; |
176 | 173 | ||
177 | entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT; | 174 | entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT; |
@@ -179,29 +176,9 @@ static void unpack_shadow(void *shadow, | |||
179 | entry >>= ZONES_SHIFT; | 176 | entry >>= ZONES_SHIFT; |
180 | nid = entry & ((1UL << NODES_SHIFT) - 1); | 177 | nid = entry & ((1UL << NODES_SHIFT) - 1); |
181 | entry >>= NODES_SHIFT; | 178 | entry >>= NODES_SHIFT; |
182 | eviction = entry; | ||
183 | |||
184 | *zone = NODE_DATA(nid)->node_zones + zid; | ||
185 | 179 | ||
186 | refault = atomic_long_read(&(*zone)->inactive_age); | 180 | *zonep = NODE_DATA(nid)->node_zones + zid; |
187 | 181 | *evictionp = entry; | |
188 | /* | ||
189 | * The unsigned subtraction here gives an accurate distance | ||
190 | * across inactive_age overflows in most cases. | ||
191 | * | ||
192 | * There is a special case: usually, shadow entries have a | ||
193 | * short lifetime and are either refaulted or reclaimed along | ||
194 | * with the inode before they get too old. But it is not | ||
195 | * impossible for the inactive_age to lap a shadow entry in | ||
196 | the field, which can then result in a false small | ||
197 | * refault distance, leading to a false activation should this | ||
198 | * old entry actually refault again. However, earlier kernels | ||
199 | * used to deactivate unconditionally with *every* reclaim | ||
200 | * invocation for the longest time, so the occasional | ||
201 | * inappropriate activation leading to pressure on the active | ||
202 | * list is not a problem. | ||
203 | */ | ||
204 | *distance = (refault - eviction) & EVICTION_MASK; | ||
205 | } | 182 | } |
206 | 183 | ||
207 | /** | 184 | /** |
@@ -233,9 +210,32 @@ void *workingset_eviction(struct address_space *mapping, struct page *page) | |||
233 | bool workingset_refault(void *shadow) | 210 | bool workingset_refault(void *shadow) |
234 | { | 211 | { |
235 | unsigned long refault_distance; | 212 | unsigned long refault_distance; |
213 | unsigned long eviction; | ||
214 | unsigned long refault; | ||
236 | struct zone *zone; | 215 | struct zone *zone; |
237 | 216 | ||
238 | unpack_shadow(shadow, &zone, &refault_distance); | 217 | unpack_shadow(shadow, &zone, &eviction); |
218 | |||
219 | refault = atomic_long_read(&zone->inactive_age); | ||
220 | |||
221 | /* | ||
222 | * The unsigned subtraction here gives an accurate distance | ||
223 | * across inactive_age overflows in most cases. | ||
224 | * | ||
225 | * There is a special case: usually, shadow entries have a | ||
226 | * short lifetime and are either refaulted or reclaimed along | ||
227 | * with the inode before they get too old. But it is not | ||
228 | * impossible for the inactive_age to lap a shadow entry in | ||
229 | the field, which can then result in a false small | ||
230 | * refault distance, leading to a false activation should this | ||
231 | * old entry actually refault again. However, earlier kernels | ||
232 | * used to deactivate unconditionally with *every* reclaim | ||
233 | * invocation for the longest time, so the occasional | ||
234 | * inappropriate activation leading to pressure on the active | ||
235 | * list is not a problem. | ||
236 | */ | ||
237 | refault_distance = (refault - eviction) & EVICTION_MASK; | ||
238 | |||
239 | inc_zone_state(zone, WORKINGSET_REFAULT); | 239 | inc_zone_state(zone, WORKINGSET_REFAULT); |
240 | 240 | ||
241 | if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) { | 241 | if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) { |