Diffstat (limited to 'mm')
-rw-r--r--	mm/workingset.c	30
1 file changed, 29 insertions, 1 deletion
diff --git a/mm/workingset.c b/mm/workingset.c
index f874b2c663e3..9a26a60368d2 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -156,8 +156,19 @@
 			 ZONES_SHIFT + NODES_SHIFT)
 #define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
 
+/*
+ * Eviction timestamps need to be able to cover the full range of
+ * actionable refaults. However, bits are tight in the radix tree
+ * entry, and after storing the identifier for the lruvec there might
+ * not be enough left to represent every single actionable refault. In
+ * that case, we have to sacrifice granularity for distance, and group
+ * evictions into coarser buckets by shaving off lower timestamp bits.
+ */
+static unsigned int bucket_order __read_mostly;
+
 static void *pack_shadow(unsigned long eviction, struct zone *zone)
 {
+	eviction >>= bucket_order;
 	eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
 	eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
 	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
@@ -178,7 +189,7 @@ static void unpack_shadow(void *shadow, struct zone **zonep,
 	entry >>= NODES_SHIFT;
 
 	*zonep = NODE_DATA(nid)->node_zones + zid;
-	*evictionp = entry;
+	*evictionp = entry << bucket_order;
 }
 
 /**
@@ -400,8 +411,25 @@ static struct lock_class_key shadow_nodes_key;
 
 static int __init workingset_init(void)
 {
+	unsigned int timestamp_bits;
+	unsigned int max_order;
 	int ret;
 
+	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
+	/*
+	 * Calculate the eviction bucket size to cover the longest
+	 * actionable refault distance, which is currently half of
+	 * memory (totalram_pages/2). However, memory hotplug may add
+	 * some more pages at runtime, so keep working with up to
+	 * double the initial memory by using totalram_pages as-is.
+	 */
+	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
+	max_order = fls_long(totalram_pages - 1);
+	if (max_order > timestamp_bits)
+		bucket_order = max_order - timestamp_bits;
+	printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+	       timestamp_bits, max_order, bucket_order);
+
 	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
 	if (ret)
 		goto err;
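
For illustration only, the sketch below walks through the bucket_order calculation that the hunk above adds to workingset_init(). It is a standalone userspace program, not part of the patch: BITS_PER_LONG, EVICTION_SHIFT and TOTALRAM_PAGES are made-up example values standing in for a 32-bit machine with 4 GB of 4 KB pages, and fls_long() is reimplemented locally.

	/*
	 * Sketch of the eviction-bucket sizing done in workingset_init().
	 * All constants below are example values, not the kernel's.
	 */
	#include <stdio.h>

	#define BITS_PER_LONG	32UL		/* example: 32-bit machine */
	#define EVICTION_SHIFT	14UL		/* example: exceptional + zone + node bits */
	#define TOTALRAM_PAGES	(1UL << 20)	/* example: 4 GB of 4 KB pages */

	/* Local stand-in for the kernel's fls_long(): bits needed to represent x. */
	static unsigned int fls_long(unsigned long x)
	{
		unsigned int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		/* Bits left for the eviction timestamp after the lruvec identifier. */
		unsigned int timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
		/* Bits needed to count every page of memory. */
		unsigned int max_order = fls_long(TOTALRAM_PAGES - 1);
		unsigned int bucket_order = 0;
		unsigned long eviction = 1234567;
		unsigned long bucket;

		/* Only trade granularity for distance when memory outgrows the counter. */
		if (max_order > timestamp_bits)
			bucket_order = max_order - timestamp_bits;

		/* Shaving the low bits groups nearby evictions into one bucket. */
		bucket = (eviction >> bucket_order) << bucket_order;

		printf("timestamp_bits=%u max_order=%u bucket_order=%u\n",
		       timestamp_bits, max_order, bucket_order);
		printf("eviction %lu falls into bucket starting at %lu\n",
		       eviction, bucket);
		return 0;
	}

With these example numbers the timestamp has only 18 bits but must cover 2^20 pages, so bucket_order becomes 2 and evictions are grouped into buckets of four timestamps each, which is the granularity-for-distance trade-off described in the comment added above pack_shadow().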