author		Vitaly Wool <vitalywool@gmail.com>	2017-02-24 17:57:20 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-24 20:46:54 -0500
commit		1b096e5ae9f7181c770d59c6895f23a76c63adee (patch)
tree		ed06fa4bd36deac956ab130cd68b4d9520e78353 /mm/z3fold.c
parent		ede93213aab623b3343f1d7dcb03aebac0489357 (diff)
z3fold: extend compaction function
z3fold_compact_page() currently only handles the situation where there is a
single middle chunk within the z3fold page. However, it may be worth moving
the middle chunk closer to either the first or the last chunk, whichever is
present, if the gap between them is big enough. This patch adds the relevant
code, using the BIG_CHUNK_GAP define as the threshold above which the middle
chunk is worth moving.

Link: http://lkml.kernel.org/r/20170131214334.c4f3eac9a477af0fa9a22c46@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
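As an aside, the threshold test this patch introduces can be previewed outside
the kernel. The sketch below is a minimal user-space approximation:
TOTAL_CHUNKS and ZHDR_CHUNKS are illustrative stand-ins (in mm/z3fold.c they
are derived from PAGE_SIZE and the header size), struct zhdr carries only the
fields the check reads, and worth_moving_middle() is a hypothetical helper,
not a function from the kernel source.

#include <stdbool.h>
#include <stdio.h>

#define TOTAL_CHUNKS	64	/* assumed: 4 KiB page / 64-byte chunks */
#define ZHDR_CHUNKS	1	/* assumed: header occupies one chunk */
#define BIG_CHUNK_GAP	3	/* threshold introduced by this patch */

struct zhdr {			/* only the fields the check reads */
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
};

/* Mirrors the two new branches in z3fold_compact_page(). */
static bool worth_moving_middle(const struct zhdr *z)
{
	if (z->first_chunks != 0 && z->last_chunks == 0)
		/* gap between the end of the first buddy and the middle chunk */
		return z->start_middle - (z->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP;
	if (z->last_chunks != 0 && z->first_chunks == 0)
		/* gap between the end of the middle chunk and the last buddy */
		return TOTAL_CHUNKS - (z->start_middle + z->middle_chunks +
				       z->last_chunks) >= BIG_CHUNK_GAP;
	return false;
}

int main(void)
{
	struct zhdr z = {
		.first_chunks = 10, .middle_chunks = 5,
		.last_chunks = 0, .start_middle = 20,
	};
	/* gap = 20 - (10 + 1) = 9 chunks >= 3, so moving pays off */
	printf("worth moving: %d\n", worth_moving_middle(&z));
	return 0;
}

Note that when both first_chunks and last_chunks are non-zero there is
nowhere for the free space to grow, so the middle chunk is left alone; the
patched kernel function likewise returns 0 in that case.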
Diffstat (limited to 'mm/z3fold.c')
-rw-r--r--	mm/z3fold.c	26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 98ab01f910bc..be8b56e21c2d 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -268,6 +268,7 @@ static inline void *mchunk_memmove(struct z3fold_header *zhdr,
 		       zhdr->middle_chunks << CHUNK_SHIFT);
 }
 
+#define BIG_CHUNK_GAP	3
 /* Has to be called with lock held */
 static int z3fold_compact_page(struct z3fold_header *zhdr)
 {
@@ -286,8 +287,31 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
 		zhdr->middle_chunks = 0;
 		zhdr->start_middle = 0;
 		zhdr->first_num++;
+		return 1;
 	}
-	return 1;
+
+	/*
+	 * moving data is expensive, so let's only do that if
+	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
+	 */
+	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
+	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
+			BIG_CHUNK_GAP) {
+		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
+		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
+		return 1;
+	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
+		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
+					+ zhdr->middle_chunks) >=
+			BIG_CHUNK_GAP) {
+		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
+			zhdr->middle_chunks;
+		mchunk_memmove(zhdr, new_start);
+		zhdr->start_middle = new_start;
+		return 1;
+	}
+
+	return 0;
 }
 
 /**
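To make the arithmetic of the second branch concrete, here is a worked
example under the same assumed constants as the sketch above (the values are
chosen purely for illustration):

#include <stdio.h>

#define TOTAL_CHUNKS	64	/* assumed, as in the earlier sketch */

int main(void)
{
	unsigned short last_chunks = 8, middle_chunks = 4, start_middle = 20;

	/* free gap behind the middle chunk: 64 - (8 + 20 + 4) = 32 chunks */
	unsigned short gap = TOTAL_CHUNKS -
		(last_chunks + start_middle + middle_chunks);

	/* slide the middle chunk right until it abuts the last buddy */
	unsigned short new_start = TOTAL_CHUNKS - last_chunks - middle_chunks;

	printf("gap=%u new_start=%u\n", gap, new_start); /* gap=32 new_start=52 */
	return 0;
}

Since the gap (32 chunks) is at least BIG_CHUNK_GAP (3), mchunk_memmove()
relocates the middle chunk to start_middle = 52, merging the free regions on
either side of it into one contiguous run.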