-rw-r--r--	mm/z3fold.c	26
1 file changed, 25 insertions, 1 deletion
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 98ab01f910bc..be8b56e21c2d 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -268,6 +268,7 @@ static inline void *mchunk_memmove(struct z3fold_header *zhdr,
 		       zhdr->middle_chunks << CHUNK_SHIFT);
 }
 
+#define BIG_CHUNK_GAP	3
 /* Has to be called with lock held */
 static int z3fold_compact_page(struct z3fold_header *zhdr)
 {
@@ -286,8 +287,31 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
 		zhdr->middle_chunks = 0;
 		zhdr->start_middle = 0;
 		zhdr->first_num++;
+		return 1;
 	}
-	return 1;
+
+	/*
+	 * moving data is expensive, so let's only do that if
+	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
+	 */
+	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
+	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
+			BIG_CHUNK_GAP) {
+		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
+		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
+		return 1;
+	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
+		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
+					+ zhdr->middle_chunks) >=
+			BIG_CHUNK_GAP) {
+		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
+			zhdr->middle_chunks;
+		mchunk_memmove(zhdr, new_start);
+		zhdr->start_middle = new_start;
+		return 1;
+	}
+
+	return 0;
 }
 
 /**
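
To see how the added BIG_CHUNK_GAP heuristic behaves on its own, here is a minimal user-space sketch of the decision logic above. TOTAL_CHUNKS, ZHDR_CHUNKS, struct toy_zhdr and compact_target() are illustrative assumptions, not kernel definitions (the kernel derives the real constants from the page size and the actual z3fold_header layout); only the threshold comparisons mirror the patch.

/*
 * Minimal user-space model of the compaction heuristic added above.
 * TOTAL_CHUNKS and ZHDR_CHUNKS are assumed values for illustration
 * (a 4 KiB page split into 64-byte chunks, header fitting in one chunk).
 */
#include <stdio.h>

#define TOTAL_CHUNKS	64	/* assumed: chunks per page */
#define ZHDR_CHUNKS	1	/* assumed: chunks taken by the header */
#define BIG_CHUNK_GAP	3	/* same threshold as the patch */

struct toy_zhdr {		/* hypothetical, stripped-down header */
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
};

/*
 * Return the chunk index the middle object would move to, or 0 if the
 * gain is below BIG_CHUNK_GAP and the memmove is not worth doing.
 */
static unsigned short compact_target(const struct toy_zhdr *z)
{
	if (z->middle_chunks == 0)
		return 0;

	/* only first + middle used: slide middle down towards the header */
	if (z->first_chunks != 0 && z->last_chunks == 0 &&
	    z->start_middle - (z->first_chunks + ZHDR_CHUNKS) >= BIG_CHUNK_GAP)
		return z->first_chunks + ZHDR_CHUNKS;

	/* only last + middle used: slide middle up against the last object */
	if (z->last_chunks != 0 && z->first_chunks == 0 &&
	    TOTAL_CHUNKS - (z->last_chunks + z->start_middle +
			    z->middle_chunks) >= BIG_CHUNK_GAP)
		return TOTAL_CHUNKS - z->last_chunks - z->middle_chunks;

	return 0;
}

int main(void)
{
	/* first buddy is 10 chunks, middle object starts at chunk 20 */
	struct toy_zhdr z = {
		.first_chunks = 10, .middle_chunks = 20,
		.last_chunks = 0, .start_middle = 20,
	};

	/* gap = 20 - (10 + 1) = 9 >= BIG_CHUNK_GAP, so it moves to chunk 11 */
	printf("new start_middle: %u\n", compact_target(&z));
	return 0;
}

The point of the threshold is the trade-off the in-patch comment states: memmove of the middle object costs CPU, so the page is only repacked when at least BIG_CHUNK_GAP chunks would be reclaimed next to it.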