path: root/mm/z3fold.c
author		Vitaly Wool <vitalywool@gmail.com>	2017-02-24 17:57:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-24 20:46:54 -0500
commit		ede93213aab623b3343f1d7dcb03aebac0489357 (patch)
tree		9d1dafaa9b91260cc8f7d25af7d50c8809865338 /mm/z3fold.c
parent		12d59ae678242b383671abb7ffa3c94bb2d6c4de (diff)
z3fold: fix header size related issues
Currently the whole kernel build will be stopped if the size of struct
z3fold_header is greater than the size of one chunk, which is 64 bytes by
default.  This patch instead defines the offset for z3fold objects as the
size of the z3fold header in chunks.

Also fixed are the calculation of num_free_chunks() and the address to move
the middle chunk to in case of in-page compaction in z3fold_compact_page().

Link: http://lkml.kernel.org/r/20170131214057.d98677032bc7b1c6c59a80c9@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
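To illustrate the new chunk arithmetic, here is a minimal self-contained
userspace sketch in C; the 4 KiB page parameters and the 72-byte header size
are assumed values, standing in for a build where the header outgrows one
chunk (e.g. with CONFIG_DEBUG_SPINLOCK=y):

	#include <stdio.h>

	/* Assumed parameters: a 4 KiB page, default NCHUNKS_ORDER of 6 */
	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define NCHUNKS_ORDER	6
	#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
	#define CHUNK_SIZE	(1UL << CHUNK_SHIFT)	/* 64 bytes */
	/* round_up() for power-of-two alignment, as in the kernel */
	#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))

	int main(void)
	{
		/* stand-in for sizeof(struct z3fold_header); 72 bytes is
		 * an assumed value for a build where it exceeds one chunk */
		unsigned long zhdr_size = 72;
		unsigned long zhdr_size_aligned = round_up(zhdr_size, CHUNK_SIZE);
		unsigned long zhdr_chunks = zhdr_size_aligned >> CHUNK_SHIFT;
		unsigned long total_chunks = PAGE_SIZE >> CHUNK_SHIFT;
		unsigned long nchunks = (PAGE_SIZE - zhdr_size_aligned) >> CHUNK_SHIFT;

		printf("header: %lu bytes, rounds up to %lu chunk(s)\n",
		       zhdr_size, zhdr_chunks);
		printf("chunks per page: %lu total, %lu usable\n",
		       total_chunks, nchunks);
		return 0;	/* prints: 2 header chunks, 64 total, 62 usable */
	}

With a 72-byte header, the old BUILD_BUG_ON would stop the build (72 > 64);
with this patch the header simply rounds up to two chunks and NCHUNKS drops
from 63 to 62, as the updated comment in the patch describes.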
Diffstat (limited to 'mm/z3fold.c')
-rw-r--r--	mm/z3fold.c	114
1 file changed, 64 insertions(+), 50 deletions(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 227378991ecf..98ab01f910bc 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -34,29 +34,58 @@
 /*****************
  * Structures
 *****************/
+struct z3fold_pool;
+struct z3fold_ops {
+	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
+};
+
+enum buddy {
+	HEADLESS = 0,
+	FIRST,
+	MIDDLE,
+	LAST,
+	BUDDIES_MAX
+};
+
+/*
+ * struct z3fold_header - z3fold page metadata occupying the first chunk of each
+ *			z3fold page, except for HEADLESS pages
+ * @buddy:		links the z3fold page into the relevant list in the pool
+ * @first_chunks:	the size of the first buddy in chunks, 0 if free
+ * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
+ * @last_chunks:	the size of the last buddy in chunks, 0 if free
+ * @first_num:		the starting number (for the first handle)
+ */
+struct z3fold_header {
+	struct list_head buddy;
+	unsigned short first_chunks;
+	unsigned short middle_chunks;
+	unsigned short last_chunks;
+	unsigned short start_middle;
+	unsigned short first_num:2;
+};
+
 /*
  * NCHUNKS_ORDER determines the internal allocation granularity, effectively
  * adjusting internal fragmentation. It also determines the number of
  * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
- * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
- * in allocated page is occupied by z3fold header, NCHUNKS will be calculated
- * to 63 which shows the max number of free chunks in z3fold page, also there
- * will be 63 freelists per pool.
+ * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
+ * in the beginning of an allocated page are occupied by z3fold header, so
+ * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
+ * which shows the max number of free chunks in z3fold page, also there will
+ * be 63, or 62, respectively, freelists per pool.
  */
 #define NCHUNKS_ORDER	6
 
 #define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
 #define CHUNK_SIZE	(1 << CHUNK_SHIFT)
-#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
+#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
+#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
+#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
 #define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
 
 #define BUDDY_MASK	(0x3)
 
-struct z3fold_pool;
-struct z3fold_ops {
-	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
-};
-
 /**
  * struct z3fold_pool - stores metadata for each z3fold pool
  * @lock:	protects all pool fields and first|last_chunk fields of any
@@ -86,32 +115,6 @@ struct z3fold_pool {
 	const struct zpool_ops *zpool_ops;
 };
 
-enum buddy {
-	HEADLESS = 0,
-	FIRST,
-	MIDDLE,
-	LAST,
-	BUDDIES_MAX
-};
-
-/*
- * struct z3fold_header - z3fold page metadata occupying the first chunk of each
- *			z3fold page, except for HEADLESS pages
- * @buddy:		links the z3fold page into the relevant list in the pool
- * @first_chunks:	the size of the first buddy in chunks, 0 if free
- * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
- * @last_chunks:	the size of the last buddy in chunks, 0 if free
- * @first_num:		the starting number (for the first handle)
- */
-struct z3fold_header {
-	struct list_head buddy;
-	unsigned short first_chunks;
-	unsigned short middle_chunks;
-	unsigned short last_chunks;
-	unsigned short start_middle;
-	unsigned short first_num:2;
-};
-
 /*
  * Internal z3fold page flags
  */
@@ -121,6 +124,7 @@ enum z3fold_page_flags {
 	MIDDLE_CHUNK_MAPPED,
 };
 
+
 /*****************
  * Helpers
 *****************/
@@ -204,9 +208,10 @@ static int num_free_chunks(struct z3fold_header *zhdr)
 	 */
 	if (zhdr->middle_chunks != 0) {
 		int nfree_before = zhdr->first_chunks ?
-			0 : zhdr->start_middle - 1;
+			0 : zhdr->start_middle - ZHDR_CHUNKS;
 		int nfree_after = zhdr->last_chunks ?
-			0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks;
+			0 : TOTAL_CHUNKS -
+				(zhdr->start_middle + zhdr->middle_chunks);
 		nfree = max(nfree_before, nfree_after);
 	} else
 		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
@@ -254,26 +259,35 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
 	kfree(pool);
 }
 
+static inline void *mchunk_memmove(struct z3fold_header *zhdr,
+				unsigned short dst_chunk)
+{
+	void *beg = zhdr;
+	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
+		       beg + (zhdr->start_middle << CHUNK_SHIFT),
+		       zhdr->middle_chunks << CHUNK_SHIFT);
+}
+
 /* Has to be called with lock held */
 static int z3fold_compact_page(struct z3fold_header *zhdr)
 {
 	struct page *page = virt_to_page(zhdr);
-	void *beg = zhdr;
 
+	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
+		return 0; /* can't move middle chunk, it's used */
 
-	if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
-	    zhdr->middle_chunks != 0 &&
-	    zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
-		memmove(beg + ZHDR_SIZE_ALIGNED,
-			beg + (zhdr->start_middle << CHUNK_SHIFT),
-			zhdr->middle_chunks << CHUNK_SHIFT);
+	if (zhdr->middle_chunks == 0)
+		return 0; /* nothing to compact */
+
+	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+		/* move to the beginning */
+		mchunk_memmove(zhdr, ZHDR_CHUNKS);
 		zhdr->first_chunks = zhdr->middle_chunks;
 		zhdr->middle_chunks = 0;
 		zhdr->start_middle = 0;
 		zhdr->first_num++;
-		return 1;
 	}
-	return 0;
+	return 1;
 }
 
 /**
@@ -365,7 +379,7 @@ found:
 		zhdr->last_chunks = chunks;
 	else {
 		zhdr->middle_chunks = chunks;
-		zhdr->start_middle = zhdr->first_chunks + 1;
+		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
 	}
 
 	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
@@ -778,8 +792,8 @@ MODULE_ALIAS("zpool-z3fold");
 
 static int __init init_z3fold(void)
 {
-	/* Make sure the z3fold header will fit in one chunk */
-	BUILD_BUG_ON(sizeof(struct z3fold_header) > ZHDR_SIZE_ALIGNED);
+	/* Make sure the z3fold header is not larger than the page size */
+	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
 	zpool_register_driver(&z3fold_zpool_driver);
 
 	return 0;
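
For reference, the effect of the num_free_chunks() change can be checked with
a small userspace sketch; the chunk counts below are assumptions matching the
two-chunk header case above, and the z3fold_header fields are mimicked with
plain variables:

	#include <stdio.h>

	#define TOTAL_CHUNKS	64	/* assumed: 4 KiB page, 64-byte chunks */
	#define ZHDR_CHUNKS	2	/* assumed: header spans two chunks */
	#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

	int main(void)
	{
		/* a page holding only a middle buddy: 10 chunks at chunk 20 */
		unsigned short first_chunks = 0, last_chunks = 0;
		unsigned short middle_chunks = 10, start_middle = 20;

		/* before the patch: header assumed to occupy exactly one chunk */
		int old_before = first_chunks ? 0 : start_middle - 1;
		int old_after = last_chunks ? 0 :
			NCHUNKS - start_middle - middle_chunks;

		/* after the patch: measured from the real header end and from
		 * the true number of chunks in the page */
		int new_before = first_chunks ? 0 : start_middle - ZHDR_CHUNKS;
		int new_after = last_chunks ? 0 :
			TOTAL_CHUNKS - (start_middle + middle_chunks);

		printf("before middle: old %d, fixed %d\n", old_before, new_before);
		printf("after middle:  old %d, fixed %d\n", old_after, new_after);
		return 0;	/* prints: old 19/32, fixed 18/34 */
	}

With these numbers the old formulas report 19 free chunks before the middle
buddy and 32 after it, while the corrected ones report 18 and 34: the old code
overcounted the gap after the header (assuming a one-chunk header) and
undercounted the space at the end of the page by ZHDR_CHUNKS.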