diff options
author | Vitaly Wool <vitalywool@gmail.com> | 2017-02-24 17:57:26 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-24 20:46:54 -0500 |
commit | 5a27aa8220290b64cd5da066a1e29375aa867e69 (patch) | |
tree | 6a2fbfc07e38793e686b6037c0388a806d29af58 /mm | |
parent | 2f1e5e4d8430f365f979a818f515123a71b640ec (diff) |
z3fold: add kref refcounting
With both coming and already present locking optimizations, introducing
kref to reference-count z3fold objects is the right thing to do.
Moreover, it makes buddied list no longer necessary, and allows for a
simpler handling of headless pages.
[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20170131214650.8ea78033d91ded233f552bc0@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/z3fold.c | 155 |
1 file changed, 69 insertions, 86 deletions
diff --git a/mm/z3fold.c b/mm/z3fold.c index fa91b56dbd19..8970a2fd3b1a 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
@@ -52,6 +52,7 @@ enum buddy { | |||
52 | * z3fold page, except for HEADLESS pages | 52 | * z3fold page, except for HEADLESS pages |
53 | * @buddy: links the z3fold page into the relevant list in the pool | 53 | * @buddy: links the z3fold page into the relevant list in the pool |
54 | * @page_lock: per-page lock | 54 | * @page_lock: per-page lock |
55 | * @refcount: reference count for the z3fold page | ||
55 | * @first_chunks: the size of the first buddy in chunks, 0 if free | 56 | * @first_chunks: the size of the first buddy in chunks, 0 if free |
56 | * @middle_chunks: the size of the middle buddy in chunks, 0 if free | 57 | * @middle_chunks: the size of the middle buddy in chunks, 0 if free |
57 | * @last_chunks: the size of the last buddy in chunks, 0 if free | 58 | * @last_chunks: the size of the last buddy in chunks, 0 if free |
@@ -60,6 +61,7 @@ enum buddy { | |||
60 | struct z3fold_header { | 61 | struct z3fold_header { |
61 | struct list_head buddy; | 62 | struct list_head buddy; |
62 | spinlock_t page_lock; | 63 | spinlock_t page_lock; |
64 | struct kref refcount; | ||
63 | unsigned short first_chunks; | 65 | unsigned short first_chunks; |
64 | unsigned short middle_chunks; | 66 | unsigned short middle_chunks; |
65 | unsigned short last_chunks; | 67 | unsigned short last_chunks; |
@@ -95,8 +97,6 @@ struct z3fold_header { | |||
95 | * @unbuddied: array of lists tracking z3fold pages that contain 2- buddies; | 97 | * @unbuddied: array of lists tracking z3fold pages that contain 2- buddies; |
96 | * the lists each z3fold page is added to depends on the size of | 98 | * the lists each z3fold page is added to depends on the size of |
97 | * its free region. | 99 | * its free region. |
98 | * @buddied: list tracking the z3fold pages that contain 3 buddies; | ||
99 | * these z3fold pages are full | ||
100 | * @lru: list tracking the z3fold pages in LRU order by most recently | 100 | * @lru: list tracking the z3fold pages in LRU order by most recently |
101 | * added buddy. | 101 | * added buddy. |
102 | * @pages_nr: number of z3fold pages in the pool. | 102 | * @pages_nr: number of z3fold pages in the pool. |
@@ -109,7 +109,6 @@ struct z3fold_header { | |||
109 | struct z3fold_pool { | 109 | struct z3fold_pool { |
110 | spinlock_t lock; | 110 | spinlock_t lock; |
111 | struct list_head unbuddied[NCHUNKS]; | 111 | struct list_head unbuddied[NCHUNKS]; |
112 | struct list_head buddied; | ||
113 | struct list_head lru; | 112 | struct list_head lru; |
114 | atomic64_t pages_nr; | 113 | atomic64_t pages_nr; |
115 | const struct z3fold_ops *ops; | 114 | const struct z3fold_ops *ops; |
@@ -121,8 +120,7 @@ struct z3fold_pool { | |||
121 | * Internal z3fold page flags | 120 | * Internal z3fold page flags |
122 | */ | 121 | */ |
123 | enum z3fold_page_flags { | 122 | enum z3fold_page_flags { |
124 | UNDER_RECLAIM = 0, | 123 | PAGE_HEADLESS = 0, |
125 | PAGE_HEADLESS, | ||
126 | MIDDLE_CHUNK_MAPPED, | 124 | MIDDLE_CHUNK_MAPPED, |
127 | }; | 125 | }; |
128 | 126 | ||
@@ -146,11 +144,11 @@ static struct z3fold_header *init_z3fold_page(struct page *page) | |||
146 | struct z3fold_header *zhdr = page_address(page); | 144 | struct z3fold_header *zhdr = page_address(page); |
147 | 145 | ||
148 | INIT_LIST_HEAD(&page->lru); | 146 | INIT_LIST_HEAD(&page->lru); |
149 | clear_bit(UNDER_RECLAIM, &page->private); | ||
150 | clear_bit(PAGE_HEADLESS, &page->private); | 147 | clear_bit(PAGE_HEADLESS, &page->private); |
151 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); | 148 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); |
152 | 149 | ||
153 | spin_lock_init(&zhdr->page_lock); | 150 | spin_lock_init(&zhdr->page_lock); |
151 | kref_init(&zhdr->refcount); | ||
154 | zhdr->first_chunks = 0; | 152 | zhdr->first_chunks = 0; |
155 | zhdr->middle_chunks = 0; | 153 | zhdr->middle_chunks = 0; |
156 | zhdr->last_chunks = 0; | 154 | zhdr->last_chunks = 0; |
@@ -161,9 +159,24 @@ static struct z3fold_header *init_z3fold_page(struct page *page) | |||
161 | } | 159 | } |
162 | 160 | ||
163 | /* Resets the struct page fields and frees the page */ | 161 | /* Resets the struct page fields and frees the page */ |
164 | static void free_z3fold_page(struct z3fold_header *zhdr) | 162 | static void free_z3fold_page(struct page *page) |
165 | { | 163 | { |
166 | __free_page(virt_to_page(zhdr)); | 164 | __free_page(page); |
165 | } | ||
166 | |||
167 | static void release_z3fold_page(struct kref *ref) | ||
168 | { | ||
169 | struct z3fold_header *zhdr; | ||
170 | struct page *page; | ||
171 | |||
172 | zhdr = container_of(ref, struct z3fold_header, refcount); | ||
173 | page = virt_to_page(zhdr); | ||
174 | |||
175 | if (!list_empty(&zhdr->buddy)) | ||
176 | list_del(&zhdr->buddy); | ||
177 | if (!list_empty(&page->lru)) | ||
178 | list_del(&page->lru); | ||
179 | free_z3fold_page(page); | ||
167 | } | 180 | } |
168 | 181 | ||
169 | /* Lock a z3fold page */ | 182 | /* Lock a z3fold page */ |
@@ -178,7 +191,6 @@ static inline void z3fold_page_unlock(struct z3fold_header *zhdr) | |||
178 | spin_unlock(&zhdr->page_lock); | 191 | spin_unlock(&zhdr->page_lock); |
179 | } | 192 | } |
180 | 193 | ||
181 | |||
182 | /* | 194 | /* |
183 | * Encodes the handle of a particular buddy within a z3fold page | 195 | * Encodes the handle of a particular buddy within a z3fold page |
184 | * Pool lock should be held as this function accesses first_num | 196 | * Pool lock should be held as this function accesses first_num |
@@ -257,7 +269,6 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp, | |||
257 | spin_lock_init(&pool->lock); | 269 | spin_lock_init(&pool->lock); |
258 | for_each_unbuddied_list(i, 0) | 270 | for_each_unbuddied_list(i, 0) |
259 | INIT_LIST_HEAD(&pool->unbuddied[i]); | 271 | INIT_LIST_HEAD(&pool->unbuddied[i]); |
260 | INIT_LIST_HEAD(&pool->buddied); | ||
261 | INIT_LIST_HEAD(&pool->lru); | 272 | INIT_LIST_HEAD(&pool->lru); |
262 | atomic64_set(&pool->pages_nr, 0); | 273 | atomic64_set(&pool->pages_nr, 0); |
263 | pool->ops = ops; | 274 | pool->ops = ops; |
@@ -378,6 +389,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, | |||
378 | spin_unlock(&pool->lock); | 389 | spin_unlock(&pool->lock); |
379 | continue; | 390 | continue; |
380 | } | 391 | } |
392 | kref_get(&zhdr->refcount); | ||
381 | list_del_init(&zhdr->buddy); | 393 | list_del_init(&zhdr->buddy); |
382 | spin_unlock(&pool->lock); | 394 | spin_unlock(&pool->lock); |
383 | 395 | ||
@@ -394,10 +406,12 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, | |||
394 | else if (zhdr->middle_chunks == 0) | 406 | else if (zhdr->middle_chunks == 0) |
395 | bud = MIDDLE; | 407 | bud = MIDDLE; |
396 | else { | 408 | else { |
409 | z3fold_page_unlock(zhdr); | ||
397 | spin_lock(&pool->lock); | 410 | spin_lock(&pool->lock); |
398 | list_add(&zhdr->buddy, &pool->buddied); | 411 | if (kref_put(&zhdr->refcount, |
412 | release_z3fold_page)) | ||
413 | atomic64_dec(&pool->pages_nr); | ||
399 | spin_unlock(&pool->lock); | 414 | spin_unlock(&pool->lock); |
400 | z3fold_page_unlock(zhdr); | ||
401 | pr_err("No free chunks in unbuddied\n"); | 415 | pr_err("No free chunks in unbuddied\n"); |
402 | WARN_ON(1); | 416 | WARN_ON(1); |
403 | continue; | 417 | continue; |
@@ -438,9 +452,6 @@ found: | |||
438 | /* Add to unbuddied list */ | 452 | /* Add to unbuddied list */ |
439 | freechunks = num_free_chunks(zhdr); | 453 | freechunks = num_free_chunks(zhdr); |
440 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | 454 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); |
441 | } else { | ||
442 | /* Add to buddied list */ | ||
443 | list_add(&zhdr->buddy, &pool->buddied); | ||
444 | } | 455 | } |
445 | 456 | ||
446 | headless: | 457 | headless: |
@@ -504,52 +515,29 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
504 | } | 515 | } |
505 | } | 516 | } |
506 | 517 | ||
507 | if (test_bit(UNDER_RECLAIM, &page->private)) { | 518 | if (bud == HEADLESS) { |
508 | /* z3fold page is under reclaim, reclaim will free */ | ||
509 | if (bud != HEADLESS) | ||
510 | z3fold_page_unlock(zhdr); | ||
511 | return; | ||
512 | } | ||
513 | |||
514 | /* Remove from existing buddy list */ | ||
515 | if (bud != HEADLESS) { | ||
516 | spin_lock(&pool->lock); | ||
517 | /* | ||
518 | * this object may have been removed from its list by | ||
519 | * z3fold_alloc(). In that case we just do nothing, | ||
520 | * z3fold_alloc() will allocate an object and add the page | ||
521 | * to the relevant list. | ||
522 | */ | ||
523 | if (!list_empty(&zhdr->buddy)) { | ||
524 | list_del(&zhdr->buddy); | ||
525 | } else { | ||
526 | spin_unlock(&pool->lock); | ||
527 | z3fold_page_unlock(zhdr); | ||
528 | return; | ||
529 | } | ||
530 | spin_unlock(&pool->lock); | ||
531 | } | ||
532 | |||
533 | if (bud == HEADLESS || | ||
534 | (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 && | ||
535 | zhdr->last_chunks == 0)) { | ||
536 | /* z3fold page is empty, free */ | ||
537 | spin_lock(&pool->lock); | 519 | spin_lock(&pool->lock); |
538 | list_del(&page->lru); | 520 | list_del(&page->lru); |
539 | spin_unlock(&pool->lock); | 521 | spin_unlock(&pool->lock); |
540 | clear_bit(PAGE_HEADLESS, &page->private); | 522 | free_z3fold_page(page); |
541 | if (bud != HEADLESS) | ||
542 | z3fold_page_unlock(zhdr); | ||
543 | free_z3fold_page(zhdr); | ||
544 | atomic64_dec(&pool->pages_nr); | 523 | atomic64_dec(&pool->pages_nr); |
545 | } else { | 524 | } else { |
546 | z3fold_compact_page(zhdr); | 525 | if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 || |
547 | /* Add to the unbuddied list */ | 526 | zhdr->last_chunks != 0) { |
527 | z3fold_compact_page(zhdr); | ||
528 | /* Add to the unbuddied list */ | ||
529 | spin_lock(&pool->lock); | ||
530 | if (!list_empty(&zhdr->buddy)) | ||
531 | list_del(&zhdr->buddy); | ||
532 | freechunks = num_free_chunks(zhdr); | ||
533 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | ||
534 | spin_unlock(&pool->lock); | ||
535 | } | ||
536 | z3fold_page_unlock(zhdr); | ||
548 | spin_lock(&pool->lock); | 537 | spin_lock(&pool->lock); |
549 | freechunks = num_free_chunks(zhdr); | 538 | if (kref_put(&zhdr->refcount, release_z3fold_page)) |
550 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | 539 | atomic64_dec(&pool->pages_nr); |
551 | spin_unlock(&pool->lock); | 540 | spin_unlock(&pool->lock); |
552 | z3fold_page_unlock(zhdr); | ||
553 | } | 541 | } |
554 | 542 | ||
555 | } | 543 | } |
@@ -608,13 +596,13 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
608 | return -EINVAL; | 596 | return -EINVAL; |
609 | } | 597 | } |
610 | page = list_last_entry(&pool->lru, struct page, lru); | 598 | page = list_last_entry(&pool->lru, struct page, lru); |
611 | list_del(&page->lru); | 599 | list_del_init(&page->lru); |
612 | 600 | ||
613 | /* Protect z3fold page against free */ | ||
614 | set_bit(UNDER_RECLAIM, &page->private); | ||
615 | zhdr = page_address(page); | 601 | zhdr = page_address(page); |
616 | if (!test_bit(PAGE_HEADLESS, &page->private)) { | 602 | if (!test_bit(PAGE_HEADLESS, &page->private)) { |
617 | list_del(&zhdr->buddy); | 603 | if (!list_empty(&zhdr->buddy)) |
604 | list_del_init(&zhdr->buddy); | ||
605 | kref_get(&zhdr->refcount); | ||
618 | spin_unlock(&pool->lock); | 606 | spin_unlock(&pool->lock); |
619 | z3fold_page_lock(zhdr); | 607 | z3fold_page_lock(zhdr); |
620 | /* | 608 | /* |
@@ -655,30 +643,19 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
655 | goto next; | 643 | goto next; |
656 | } | 644 | } |
657 | next: | 645 | next: |
658 | if (!test_bit(PAGE_HEADLESS, &page->private)) | 646 | if (test_bit(PAGE_HEADLESS, &page->private)) { |
659 | z3fold_page_lock(zhdr); | 647 | if (ret == 0) { |
660 | clear_bit(UNDER_RECLAIM, &page->private); | 648 | free_z3fold_page(page); |
661 | if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) || | 649 | return 0; |
662 | (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 && | ||
663 | zhdr->middle_chunks == 0)) { | ||
664 | /* | ||
665 | * All buddies are now free, free the z3fold page and | ||
666 | * return success. | ||
667 | */ | ||
668 | if (!test_and_clear_bit(PAGE_HEADLESS, &page->private)) | ||
669 | z3fold_page_unlock(zhdr); | ||
670 | free_z3fold_page(zhdr); | ||
671 | atomic64_dec(&pool->pages_nr); | ||
672 | return 0; | ||
673 | } else if (!test_bit(PAGE_HEADLESS, &page->private)) { | ||
674 | if (zhdr->first_chunks != 0 && | ||
675 | zhdr->last_chunks != 0 && | ||
676 | zhdr->middle_chunks != 0) { | ||
677 | /* Full, add to buddied list */ | ||
678 | spin_lock(&pool->lock); | ||
679 | list_add(&zhdr->buddy, &pool->buddied); | ||
680 | spin_unlock(&pool->lock); | ||
681 | } else { | 650 | } else { |
651 | spin_lock(&pool->lock); | ||
652 | } | ||
653 | } else { | ||
654 | z3fold_page_lock(zhdr); | ||
655 | if ((zhdr->first_chunks || zhdr->last_chunks || | ||
656 | zhdr->middle_chunks) && | ||
657 | !(zhdr->first_chunks && zhdr->last_chunks && | ||
658 | zhdr->middle_chunks)) { | ||
682 | z3fold_compact_page(zhdr); | 659 | z3fold_compact_page(zhdr); |
683 | /* add to unbuddied list */ | 660 | /* add to unbuddied list */ |
684 | spin_lock(&pool->lock); | 661 | spin_lock(&pool->lock); |
@@ -687,13 +664,19 @@ next: | |||
687 | &pool->unbuddied[freechunks]); | 664 | &pool->unbuddied[freechunks]); |
688 | spin_unlock(&pool->lock); | 665 | spin_unlock(&pool->lock); |
689 | } | 666 | } |
690 | } | ||
691 | |||
692 | if (!test_bit(PAGE_HEADLESS, &page->private)) | ||
693 | z3fold_page_unlock(zhdr); | 667 | z3fold_page_unlock(zhdr); |
668 | spin_lock(&pool->lock); | ||
669 | if (kref_put(&zhdr->refcount, release_z3fold_page)) { | ||
670 | atomic64_dec(&pool->pages_nr); | ||
671 | return 0; | ||
672 | } | ||
673 | } | ||
694 | 674 | ||
695 | spin_lock(&pool->lock); | 675 | /* |
696 | /* add to beginning of LRU */ | 676 | * Add to the beginning of LRU. |
677 | * Pool lock has to be kept here to ensure the page has | ||
678 | * not already been released | ||
679 | */ | ||
697 | list_add(&page->lru, &pool->lru); | 680 | list_add(&page->lru, &pool->lru); |
698 | } | 681 | } |
699 | spin_unlock(&pool->lock); | 682 | spin_unlock(&pool->lock); |