diff options
author | Vitaly Wool <vitalywool@gmail.com> | 2017-02-24 17:57:23 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-24 20:46:54 -0500 |
commit | 2f1e5e4d8430f365f979a818f515123a71b640ec (patch) | |
tree | d7e421b9780b1106c9bc29db1d5f451ad4f0b9a3 /mm/z3fold.c | |
parent | 1b096e5ae9f7181c770d59c6895f23a76c63adee (diff) |
z3fold: use per-page spinlock
Most of z3fold operations are in-page, such as modifying z3fold page
header or moving z3fold objects within a page. Taking a per-pool spinlock
to protect per-page objects is therefore suboptimal, and the idea of
having a per-page spinlock (or rwlock) has been around for some time.
This patch implements a spinlock-based per-page locking mechanism which is
lightweight enough to normally fit into the z3fold header.
Link: http://lkml.kernel.org/r/20170131214438.433e0a5fda908337b63206d3@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/z3fold.c')
-rw-r--r-- | mm/z3fold.c | 148 |
1 file changed, 106 insertions, 42 deletions
diff --git a/mm/z3fold.c b/mm/z3fold.c index be8b56e21c2d..fa91b56dbd19 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
@@ -51,6 +51,7 @@ enum buddy { | |||
51 | * struct z3fold_header - z3fold page metadata occupying the first chunk of each | 51 | * struct z3fold_header - z3fold page metadata occupying the first chunk of each |
52 | * z3fold page, except for HEADLESS pages | 52 | * z3fold page, except for HEADLESS pages |
53 | * @buddy: links the z3fold page into the relevant list in the pool | 53 | * @buddy: links the z3fold page into the relevant list in the pool |
54 | * @page_lock: per-page lock | ||
54 | * @first_chunks: the size of the first buddy in chunks, 0 if free | 55 | * @first_chunks: the size of the first buddy in chunks, 0 if free |
55 | * @middle_chunks: the size of the middle buddy in chunks, 0 if free | 56 | * @middle_chunks: the size of the middle buddy in chunks, 0 if free |
56 | * @last_chunks: the size of the last buddy in chunks, 0 if free | 57 | * @last_chunks: the size of the last buddy in chunks, 0 if free |
@@ -58,6 +59,7 @@ enum buddy { | |||
58 | */ | 59 | */ |
59 | struct z3fold_header { | 60 | struct z3fold_header { |
60 | struct list_head buddy; | 61 | struct list_head buddy; |
62 | spinlock_t page_lock; | ||
61 | unsigned short first_chunks; | 63 | unsigned short first_chunks; |
62 | unsigned short middle_chunks; | 64 | unsigned short middle_chunks; |
63 | unsigned short last_chunks; | 65 | unsigned short last_chunks; |
@@ -148,6 +150,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page) | |||
148 | clear_bit(PAGE_HEADLESS, &page->private); | 150 | clear_bit(PAGE_HEADLESS, &page->private); |
149 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); | 151 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); |
150 | 152 | ||
153 | spin_lock_init(&zhdr->page_lock); | ||
151 | zhdr->first_chunks = 0; | 154 | zhdr->first_chunks = 0; |
152 | zhdr->middle_chunks = 0; | 155 | zhdr->middle_chunks = 0; |
153 | zhdr->last_chunks = 0; | 156 | zhdr->last_chunks = 0; |
@@ -163,6 +166,19 @@ static void free_z3fold_page(struct z3fold_header *zhdr) | |||
163 | __free_page(virt_to_page(zhdr)); | 166 | __free_page(virt_to_page(zhdr)); |
164 | } | 167 | } |
165 | 168 | ||
169 | /* Lock a z3fold page */ | ||
170 | static inline void z3fold_page_lock(struct z3fold_header *zhdr) | ||
171 | { | ||
172 | spin_lock(&zhdr->page_lock); | ||
173 | } | ||
174 | |||
175 | /* Unlock a z3fold page */ | ||
176 | static inline void z3fold_page_unlock(struct z3fold_header *zhdr) | ||
177 | { | ||
178 | spin_unlock(&zhdr->page_lock); | ||
179 | } | ||
180 | |||
181 | |||
166 | /* | 182 | /* |
167 | * Encodes the handle of a particular buddy within a z3fold page | 183 | * Encodes the handle of a particular buddy within a z3fold page |
168 | * Pool lock should be held as this function accesses first_num | 184 | * Pool lock should be held as this function accesses first_num |
@@ -351,50 +367,60 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, | |||
351 | bud = HEADLESS; | 367 | bud = HEADLESS; |
352 | else { | 368 | else { |
353 | chunks = size_to_chunks(size); | 369 | chunks = size_to_chunks(size); |
354 | spin_lock(&pool->lock); | ||
355 | 370 | ||
356 | /* First, try to find an unbuddied z3fold page. */ | 371 | /* First, try to find an unbuddied z3fold page. */ |
357 | zhdr = NULL; | 372 | zhdr = NULL; |
358 | for_each_unbuddied_list(i, chunks) { | 373 | for_each_unbuddied_list(i, chunks) { |
359 | if (!list_empty(&pool->unbuddied[i])) { | 374 | spin_lock(&pool->lock); |
360 | zhdr = list_first_entry(&pool->unbuddied[i], | 375 | zhdr = list_first_entry_or_null(&pool->unbuddied[i], |
361 | struct z3fold_header, buddy); | 376 | struct z3fold_header, buddy); |
362 | page = virt_to_page(zhdr); | 377 | if (!zhdr) { |
363 | if (zhdr->first_chunks == 0) { | 378 | spin_unlock(&pool->lock); |
364 | if (zhdr->middle_chunks != 0 && | 379 | continue; |
365 | chunks >= zhdr->start_middle) | 380 | } |
366 | bud = LAST; | 381 | list_del_init(&zhdr->buddy); |
367 | else | 382 | spin_unlock(&pool->lock); |
368 | bud = FIRST; | 383 | |
369 | } else if (zhdr->last_chunks == 0) | 384 | page = virt_to_page(zhdr); |
385 | z3fold_page_lock(zhdr); | ||
386 | if (zhdr->first_chunks == 0) { | ||
387 | if (zhdr->middle_chunks != 0 && | ||
388 | chunks >= zhdr->start_middle) | ||
370 | bud = LAST; | 389 | bud = LAST; |
371 | else if (zhdr->middle_chunks == 0) | 390 | else |
372 | bud = MIDDLE; | 391 | bud = FIRST; |
373 | else { | 392 | } else if (zhdr->last_chunks == 0) |
374 | pr_err("No free chunks in unbuddied\n"); | 393 | bud = LAST; |
375 | WARN_ON(1); | 394 | else if (zhdr->middle_chunks == 0) |
376 | continue; | 395 | bud = MIDDLE; |
377 | } | 396 | else { |
378 | list_del(&zhdr->buddy); | 397 | spin_lock(&pool->lock); |
379 | goto found; | 398 | list_add(&zhdr->buddy, &pool->buddied); |
399 | spin_unlock(&pool->lock); | ||
400 | z3fold_page_unlock(zhdr); | ||
401 | pr_err("No free chunks in unbuddied\n"); | ||
402 | WARN_ON(1); | ||
403 | continue; | ||
380 | } | 404 | } |
405 | goto found; | ||
381 | } | 406 | } |
382 | bud = FIRST; | 407 | bud = FIRST; |
383 | spin_unlock(&pool->lock); | ||
384 | } | 408 | } |
385 | 409 | ||
386 | /* Couldn't find unbuddied z3fold page, create new one */ | 410 | /* Couldn't find unbuddied z3fold page, create new one */ |
387 | page = alloc_page(gfp); | 411 | page = alloc_page(gfp); |
388 | if (!page) | 412 | if (!page) |
389 | return -ENOMEM; | 413 | return -ENOMEM; |
390 | spin_lock(&pool->lock); | 414 | |
391 | atomic64_inc(&pool->pages_nr); | 415 | atomic64_inc(&pool->pages_nr); |
392 | zhdr = init_z3fold_page(page); | 416 | zhdr = init_z3fold_page(page); |
393 | 417 | ||
394 | if (bud == HEADLESS) { | 418 | if (bud == HEADLESS) { |
395 | set_bit(PAGE_HEADLESS, &page->private); | 419 | set_bit(PAGE_HEADLESS, &page->private); |
420 | spin_lock(&pool->lock); | ||
396 | goto headless; | 421 | goto headless; |
397 | } | 422 | } |
423 | z3fold_page_lock(zhdr); | ||
398 | 424 | ||
399 | found: | 425 | found: |
400 | if (bud == FIRST) | 426 | if (bud == FIRST) |
@@ -406,6 +432,7 @@ found: | |||
406 | zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; | 432 | zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; |
407 | } | 433 | } |
408 | 434 | ||
435 | spin_lock(&pool->lock); | ||
409 | if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || | 436 | if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || |
410 | zhdr->middle_chunks == 0) { | 437 | zhdr->middle_chunks == 0) { |
411 | /* Add to unbuddied list */ | 438 | /* Add to unbuddied list */ |
@@ -425,6 +452,8 @@ headless: | |||
425 | 452 | ||
426 | *handle = encode_handle(zhdr, bud); | 453 | *handle = encode_handle(zhdr, bud); |
427 | spin_unlock(&pool->lock); | 454 | spin_unlock(&pool->lock); |
455 | if (bud != HEADLESS) | ||
456 | z3fold_page_unlock(zhdr); | ||
428 | 457 | ||
429 | return 0; | 458 | return 0; |
430 | } | 459 | } |
@@ -446,7 +475,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
446 | struct page *page; | 475 | struct page *page; |
447 | enum buddy bud; | 476 | enum buddy bud; |
448 | 477 | ||
449 | spin_lock(&pool->lock); | ||
450 | zhdr = handle_to_z3fold_header(handle); | 478 | zhdr = handle_to_z3fold_header(handle); |
451 | page = virt_to_page(zhdr); | 479 | page = virt_to_page(zhdr); |
452 | 480 | ||
@@ -454,6 +482,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
454 | /* HEADLESS page stored */ | 482 | /* HEADLESS page stored */ |
455 | bud = HEADLESS; | 483 | bud = HEADLESS; |
456 | } else { | 484 | } else { |
485 | z3fold_page_lock(zhdr); | ||
457 | bud = handle_to_buddy(handle); | 486 | bud = handle_to_buddy(handle); |
458 | 487 | ||
459 | switch (bud) { | 488 | switch (bud) { |
@@ -470,37 +499,59 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
470 | default: | 499 | default: |
471 | pr_err("%s: unknown bud %d\n", __func__, bud); | 500 | pr_err("%s: unknown bud %d\n", __func__, bud); |
472 | WARN_ON(1); | 501 | WARN_ON(1); |
473 | spin_unlock(&pool->lock); | 502 | z3fold_page_unlock(zhdr); |
474 | return; | 503 | return; |
475 | } | 504 | } |
476 | } | 505 | } |
477 | 506 | ||
478 | if (test_bit(UNDER_RECLAIM, &page->private)) { | 507 | if (test_bit(UNDER_RECLAIM, &page->private)) { |
479 | /* z3fold page is under reclaim, reclaim will free */ | 508 | /* z3fold page is under reclaim, reclaim will free */ |
480 | spin_unlock(&pool->lock); | 509 | if (bud != HEADLESS) |
510 | z3fold_page_unlock(zhdr); | ||
481 | return; | 511 | return; |
482 | } | 512 | } |
483 | 513 | ||
484 | /* Remove from existing buddy list */ | 514 | /* Remove from existing buddy list */ |
485 | if (bud != HEADLESS) | 515 | if (bud != HEADLESS) { |
486 | list_del(&zhdr->buddy); | 516 | spin_lock(&pool->lock); |
517 | /* | ||
518 | * this object may have been removed from its list by | ||
519 | * z3fold_alloc(). In that case we just do nothing, | ||
520 | * z3fold_alloc() will allocate an object and add the page | ||
521 | * to the relevant list. | ||
522 | */ | ||
523 | if (!list_empty(&zhdr->buddy)) { | ||
524 | list_del(&zhdr->buddy); | ||
525 | } else { | ||
526 | spin_unlock(&pool->lock); | ||
527 | z3fold_page_unlock(zhdr); | ||
528 | return; | ||
529 | } | ||
530 | spin_unlock(&pool->lock); | ||
531 | } | ||
487 | 532 | ||
488 | if (bud == HEADLESS || | 533 | if (bud == HEADLESS || |
489 | (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 && | 534 | (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 && |
490 | zhdr->last_chunks == 0)) { | 535 | zhdr->last_chunks == 0)) { |
491 | /* z3fold page is empty, free */ | 536 | /* z3fold page is empty, free */ |
537 | spin_lock(&pool->lock); | ||
492 | list_del(&page->lru); | 538 | list_del(&page->lru); |
539 | spin_unlock(&pool->lock); | ||
493 | clear_bit(PAGE_HEADLESS, &page->private); | 540 | clear_bit(PAGE_HEADLESS, &page->private); |
541 | if (bud != HEADLESS) | ||
542 | z3fold_page_unlock(zhdr); | ||
494 | free_z3fold_page(zhdr); | 543 | free_z3fold_page(zhdr); |
495 | atomic64_dec(&pool->pages_nr); | 544 | atomic64_dec(&pool->pages_nr); |
496 | } else { | 545 | } else { |
497 | z3fold_compact_page(zhdr); | 546 | z3fold_compact_page(zhdr); |
498 | /* Add to the unbuddied list */ | 547 | /* Add to the unbuddied list */ |
548 | spin_lock(&pool->lock); | ||
499 | freechunks = num_free_chunks(zhdr); | 549 | freechunks = num_free_chunks(zhdr); |
500 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | 550 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); |
551 | spin_unlock(&pool->lock); | ||
552 | z3fold_page_unlock(zhdr); | ||
501 | } | 553 | } |
502 | 554 | ||
503 | spin_unlock(&pool->lock); | ||
504 | } | 555 | } |
505 | 556 | ||
506 | /** | 557 | /** |
@@ -547,12 +598,15 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
547 | unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; | 598 | unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; |
548 | 599 | ||
549 | spin_lock(&pool->lock); | 600 | spin_lock(&pool->lock); |
550 | if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) || | 601 | if (!pool->ops || !pool->ops->evict || retries == 0) { |
551 | retries == 0) { | ||
552 | spin_unlock(&pool->lock); | 602 | spin_unlock(&pool->lock); |
553 | return -EINVAL; | 603 | return -EINVAL; |
554 | } | 604 | } |
555 | for (i = 0; i < retries; i++) { | 605 | for (i = 0; i < retries; i++) { |
606 | if (list_empty(&pool->lru)) { | ||
607 | spin_unlock(&pool->lock); | ||
608 | return -EINVAL; | ||
609 | } | ||
556 | page = list_last_entry(&pool->lru, struct page, lru); | 610 | page = list_last_entry(&pool->lru, struct page, lru); |
557 | list_del(&page->lru); | 611 | list_del(&page->lru); |
558 | 612 | ||
@@ -561,6 +615,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
561 | zhdr = page_address(page); | 615 | zhdr = page_address(page); |
562 | if (!test_bit(PAGE_HEADLESS, &page->private)) { | 616 | if (!test_bit(PAGE_HEADLESS, &page->private)) { |
563 | list_del(&zhdr->buddy); | 617 | list_del(&zhdr->buddy); |
618 | spin_unlock(&pool->lock); | ||
619 | z3fold_page_lock(zhdr); | ||
564 | /* | 620 | /* |
565 | * We need encode the handles before unlocking, since | 621 | * We need encode the handles before unlocking, since |
566 | * we can race with free that will set | 622 | * we can race with free that will set |
@@ -575,13 +631,13 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
575 | middle_handle = encode_handle(zhdr, MIDDLE); | 631 | middle_handle = encode_handle(zhdr, MIDDLE); |
576 | if (zhdr->last_chunks) | 632 | if (zhdr->last_chunks) |
577 | last_handle = encode_handle(zhdr, LAST); | 633 | last_handle = encode_handle(zhdr, LAST); |
634 | z3fold_page_unlock(zhdr); | ||
578 | } else { | 635 | } else { |
579 | first_handle = encode_handle(zhdr, HEADLESS); | 636 | first_handle = encode_handle(zhdr, HEADLESS); |
580 | last_handle = middle_handle = 0; | 637 | last_handle = middle_handle = 0; |
638 | spin_unlock(&pool->lock); | ||
581 | } | 639 | } |
582 | 640 | ||
583 | spin_unlock(&pool->lock); | ||
584 | |||
585 | /* Issue the eviction callback(s) */ | 641 | /* Issue the eviction callback(s) */ |
586 | if (middle_handle) { | 642 | if (middle_handle) { |
587 | ret = pool->ops->evict(pool, middle_handle); | 643 | ret = pool->ops->evict(pool, middle_handle); |
@@ -599,7 +655,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
599 | goto next; | 655 | goto next; |
600 | } | 656 | } |
601 | next: | 657 | next: |
602 | spin_lock(&pool->lock); | 658 | if (!test_bit(PAGE_HEADLESS, &page->private)) |
659 | z3fold_page_lock(zhdr); | ||
603 | clear_bit(UNDER_RECLAIM, &page->private); | 660 | clear_bit(UNDER_RECLAIM, &page->private); |
604 | if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) || | 661 | if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) || |
605 | (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 && | 662 | (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 && |
@@ -608,26 +665,34 @@ next: | |||
608 | * All buddies are now free, free the z3fold page and | 665 | * All buddies are now free, free the z3fold page and |
609 | * return success. | 666 | * return success. |
610 | */ | 667 | */ |
611 | clear_bit(PAGE_HEADLESS, &page->private); | 668 | if (!test_and_clear_bit(PAGE_HEADLESS, &page->private)) |
669 | z3fold_page_unlock(zhdr); | ||
612 | free_z3fold_page(zhdr); | 670 | free_z3fold_page(zhdr); |
613 | atomic64_dec(&pool->pages_nr); | 671 | atomic64_dec(&pool->pages_nr); |
614 | spin_unlock(&pool->lock); | ||
615 | return 0; | 672 | return 0; |
616 | } else if (!test_bit(PAGE_HEADLESS, &page->private)) { | 673 | } else if (!test_bit(PAGE_HEADLESS, &page->private)) { |
617 | if (zhdr->first_chunks != 0 && | 674 | if (zhdr->first_chunks != 0 && |
618 | zhdr->last_chunks != 0 && | 675 | zhdr->last_chunks != 0 && |
619 | zhdr->middle_chunks != 0) { | 676 | zhdr->middle_chunks != 0) { |
620 | /* Full, add to buddied list */ | 677 | /* Full, add to buddied list */ |
678 | spin_lock(&pool->lock); | ||
621 | list_add(&zhdr->buddy, &pool->buddied); | 679 | list_add(&zhdr->buddy, &pool->buddied); |
680 | spin_unlock(&pool->lock); | ||
622 | } else { | 681 | } else { |
623 | z3fold_compact_page(zhdr); | 682 | z3fold_compact_page(zhdr); |
624 | /* add to unbuddied list */ | 683 | /* add to unbuddied list */ |
684 | spin_lock(&pool->lock); | ||
625 | freechunks = num_free_chunks(zhdr); | 685 | freechunks = num_free_chunks(zhdr); |
626 | list_add(&zhdr->buddy, | 686 | list_add(&zhdr->buddy, |
627 | &pool->unbuddied[freechunks]); | 687 | &pool->unbuddied[freechunks]); |
688 | spin_unlock(&pool->lock); | ||
628 | } | 689 | } |
629 | } | 690 | } |
630 | 691 | ||
692 | if (!test_bit(PAGE_HEADLESS, &page->private)) | ||
693 | z3fold_page_unlock(zhdr); | ||
694 | |||
695 | spin_lock(&pool->lock); | ||
631 | /* add to beginning of LRU */ | 696 | /* add to beginning of LRU */ |
632 | list_add(&page->lru, &pool->lru); | 697 | list_add(&page->lru, &pool->lru); |
633 | } | 698 | } |
@@ -652,7 +717,6 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) | |||
652 | void *addr; | 717 | void *addr; |
653 | enum buddy buddy; | 718 | enum buddy buddy; |
654 | 719 | ||
655 | spin_lock(&pool->lock); | ||
656 | zhdr = handle_to_z3fold_header(handle); | 720 | zhdr = handle_to_z3fold_header(handle); |
657 | addr = zhdr; | 721 | addr = zhdr; |
658 | page = virt_to_page(zhdr); | 722 | page = virt_to_page(zhdr); |
@@ -660,6 +724,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) | |||
660 | if (test_bit(PAGE_HEADLESS, &page->private)) | 724 | if (test_bit(PAGE_HEADLESS, &page->private)) |
661 | goto out; | 725 | goto out; |
662 | 726 | ||
727 | z3fold_page_lock(zhdr); | ||
663 | buddy = handle_to_buddy(handle); | 728 | buddy = handle_to_buddy(handle); |
664 | switch (buddy) { | 729 | switch (buddy) { |
665 | case FIRST: | 730 | case FIRST: |
@@ -678,8 +743,9 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) | |||
678 | addr = NULL; | 743 | addr = NULL; |
679 | break; | 744 | break; |
680 | } | 745 | } |
746 | |||
747 | z3fold_page_unlock(zhdr); | ||
681 | out: | 748 | out: |
682 | spin_unlock(&pool->lock); | ||
683 | return addr; | 749 | return addr; |
684 | } | 750 | } |
685 | 751 | ||
@@ -694,19 +760,17 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) | |||
694 | struct page *page; | 760 | struct page *page; |
695 | enum buddy buddy; | 761 | enum buddy buddy; |
696 | 762 | ||
697 | spin_lock(&pool->lock); | ||
698 | zhdr = handle_to_z3fold_header(handle); | 763 | zhdr = handle_to_z3fold_header(handle); |
699 | page = virt_to_page(zhdr); | 764 | page = virt_to_page(zhdr); |
700 | 765 | ||
701 | if (test_bit(PAGE_HEADLESS, &page->private)) { | 766 | if (test_bit(PAGE_HEADLESS, &page->private)) |
702 | spin_unlock(&pool->lock); | ||
703 | return; | 767 | return; |
704 | } | ||
705 | 768 | ||
769 | z3fold_page_lock(zhdr); | ||
706 | buddy = handle_to_buddy(handle); | 770 | buddy = handle_to_buddy(handle); |
707 | if (buddy == MIDDLE) | 771 | if (buddy == MIDDLE) |
708 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); | 772 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); |
709 | spin_unlock(&pool->lock); | 773 | z3fold_page_unlock(zhdr); |
710 | } | 774 | } |
711 | 775 | ||
712 | /** | 776 | /** |