aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVitaly Wool <vitalywool@gmail.com>2019-05-13 20:22:43 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-05-14 12:47:50 -0400
commit9050cce104c04982fb7b5dea9aee4f66f245d6d3 (patch)
treea1819d628e925c24512a17aea18c65190d348547
parent1c52e6d0681020e2272c0cbce270dd91a215e7d3 (diff)
mm/z3fold.c: introduce helper functions
Patch series "z3fold: support page migration", v2. This patchset implements page migration support and slightly better buddy search. To implement page migration support, z3fold has to move away from the current scheme of handle encoding, i.e. stop encoding the page address in handles. Instead, a small per-page structure is created which will contain actual addresses for z3fold objects, while pointers to fields of that structure will be used as handles. Thus, it will be possible to change the underlying addresses to reflect page migration. To support migration itself, 3 callbacks will be implemented: 1: isolation callback: z3fold_page_isolate(): try to isolate the page by removing it from all lists. Pages scheduled for some activity and mapped pages will not be isolated. Returns true if isolation was successful or false otherwise. 2: migration callback: z3fold_page_migrate(): re-check critical conditions and migrate page contents to the new page provided by the system. Returns 0 on success or a negative error code otherwise. 3: putback callback: z3fold_page_putback(): put back the page if z3fold_page_migrate() for it failed permanently (i.e. not with the -EAGAIN code). To make sure an isolated page doesn't get freed, its kref is incremented in z3fold_page_isolate() and decremented during post-migration compaction if migration was successful, or by z3fold_page_putback() in the other case. Since the new handle encoding scheme implies a slight memory consumption increase, better buddy search (which decreases memory consumption) is included in this patchset. This patch (of 4): Introduce a separate helper function for object allocation, as well as 2 smaller helpers to add a buddy to the list and to get a pointer to the pool from the z3fold header. No functional changes here. 
Link: http://lkml.kernel.org/r/20190417103633.a4bb770b5bf0fb7e43ce1666@gmail.com Signed-off-by: Vitaly Wool <vitaly.vul@sony.com> Cc: Dan Streetman <ddstreet@ieee.org> Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> Cc: Krzysztof Kozlowski <k.kozlowski@samsung.com> Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com> Cc: Uladzislau Rezki <urezki@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/z3fold.c184
1 file changed, 100 insertions, 84 deletions
diff --git a/mm/z3fold.c b/mm/z3fold.c
index aee9b0b8d907..7a59875d880c 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -255,10 +255,15 @@ static enum buddy handle_to_buddy(unsigned long handle)
255 return (handle - zhdr->first_num) & BUDDY_MASK; 255 return (handle - zhdr->first_num) & BUDDY_MASK;
256} 256}
257 257
258static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
259{
260 return zhdr->pool;
261}
262
258static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) 263static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
259{ 264{
260 struct page *page = virt_to_page(zhdr); 265 struct page *page = virt_to_page(zhdr);
261 struct z3fold_pool *pool = zhdr->pool; 266 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
262 267
263 WARN_ON(!list_empty(&zhdr->buddy)); 268 WARN_ON(!list_empty(&zhdr->buddy));
264 set_bit(PAGE_STALE, &page->private); 269 set_bit(PAGE_STALE, &page->private);
@@ -295,9 +300,10 @@ static void release_z3fold_page_locked_list(struct kref *ref)
295{ 300{
296 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, 301 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
297 refcount); 302 refcount);
298 spin_lock(&zhdr->pool->lock); 303 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
304 spin_lock(&pool->lock);
299 list_del_init(&zhdr->buddy); 305 list_del_init(&zhdr->buddy);
300 spin_unlock(&zhdr->pool->lock); 306 spin_unlock(&pool->lock);
301 307
302 WARN_ON(z3fold_page_trylock(zhdr)); 308 WARN_ON(z3fold_page_trylock(zhdr));
303 __release_z3fold_page(zhdr, true); 309 __release_z3fold_page(zhdr, true);
@@ -349,6 +355,23 @@ static int num_free_chunks(struct z3fold_header *zhdr)
349 return nfree; 355 return nfree;
350} 356}
351 357
358/* Add to the appropriate unbuddied list */
359static inline void add_to_unbuddied(struct z3fold_pool *pool,
360 struct z3fold_header *zhdr)
361{
362 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
363 zhdr->middle_chunks == 0) {
364 struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
365
366 int freechunks = num_free_chunks(zhdr);
367 spin_lock(&pool->lock);
368 list_add(&zhdr->buddy, &unbuddied[freechunks]);
369 spin_unlock(&pool->lock);
370 zhdr->cpu = smp_processor_id();
371 put_cpu_ptr(pool->unbuddied);
372 }
373}
374
352static inline void *mchunk_memmove(struct z3fold_header *zhdr, 375static inline void *mchunk_memmove(struct z3fold_header *zhdr,
353 unsigned short dst_chunk) 376 unsigned short dst_chunk)
354{ 377{
@@ -406,10 +429,8 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
406 429
407static void do_compact_page(struct z3fold_header *zhdr, bool locked) 430static void do_compact_page(struct z3fold_header *zhdr, bool locked)
408{ 431{
409 struct z3fold_pool *pool = zhdr->pool; 432 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
410 struct page *page; 433 struct page *page;
411 struct list_head *unbuddied;
412 int fchunks;
413 434
414 page = virt_to_page(zhdr); 435 page = virt_to_page(zhdr);
415 if (locked) 436 if (locked)
@@ -430,18 +451,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
430 } 451 }
431 452
432 z3fold_compact_page(zhdr); 453 z3fold_compact_page(zhdr);
433 unbuddied = get_cpu_ptr(pool->unbuddied); 454 add_to_unbuddied(pool, zhdr);
434 fchunks = num_free_chunks(zhdr);
435 if (fchunks < NCHUNKS &&
436 (!zhdr->first_chunks || !zhdr->middle_chunks ||
437 !zhdr->last_chunks)) {
438 /* the page's not completely free and it's unbuddied */
439 spin_lock(&pool->lock);
440 list_add(&zhdr->buddy, &unbuddied[fchunks]);
441 spin_unlock(&pool->lock);
442 zhdr->cpu = smp_processor_id();
443 }
444 put_cpu_ptr(pool->unbuddied);
445 z3fold_page_unlock(zhdr); 455 z3fold_page_unlock(zhdr);
446} 456}
447 457
@@ -453,6 +463,67 @@ static void compact_page_work(struct work_struct *w)
453 do_compact_page(zhdr, false); 463 do_compact_page(zhdr, false);
454} 464}
455 465
466/* returns _locked_ z3fold page header or NULL */
467static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
468 size_t size, bool can_sleep)
469{
470 struct z3fold_header *zhdr = NULL;
471 struct page *page;
472 struct list_head *unbuddied;
473 int chunks = size_to_chunks(size), i;
474
475lookup:
476 /* First, try to find an unbuddied z3fold page. */
477 unbuddied = get_cpu_ptr(pool->unbuddied);
478 for_each_unbuddied_list(i, chunks) {
479 struct list_head *l = &unbuddied[i];
480
481 zhdr = list_first_entry_or_null(READ_ONCE(l),
482 struct z3fold_header, buddy);
483
484 if (!zhdr)
485 continue;
486
487 /* Re-check under lock. */
488 spin_lock(&pool->lock);
489 l = &unbuddied[i];
490 if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
491 struct z3fold_header, buddy)) ||
492 !z3fold_page_trylock(zhdr)) {
493 spin_unlock(&pool->lock);
494 zhdr = NULL;
495 put_cpu_ptr(pool->unbuddied);
496 if (can_sleep)
497 cond_resched();
498 goto lookup;
499 }
500 list_del_init(&zhdr->buddy);
501 zhdr->cpu = -1;
502 spin_unlock(&pool->lock);
503
504 page = virt_to_page(zhdr);
505 if (test_bit(NEEDS_COMPACTING, &page->private)) {
506 z3fold_page_unlock(zhdr);
507 zhdr = NULL;
508 put_cpu_ptr(pool->unbuddied);
509 if (can_sleep)
510 cond_resched();
511 goto lookup;
512 }
513
514 /*
515 * this page could not be removed from its unbuddied
516 * list while pool lock was held, and then we've taken
517 * page lock so kref_put could not be called before
518 * we got here, so it's safe to just call kref_get()
519 */
520 kref_get(&zhdr->refcount);
521 break;
522 }
523 put_cpu_ptr(pool->unbuddied);
524
525 return zhdr;
526}
456 527
457/* 528/*
458 * API Functions 529 * API Functions
@@ -546,7 +617,7 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
546static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, 617static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
547 unsigned long *handle) 618 unsigned long *handle)
548{ 619{
549 int chunks = 0, i, freechunks; 620 int chunks = size_to_chunks(size);
550 struct z3fold_header *zhdr = NULL; 621 struct z3fold_header *zhdr = NULL;
551 struct page *page = NULL; 622 struct page *page = NULL;
552 enum buddy bud; 623 enum buddy bud;
@@ -561,56 +632,8 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
561 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) 632 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
562 bud = HEADLESS; 633 bud = HEADLESS;
563 else { 634 else {
564 struct list_head *unbuddied; 635retry:
565 chunks = size_to_chunks(size); 636 zhdr = __z3fold_alloc(pool, size, can_sleep);
566
567lookup:
568 /* First, try to find an unbuddied z3fold page. */
569 unbuddied = get_cpu_ptr(pool->unbuddied);
570 for_each_unbuddied_list(i, chunks) {
571 struct list_head *l = &unbuddied[i];
572
573 zhdr = list_first_entry_or_null(READ_ONCE(l),
574 struct z3fold_header, buddy);
575
576 if (!zhdr)
577 continue;
578
579 /* Re-check under lock. */
580 spin_lock(&pool->lock);
581 l = &unbuddied[i];
582 if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
583 struct z3fold_header, buddy)) ||
584 !z3fold_page_trylock(zhdr)) {
585 spin_unlock(&pool->lock);
586 put_cpu_ptr(pool->unbuddied);
587 goto lookup;
588 }
589 list_del_init(&zhdr->buddy);
590 zhdr->cpu = -1;
591 spin_unlock(&pool->lock);
592
593 page = virt_to_page(zhdr);
594 if (test_bit(NEEDS_COMPACTING, &page->private)) {
595 z3fold_page_unlock(zhdr);
596 zhdr = NULL;
597 put_cpu_ptr(pool->unbuddied);
598 if (can_sleep)
599 cond_resched();
600 goto lookup;
601 }
602
603 /*
604 * this page could not be removed from its unbuddied
605 * list while pool lock was held, and then we've taken
606 * page lock so kref_put could not be called before
607 * we got here, so it's safe to just call kref_get()
608 */
609 kref_get(&zhdr->refcount);
610 break;
611 }
612 put_cpu_ptr(pool->unbuddied);
613
614 if (zhdr) { 637 if (zhdr) {
615 if (zhdr->first_chunks == 0) { 638 if (zhdr->first_chunks == 0) {
616 if (zhdr->middle_chunks != 0 && 639 if (zhdr->middle_chunks != 0 &&
@@ -630,8 +653,9 @@ lookup:
630 z3fold_page_unlock(zhdr); 653 z3fold_page_unlock(zhdr);
631 pr_err("No free chunks in unbuddied\n"); 654 pr_err("No free chunks in unbuddied\n");
632 WARN_ON(1); 655 WARN_ON(1);
633 goto lookup; 656 goto retry;
634 } 657 }
658 page = virt_to_page(zhdr);
635 goto found; 659 goto found;
636 } 660 }
637 bud = FIRST; 661 bud = FIRST;
@@ -662,8 +686,12 @@ lookup:
662 if (!page) 686 if (!page)
663 return -ENOMEM; 687 return -ENOMEM;
664 688
665 atomic64_inc(&pool->pages_nr);
666 zhdr = init_z3fold_page(page, pool); 689 zhdr = init_z3fold_page(page, pool);
690 if (!zhdr) {
691 __free_page(page);
692 return -ENOMEM;
693 }
694 atomic64_inc(&pool->pages_nr);
667 695
668 if (bud == HEADLESS) { 696 if (bud == HEADLESS) {
669 set_bit(PAGE_HEADLESS, &page->private); 697 set_bit(PAGE_HEADLESS, &page->private);
@@ -680,19 +708,7 @@ found:
680 zhdr->middle_chunks = chunks; 708 zhdr->middle_chunks = chunks;
681 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; 709 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
682 } 710 }
683 711 add_to_unbuddied(pool, zhdr);
684 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
685 zhdr->middle_chunks == 0) {
686 struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
687
688 /* Add to unbuddied list */
689 freechunks = num_free_chunks(zhdr);
690 spin_lock(&pool->lock);
691 list_add(&zhdr->buddy, &unbuddied[freechunks]);
692 spin_unlock(&pool->lock);
693 zhdr->cpu = smp_processor_id();
694 put_cpu_ptr(pool->unbuddied);
695 }
696 712
697headless: 713headless:
698 spin_lock(&pool->lock); 714 spin_lock(&pool->lock);