author		Dave Chinner <dchinner@redhat.com>	2013-08-27 20:18:11 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:32 -0400
commit		7dc19d5affd71370754a2c3d36b485810eaee7a1 (patch)
tree		56b84fb2f7462e81c8340dea6341c30ce247f798 /drivers/md
parent		1ab6c4997e04a00c50c6d786c2f046adc0d1f5de (diff)
drivers: convert shrinkers to new count/scan API
Convert the driver shrinkers to the new API. Most changes are compile tested only because I either don't have the hardware or it's staging stuff.

FWIW, the md and android code is pretty good, but the rest of it makes me want to claw my eyes out. The amount of broken code I just encountered is mind boggling. I've added comments explaining what is broken, but I fear that some of the code would be best dealt with by being dragged behind the bike shed, buried in mud up to its neck, and then run over repeatedly with a blunt lawn mower.

Special mention goes to the zcache/zcache2 drivers. They can't co-exist in the build at the same time, they are under different menu options in menuconfig, and they only show up when you've got the right set of mm subsystem options configured, so even compile testing is an exercise in pulling teeth. And that doesn't even take into account the horrible, broken code...

[glommer@openvz.org: fixes for i915, android lowmem, zcache, bcache]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
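For orientation, here is the shape of the API these hunks convert to, as a self-contained and entirely hypothetical sketch: the fictional_* names and the atomic counter are invented for illustration, but count_objects/scan_objects, struct shrink_control, SHRINK_STOP, DEFAULT_SEEKS and register_shrinker() are the real interface. count_objects gives a cheap estimate of reclaimable objects; scan_objects tries to free up to sc->nr_to_scan of them and reports how many it actually freed, or SHRINK_STOP when no progress is possible in the current allocation context:

#include <linux/shrinker.h>	/* struct shrinker, struct shrink_control, SHRINK_STOP */
#include <linux/atomic.h>

/* A purely hypothetical object cache; only the counter matters here. */
static atomic_long_t fictional_nr_cached;

/* count_objects: a cheap, non-blocking estimate of reclaimable objects. */
static unsigned long fictional_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	return atomic_long_read(&fictional_nr_cached);
}

/*
 * scan_objects: free up to sc->nr_to_scan objects and return the number
 * actually freed, or SHRINK_STOP if no progress is possible in the
 * current allocation context.
 */
static unsigned long fictional_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (!(sc->gfp_mask & __GFP_WAIT))	/* caller cannot block */
		return SHRINK_STOP;

	while (freed < sc->nr_to_scan &&
	       atomic_long_add_unless(&fictional_nr_cached, -1, 0))
		freed++;	/* stand-in for actually freeing an object */

	return freed;
}

static struct shrinker fictional_shrinker = {
	.count_objects	= fictional_count,
	.scan_objects	= fictional_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* e.g. in module init: register_shrinker(&fictional_shrinker); */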
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bcache/btree.c	43
-rw-r--r--	drivers/md/bcache/sysfs.c	2
-rw-r--r--	drivers/md/dm-bufio.c		64
3 files changed, 66 insertions(+), 43 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ee372884c405..f9764e61978b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -597,24 +597,19 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
 	return 0;
 }
 
-static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+				  struct shrink_control *sc)
 {
 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
 	struct btree *b, *t;
 	unsigned long i, nr = sc->nr_to_scan;
+	unsigned long freed = 0;
 
 	if (c->shrinker_disabled)
-		return 0;
+		return SHRINK_STOP;
 
 	if (c->try_harder)
-		return 0;
-
-	/*
-	 * If nr == 0, we're supposed to return the number of items we have
-	 * cached. Not allowed to return -1.
-	 */
-	if (!nr)
-		return mca_can_free(c) * c->btree_pages;
+		return SHRINK_STOP;
 
 	/* Return -1 if we can't do anything right now */
 	if (sc->gfp_mask & __GFP_WAIT)
@@ -634,14 +629,14 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
 
 	i = 0;
 	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
-		if (!nr)
+		if (freed >= nr)
 			break;
 
 		if (++i > 3 &&
 		    !mca_reap(b, NULL, 0)) {
 			mca_data_free(b);
 			rw_unlock(true, b);
-			--nr;
+			freed++;
 		}
 	}
 
@@ -652,7 +647,7 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
 	if (list_empty(&c->btree_cache))
 		goto out;
 
-	for (i = 0; nr && i < c->bucket_cache_used; i++) {
+	for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
 		b = list_first_entry(&c->btree_cache, struct btree, list);
 		list_rotate_left(&c->btree_cache);
 
@@ -661,14 +656,27 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
 			mca_bucket_free(b);
 			mca_data_free(b);
 			rw_unlock(true, b);
-			--nr;
+			freed++;
 		} else
 			b->accessed = 0;
 	}
 out:
-	nr = mca_can_free(c) * c->btree_pages;
 	mutex_unlock(&c->bucket_lock);
-	return nr;
+	return freed;
+}
+
+static unsigned long bch_mca_count(struct shrinker *shrink,
+				   struct shrink_control *sc)
+{
+	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+
+	if (c->shrinker_disabled)
+		return 0;
+
+	if (c->try_harder)
+		return 0;
+
+	return mca_can_free(c) * c->btree_pages;
 }
 
 void bch_btree_cache_free(struct cache_set *c)
@@ -737,7 +745,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
 	c->verify_data = NULL;
 #endif
 
-	c->shrink.shrink = bch_mca_shrink;
+	c->shrink.count_objects = bch_mca_count;
+	c->shrink.scan_objects = bch_mca_scan;
 	c->shrink.seeks = 4;
 	c->shrink.batch = c->btree_pages * 2;
 	register_shrinker(&c->shrink);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 12a2c2846f99..4fe6ab2fbe2e 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -556,7 +556,7 @@ STORE(__bch_cache_set)
 		struct shrink_control sc;
 		sc.gfp_mask = GFP_KERNEL;
 		sc.nr_to_scan = strtoul_or_return(buf);
-		c->shrink.shrink(&c->shrink, &sc);
+		c->shrink.scan_objects(&c->shrink, &sc);
 	}
 
 	sysfs_strtoul(congested_read_threshold_us,
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5227e079a6e3..173cbb20d104 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1425,62 +1425,75 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
 			      unsigned long max_jiffies)
 {
 	if (jiffies - b->last_accessed < max_jiffies)
-		return 1;
+		return 0;
 
 	if (!(gfp & __GFP_IO)) {
 		if (test_bit(B_READING, &b->state) ||
 		    test_bit(B_WRITING, &b->state) ||
 		    test_bit(B_DIRTY, &b->state))
-			return 1;
+			return 0;
 	}
 
 	if (b->hold_count)
-		return 1;
+		return 0;
 
 	__make_buffer_clean(b);
 	__unlink_buffer(b);
 	__free_buffer_wake(b);
 
-	return 0;
+	return 1;
 }
 
-static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
-		   struct shrink_control *sc)
+static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+		   gfp_t gfp_mask)
 {
 	int l;
 	struct dm_buffer *b, *tmp;
+	long freed = 0;
 
 	for (l = 0; l < LIST_SIZE; l++) {
-		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
-			if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
-			    !--nr_to_scan)
-				return;
+		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+			freed += __cleanup_old_buffer(b, gfp_mask, 0);
+			if (!--nr_to_scan)
+				break;
+		}
 		dm_bufio_cond_resched();
 	}
+	return freed;
 }
 
-static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct dm_bufio_client *c =
-	    container_of(shrinker, struct dm_bufio_client, shrinker);
-	unsigned long r;
-	unsigned long nr_to_scan = sc->nr_to_scan;
+	struct dm_bufio_client *c;
+	unsigned long freed;
 
+	c = container_of(shrink, struct dm_bufio_client, shrinker);
 	if (sc->gfp_mask & __GFP_IO)
 		dm_bufio_lock(c);
 	else if (!dm_bufio_trylock(c))
-		return !nr_to_scan ? 0 : -1;
+		return SHRINK_STOP;
 
-	if (nr_to_scan)
-		__scan(c, nr_to_scan, sc);
+	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+	dm_bufio_unlock(c);
+	return freed;
+}
 
-	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
-	if (r > INT_MAX)
-		r = INT_MAX;
+static unsigned long
+dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct dm_bufio_client *c;
+	unsigned long count;
 
-	dm_bufio_unlock(c);
+	c = container_of(shrink, struct dm_bufio_client, shrinker);
+	if (sc->gfp_mask & __GFP_IO)
+		dm_bufio_lock(c);
+	else if (!dm_bufio_trylock(c))
+		return 0;
 
-	return r;
+	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+	dm_bufio_unlock(c);
+	return count;
 }
 
 /*
@@ -1582,7 +1595,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 	__cache_size_refresh();
 	mutex_unlock(&dm_bufio_clients_lock);
 
-	c->shrinker.shrink = shrink;
+	c->shrinker.count_objects = dm_bufio_shrink_count;
+	c->shrinker.scan_objects = dm_bufio_shrink_scan;
 	c->shrinker.seeks = 1;
 	c->shrinker.batch = 0;
 	register_shrinker(&c->shrinker);
@@ -1669,7 +1683,7 @@ static void cleanup_old_buffers(void)
 			struct dm_buffer *b;
 			b = list_entry(c->lru[LIST_CLEAN].prev,
 				       struct dm_buffer, lru_list);
-			if (__cleanup_old_buffer(b, 0, max_age * HZ))
+			if (!__cleanup_old_buffer(b, 0, max_age * HZ))
 				break;
 			dm_bufio_cond_resched();
 		}
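Reading the converted drivers is easier with the caller's contract in mind. The sketch below is a hypothetical, heavily simplified model of how the shrinker core drives a count/scan pair; drive_shrinker is an invented name, and the real loop lives in shrink_slab() in mm/vmscan.c, which additionally scales the count by seeks and memory pressure and defers work it could not complete. count_objects sizes the job, scan_objects performs it in bounded batches, and SHRINK_STOP ends the walk when no progress is possible, exactly what bch_mca_scan and dm_bufio_shrink_scan now signal above:

#include <linux/kernel.h>	/* min() */
#include <linux/shrinker.h>

/* Simplified, hypothetical model of the caller side of count/scan. */
static unsigned long drive_shrinker(struct shrinker *s,
				    struct shrink_control *sc)
{
	unsigned long count, ret, freed = 0;
	unsigned long batch = s->batch ? s->batch : 128;	/* cf. SHRINK_BATCH */

	count = s->count_objects(s, sc);	/* cheap estimate first */
	while (count) {
		sc->nr_to_scan = min(count, batch);
		ret = s->scan_objects(s, sc);	/* do a bounded batch of work */
		if (ret == SHRINK_STOP)		/* no progress possible now */
			break;
		freed += ret;
		count -= sc->nr_to_scan;
	}
	return freed;
}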