about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Kent Overstreet <koverstreet@google.com>  2013-05-11 20:07:26 -0400
committer Kent Overstreet <koverstreet@google.com>  2013-06-26 20:09:16 -0400
commit 444fc0b6b167ed164e7436621a9d095e042644dd (patch)
tree   4625e5b019f6f36794268c358fad393248df5bd6
parent 6ded34d1a54c046a45db071d3cb7b37bd0a4a31f (diff)
bcache: Initialize sectors_dirty when attaching
Previously, dirty_data wouldn't get initialized until the first garbage collection... which was a bit of a problem for background writeback (as the PD controller keys off of it) and also confusing for users.

This is also prep work for making background writeback aware of raid5/6 stripes.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
-rw-r--r-- drivers/md/bcache/bcache.h     2
-rw-r--r-- drivers/md/bcache/btree.c     29
-rw-r--r-- drivers/md/bcache/super.c      1
-rw-r--r-- drivers/md/bcache/writeback.c 36
4 files changed, 39 insertions(+), 29 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6fa5a1e33c49..d099d8894c2f 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -438,7 +438,6 @@ struct bcache_device {
438 atomic_t detaching; 438 atomic_t detaching;
439 439
440 atomic_long_t sectors_dirty; 440 atomic_long_t sectors_dirty;
441 unsigned long sectors_dirty_gc;
442 unsigned long sectors_dirty_last; 441 unsigned long sectors_dirty_last;
443 long sectors_dirty_derivative; 442 long sectors_dirty_derivative;
444 443
@@ -1225,6 +1224,7 @@ void bch_cache_set_stop(struct cache_set *);
1225struct cache_set *bch_cache_set_alloc(struct cache_sb *); 1224struct cache_set *bch_cache_set_alloc(struct cache_sb *);
1226void bch_btree_cache_free(struct cache_set *); 1225void bch_btree_cache_free(struct cache_set *);
1227int bch_btree_cache_alloc(struct cache_set *); 1226int bch_btree_cache_alloc(struct cache_set *);
1227void bch_sectors_dirty_init(struct cached_dev *);
1228void bch_cached_dev_writeback_init(struct cached_dev *); 1228void bch_cached_dev_writeback_init(struct cached_dev *);
1229void bch_moving_init_cache_set(struct cache_set *); 1229void bch_moving_init_cache_set(struct cache_set *);
1230 1230
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 53a0f4ef4e32..230c3a6d9be2 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1119,11 +1119,8 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
1119 gc->nkeys++; 1119 gc->nkeys++;
1120 1120
1121 gc->data += KEY_SIZE(k); 1121 gc->data += KEY_SIZE(k);
1122 if (KEY_DIRTY(k)) { 1122 if (KEY_DIRTY(k))
1123 gc->dirty += KEY_SIZE(k); 1123 gc->dirty += KEY_SIZE(k);
1124 if (d)
1125 d->sectors_dirty_gc += KEY_SIZE(k);
1126 }
1127 } 1124 }
1128 1125
1129 for (t = b->sets; t <= &b->sets[b->nsets]; t++) 1126 for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1377,7 +1374,6 @@ static void btree_gc_start(struct cache_set *c)
1377{ 1374{
1378 struct cache *ca; 1375 struct cache *ca;
1379 struct bucket *b; 1376 struct bucket *b;
1380 struct bcache_device **d;
1381 unsigned i; 1377 unsigned i;
1382 1378
1383 if (!c->gc_mark_valid) 1379 if (!c->gc_mark_valid)
@@ -1395,12 +1391,6 @@ static void btree_gc_start(struct cache_set *c)
1395 SET_GC_MARK(b, GC_MARK_RECLAIMABLE); 1391 SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
1396 } 1392 }
1397 1393
1398 for (d = c->devices;
1399 d < c->devices + c->nr_uuids;
1400 d++)
1401 if (*d)
1402 (*d)->sectors_dirty_gc = 0;
1403
1404 mutex_unlock(&c->bucket_lock); 1394 mutex_unlock(&c->bucket_lock);
1405} 1395}
1406 1396
@@ -1409,7 +1399,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
1409 size_t available = 0; 1399 size_t available = 0;
1410 struct bucket *b; 1400 struct bucket *b;
1411 struct cache *ca; 1401 struct cache *ca;
1412 struct bcache_device **d;
1413 unsigned i; 1402 unsigned i;
1414 1403
1415 mutex_lock(&c->bucket_lock); 1404 mutex_lock(&c->bucket_lock);
@@ -1452,22 +1441,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
1452 } 1441 }
1453 } 1442 }
1454 1443
1455 for (d = c->devices;
1456 d < c->devices + c->nr_uuids;
1457 d++)
1458 if (*d) {
1459 unsigned long last =
1460 atomic_long_read(&((*d)->sectors_dirty));
1461 long difference = (*d)->sectors_dirty_gc - last;
1462
1463 pr_debug("sectors dirty off by %li", difference);
1464
1465 (*d)->sectors_dirty_last += difference;
1466
1467 atomic_long_set(&((*d)->sectors_dirty),
1468 (*d)->sectors_dirty_gc);
1469 }
1470
1471 mutex_unlock(&c->bucket_lock); 1444 mutex_unlock(&c->bucket_lock);
1472 return available; 1445 return available;
1473} 1446}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3c8474161e8e..dbfa1c38e85e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -961,6 +961,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
961 atomic_set(&dc->count, 1); 961 atomic_set(&dc->count, 1);
962 962
963 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 963 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
964 bch_sectors_dirty_init(dc);
964 atomic_set(&dc->has_dirty, 1); 965 atomic_set(&dc->has_dirty, 1);
965 atomic_inc(&dc->count); 966 atomic_inc(&dc->count);
966 bch_writeback_queue(dc); 967 bch_writeback_queue(dc);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 82f6d4577be2..553949eefd51 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -377,6 +377,42 @@ err:
377 refill_dirty(cl); 377 refill_dirty(cl);
378} 378}
379 379
380/* Init */
381
382static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
383 struct cached_dev *dc)
384{
385 struct bkey *k;
386 struct btree_iter iter;
387
388 bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
389 while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
390 if (!b->level) {
391 if (KEY_INODE(k) > dc->disk.id)
392 break;
393
394 if (KEY_DIRTY(k))
395 atomic_long_add(KEY_SIZE(k),
396 &dc->disk.sectors_dirty);
397 } else {
398 btree(sectors_dirty_init, k, b, op, dc);
399 if (KEY_INODE(k) > dc->disk.id)
400 break;
401
402 cond_resched();
403 }
404
405 return 0;
406}
407
408void bch_sectors_dirty_init(struct cached_dev *dc)
409{
410 struct btree_op op;
411
412 bch_btree_op_init_stack(&op);
413 btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
414}
415
380void bch_cached_dev_writeback_init(struct cached_dev *dc) 416void bch_cached_dev_writeback_init(struct cached_dev *dc)
381{ 417{
382 closure_init_unlocked(&dc->writeback); 418 closure_init_unlocked(&dc->writeback);