-rw-r--r--   drivers/md/bcache/extents.c  |  22
-rw-r--r--   drivers/md/bcache/sysfs.c    | 126
2 files changed, 76 insertions, 72 deletions
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 35887330b49d..3a0de4cf9771 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -308,6 +308,16 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
 	return NULL;
 }
 
+static void bch_subtract_dirty(struct bkey *k,
+			       struct cache_set *c,
+			       uint64_t offset,
+			       int sectors)
+{
+	if (KEY_DIRTY(k))
+		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+					     offset, -sectors);
+}
+
 static bool bch_extent_insert_fixup(struct btree_keys *b,
 				    struct bkey *insert,
 				    struct btree_iter *iter,
@@ -315,13 +325,6 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
 {
 	struct cache_set *c = container_of(b, struct btree, keys)->c;
 
-	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-	{
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
-						     offset, -sectors);
-	}
-
 	uint64_t old_offset;
 	unsigned old_size, sectors_found = 0;
 
@@ -398,7 +401,8 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
 
 			struct bkey *top;
 
-			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+			bch_subtract_dirty(k, c, KEY_START(insert),
+					   KEY_SIZE(insert));
 
 			if (bkey_written(b, k)) {
 				/*
@@ -448,7 +452,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
 			}
 		}
 
-		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
 	}
 
 check_failed:
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 89aaa2ef38f9..b3ff57d61dde 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -405,7 +405,7 @@ struct bset_stats_op {
 	struct bset_stats stats;
 };
 
-static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
 {
 	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
 
@@ -423,7 +423,7 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
 	memset(&op, 0, sizeof(op));
 	bch_btree_op_init(&op.op, -1);
 
-	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
 	if (ret < 0)
 		return ret;
 
@@ -441,81 +441,81 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
 			op.stats.floats, op.stats.failed);
 }
 
-SHOW(__bch_cache_set)
-{
-	unsigned root_usage(struct cache_set *c)
-	{
-		unsigned bytes = 0;
-		struct bkey *k;
-		struct btree *b;
-		struct btree_iter iter;
-
-		goto lock_root;
-
-		do {
-			rw_unlock(false, b);
-lock_root:
-			b = c->root;
-			rw_lock(false, b, b->level);
-		} while (b != c->root);
-
-		for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
-			bytes += bkey_bytes(k);
-
-		rw_unlock(false, b);
-
-		return (bytes * 100) / btree_bytes(c);
-	}
-
-	size_t cache_size(struct cache_set *c)
-	{
-		size_t ret = 0;
-		struct btree *b;
-
-		mutex_lock(&c->bucket_lock);
-		list_for_each_entry(b, &c->btree_cache, list)
-			ret += 1 << (b->keys.page_order + PAGE_SHIFT);
-
-		mutex_unlock(&c->bucket_lock);
-		return ret;
-	}
-
-	unsigned cache_max_chain(struct cache_set *c)
-	{
-		unsigned ret = 0;
-		struct hlist_head *h;
-
-		mutex_lock(&c->bucket_lock);
-
-		for (h = c->bucket_hash;
-		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
-		     h++) {
-			unsigned i = 0;
-			struct hlist_node *p;
-
-			hlist_for_each(p, h)
-				i++;
-
-			ret = max(ret, i);
-		}
-
-		mutex_unlock(&c->bucket_lock);
-		return ret;
-	}
-
-	unsigned btree_used(struct cache_set *c)
-	{
-		return div64_u64(c->gc_stats.key_bytes * 100,
-				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
-	}
-
-	unsigned average_key_size(struct cache_set *c)
-	{
-		return c->gc_stats.nkeys
-			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
-			: 0;
-	}
-
+static unsigned bch_root_usage(struct cache_set *c)
+{
+	unsigned bytes = 0;
+	struct bkey *k;
+	struct btree *b;
+	struct btree_iter iter;
+
+	goto lock_root;
+
+	do {
+		rw_unlock(false, b);
+lock_root:
+		b = c->root;
+		rw_lock(false, b, b->level);
+	} while (b != c->root);
+
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+		bytes += bkey_bytes(k);
+
+	rw_unlock(false, b);
+
+	return (bytes * 100) / btree_bytes(c);
+}
+
+static size_t bch_cache_size(struct cache_set *c)
+{
+	size_t ret = 0;
+	struct btree *b;
+
+	mutex_lock(&c->bucket_lock);
+	list_for_each_entry(b, &c->btree_cache, list)
+		ret += 1 << (b->keys.page_order + PAGE_SHIFT);
+
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
+
+static unsigned bch_cache_max_chain(struct cache_set *c)
+{
+	unsigned ret = 0;
+	struct hlist_head *h;
+
+	mutex_lock(&c->bucket_lock);
+
+	for (h = c->bucket_hash;
+	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+	     h++) {
+		unsigned i = 0;
+		struct hlist_node *p;
+
+		hlist_for_each(p, h)
+			i++;
+
+		ret = max(ret, i);
+	}
+
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
+
+static unsigned bch_btree_used(struct cache_set *c)
+{
+	return div64_u64(c->gc_stats.key_bytes * 100,
+			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+}
+
+static unsigned bch_average_key_size(struct cache_set *c)
+{
+	return c->gc_stats.nkeys
+		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+		: 0;
+}
+
+SHOW(__bch_cache_set)
+{
 	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
 
 	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
@@ -523,10 +523,10 @@ lock_root:
 	sysfs_hprint(bucket_size, bucket_bytes(c));
 	sysfs_hprint(block_size, block_bytes(c));
 	sysfs_print(tree_depth, c->root->level);
-	sysfs_print(root_usage_percent, root_usage(c));
+	sysfs_print(root_usage_percent, bch_root_usage(c));
 
-	sysfs_hprint(btree_cache_size, cache_size(c));
-	sysfs_print(btree_cache_max_chain, cache_max_chain(c));
+	sysfs_hprint(btree_cache_size, bch_cache_size(c));
+	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
 	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
 
 	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
@@ -534,9 +534,9 @@ lock_root:
 	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
 	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
 
-	sysfs_print(btree_used_percent, btree_used(c));
+	sysfs_print(btree_used_percent, bch_btree_used(c));
 	sysfs_print(btree_nodes, c->gc_stats.nodes);
-	sysfs_hprint(average_key_size, average_key_size(c));
+	sysfs_hprint(average_key_size, bch_average_key_size(c));
 
 	sysfs_print(cache_read_races,
 		    atomic_long_read(&c->cache_read_races));
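
Note on the pattern: every hunk above performs the same transformation, hoisting a GCC-style nested function (subtract_dirty, root_usage, cache_size, cache_max_chain, btree_used, average_key_size) out of its enclosing function into a file-scope static helper with a bch_ prefix. In extents.c the cache_set pointer that the nested subtract_dirty() implicitly captured now travels as an explicit parameter; the sysfs.c helpers already took it as an argument and only needed to move. The following is a minimal, self-contained sketch of that transformation, not bcache code; struct counter, demo_subtract_dirty and demo_insert are hypothetical names used only for illustration.

/*
 * Standalone illustration of the refactoring applied by this patch: a
 * nested function that captured a local of its enclosing function becomes
 * an ordinary file-scope static helper taking that value as a parameter.
 * All identifiers here are made up for the example.
 */
#include <stdio.h>

struct counter {
	int dirty;	/* stands in for the per-device dirty-sector count */
};

/* After: file-scope static helper; the formerly captured pointer is explicit. */
static void demo_subtract_dirty(struct counter *c, int sectors)
{
	c->dirty -= sectors;
}

static void demo_insert(struct counter *c, int sectors)
{
	/*
	 * Before the refactor this would have looked roughly like:
	 *
	 *	void subtract_dirty(int sectors)
	 *	{
	 *		c->dirty -= sectors;	// 'c' captured from demo_insert()
	 *	}
	 *	subtract_dirty(sectors);
	 *
	 * which depends on the GCC nested-function extension.
	 */
	demo_subtract_dirty(c, sectors);
}

int main(void)
{
	struct counter c = { .dirty = 100 };

	demo_insert(&c, 25);
	printf("dirty sectors: %d\n", c.dirty);	/* prints 75 */
	return 0;
}

Nested functions are a GCC extension that other compilers (clang, for instance) do not implement, which is one common reason for this kind of cleanup; the sketch only shows the shape of the change and makes no claim about the patch author's motivation.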
