Diffstat (limited to 'drivers/md/bcache/bcache.h')
 drivers/md/bcache/bcache.h | 56 +++++++++++++++++++-------------------------------------
 1 file changed, 19 insertions(+), 37 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d3e15b42a4ab..342ba86c6e4f 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -178,7 +178,6 @@
 #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 
 #include <linux/bio.h>
-#include <linux/blktrace_api.h>
 #include <linux/kobject.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -388,8 +387,6 @@ struct keybuf_key {
 typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
 
 struct keybuf {
-	keybuf_pred_fn		*key_predicate;
-
 	struct bkey		last_scanned;
 	spinlock_t		lock;
 
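With key_predicate gone from struct keybuf, the filter can no longer live in the buffer itself; presumably each refill now receives the predicate from its caller. A minimal sketch of that calling convention, assuming a refill function of roughly this shape (neither the function signature nor dirty_pred appears in this header):

	/* Hypothetical post-change refill API: predicate supplied per call. */
	void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
			       struct bkey *end, keybuf_pred_fn *pred);

	static bool dirty_pred(struct keybuf *buf, struct bkey *k)
	{
		return KEY_DIRTY(k);	/* bcache's dirty-bit accessor */
	}

This lets the same keybuf be scanned with different filters over its lifetime, which a function pointer baked into the struct could not.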
@@ -438,8 +435,10 @@ struct bcache_device {
 	/* If nonzero, we're detaching/unregistering from cache set */
 	atomic_t		detaching;
 
-	atomic_long_t		sectors_dirty;
-	unsigned long		sectors_dirty_gc;
+	uint64_t		nr_stripes;
+	unsigned		stripe_size_bits;
+	atomic_t		*stripe_sectors_dirty;
+
 	unsigned long		sectors_dirty_last;
 	long			sectors_dirty_derivative;
 
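The single device-wide sectors_dirty counter becomes an array of per-stripe atomic counters; stripe_size_bits implies power-of-two stripes measured in sectors, so a sector offset selects its counter by a right shift. A minimal sketch of the accounting under that assumption (the helper name is invented):

	static void sectors_dirty_add(struct bcache_device *d,
				      uint64_t offset, int nr_sectors)
	{
		/* Assumes the span stays inside one stripe; a real version
		 * would split nr_sectors across stripe boundaries. */
		atomic_add(nr_sectors,
			   d->stripe_sectors_dirty +
			   (offset >> d->stripe_size_bits));
	}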
@@ -531,6 +530,7 @@ struct cached_dev {
 	unsigned		sequential_merge:1;
 	unsigned		verify:1;
 
+	unsigned		partial_stripes_expensive:1;
 	unsigned		writeback_metadata:1;
 	unsigned		writeback_running:1;
 	unsigned char		writeback_percent;
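partial_stripes_expensive flags backing devices, such as RAID 5/6 arrays, where a sub-stripe write costs a read-modify-write cycle; writeback can then favor stripes whose every sector is dirty. A hypothetical test built on the new per-stripe counters:

	static bool stripe_fully_dirty(struct bcache_device *d, unsigned stripe)
	{
		/* a stripe spans 1 << stripe_size_bits sectors */
		return atomic_read(d->stripe_sectors_dirty + stripe) ==
			1 << d->stripe_size_bits;
	}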
@@ -565,8 +565,7 @@ struct cache {
 
 	unsigned		watermark[WATERMARK_MAX];
 
-	struct closure		alloc;
-	struct workqueue_struct	*alloc_workqueue;
+	struct task_struct	*alloc_thread;
 
 	struct closure		prio;
 	struct prio_set		*disk_buckets;
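Replacing the closure and its private workqueue with a task_struct turns the allocator into an ordinary kernel thread. Its main loop would follow the usual kthread sleep/wake idiom, roughly like this sketch (the work-pending predicate is invented for illustration):

	static int bch_allocator_thread(void *arg)
	{
		struct cache *ca = arg;

		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!allocator_has_work(ca))	/* hypothetical */
				schedule();
			__set_current_state(TASK_RUNNING);

			/* ... invalidate buckets, refill the free lists ... */
		}

		return 0;
	}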
@@ -703,9 +702,6 @@ struct cache_set {
 	/* For the btree cache */
 	struct shrinker		shrink;
 
-	/* For the allocator itself */
-	wait_queue_head_t	alloc_wait;
-
 	/* For the btree cache and anything allocation related */
 	struct mutex		bucket_lock;
 
@@ -823,10 +819,9 @@ struct cache_set {
 
 	/*
 	 * A btree node on disk could have too many bsets for an iterator to fit
-	 * on the stack - this is a single element mempool for btree_read_work()
+	 * on the stack - have to dynamically allocate them
 	 */
-	struct mutex		fill_lock;
-	struct btree_iter	*fill_iter;
+	mempool_t		*fill_iter;
 
 	/*
 	 * btree_sort() is a merge sort and requires temporary space - single
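Dropping fill_lock in favor of a mempool means concurrent btree-node reads no longer serialize on one preallocated iterator; each read takes its own from the pool. The pattern, using the standard mempool API (the surrounding function and iter_size are hypothetical):

	/* at cache_set init, reserve one iterator's worth of memory:
	 * c->fill_iter = mempool_create_kmalloc_pool(1, iter_size); */

	static void read_node_bsets(struct cache_set *c, struct btree *b)
	{
		struct btree_iter *iter = mempool_alloc(c->fill_iter, GFP_NOIO);

		/* ... walk the node's bsets through iter ... */

		mempool_free(iter, c->fill_iter);
	}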
@@ -834,6 +829,7 @@ struct cache_set {
 	 */
 	struct mutex		sort_lock;
 	struct bset		*sort;
+	unsigned		sort_crit_factor;
 
 	/* List of buckets we're currently writing data to */
 	struct list_head	data_buckets;
@@ -906,8 +902,6 @@ static inline unsigned local_clock_us(void)
 	return local_clock() >> 10;
 }
 
-#define MAX_BSETS	4U
-
 #define BTREE_PRIO	USHRT_MAX
 #define INITIAL_PRIO	32768
 
@@ -1112,23 +1106,6 @@ static inline void __bkey_put(struct cache_set *c, struct bkey *k)
 		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
 }
 
-/* Blktrace macros */
-
-#define blktrace_msg(c, fmt, ...)					\
-do {									\
-	struct request_queue *q = bdev_get_queue(c->bdev);		\
-	if (q)								\
-		blk_add_trace_msg(q, fmt, ##__VA_ARGS__);		\
-} while (0)
-
-#define blktrace_msg_all(s, fmt, ...)					\
-do {									\
-	struct cache *_c;						\
-	unsigned i;							\
-	for_each_cache(_c, (s), i)					\
-		blktrace_msg(_c, fmt, ##__VA_ARGS__);			\
-} while (0)
-
 static inline void cached_dev_put(struct cached_dev *dc)
 {
 	if (atomic_dec_and_test(&dc->count))
@@ -1173,10 +1150,16 @@ static inline uint8_t bucket_disk_gen(struct bucket *b)
 static struct kobj_attribute ksysfs_##n =				\
 	__ATTR(n, S_IWUSR|S_IRUSR, show, store)
 
-/* Forward declarations */
+static inline void wake_up_allocators(struct cache_set *c)
+{
+	struct cache *ca;
+	unsigned i;
+
+	for_each_cache(ca, c, i)
+		wake_up_process(ca->alloc_thread);
+}
 
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *, unsigned);
+/* Forward declarations */
 
 void bch_count_io_errors(struct cache *, int, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
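Taken together with the removal of alloc_wait above, waking the allocators is now a direct wake_up_process() per cache thread rather than a wait-queue broadcast; since wake_up_process() on an already-running task is a harmless no-op, callers need not check the thread's state first.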
@@ -1193,7 +1176,6 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
 uint8_t bch_inc_gen(struct cache *, struct bucket *);
 void bch_rescale_priorities(struct cache_set *, int);
 bool bch_bucket_add_unused(struct cache *, struct bucket *);
-void bch_allocator_thread(struct closure *);
 
 long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
 void bch_bucket_free(struct cache_set *, struct bkey *);
@@ -1241,9 +1223,9 @@ void bch_cache_set_stop(struct cache_set *);
 struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
-void bch_cached_dev_writeback_init(struct cached_dev *);
 void bch_moving_init_cache_set(struct cache_set *);
 
+int bch_cache_allocator_start(struct cache *ca);
 void bch_cache_allocator_exit(struct cache *ca);
 int bch_cache_allocator_init(struct cache *ca);
 
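The newly declared bch_cache_allocator_start() presumably spawns the thread stored in cache->alloc_thread. A minimal sketch using the standard kthread API (only the declaration appears in this header; the body is an assumption):

	int bch_cache_allocator_start(struct cache *ca)
	{
		struct task_struct *k = kthread_run(bch_allocator_thread, ca,
						    "bcache_allocator");
		if (IS_ERR(k))
			return PTR_ERR(k);

		ca->alloc_thread = k;
		return 0;
	}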