| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-08 14:51:05 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-08 14:51:05 -0400 |
| commit | ebb37277796269da36a8bc5d72ed1e8e1fb7d34b (patch) | |
| tree | 0ded627a62a5cec70b18d12825dd858855c135d3 /drivers/md/bcache/bcache.h | |
| parent | 4de13d7aa8f4d02f4dc99d4609575659f92b3c5a (diff) | |
| parent | f50efd2fdbd9b35b11f5778ed85beb764184bda9 (diff) | |
Merge branch 'for-3.10/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
"It might look big in volume, but when categorized, not a lot of
drivers are touched. The pull request contains:
- mtip32xx fixes from Micron.
- A slew of drbd updates, this time in a nicer series.
- bcache, a flash/ssd caching framework from Kent.
- Fixes for cciss"
* 'for-3.10/drivers' of git://git.kernel.dk/linux-block: (66 commits)
bcache: Use bd_link_disk_holder()
bcache: Allocator cleanup/fixes
cciss: bug fix to prevent cciss from loading in kdump crash kernel
cciss: add cciss_allow_hpsa module parameter
drivers/block/mg_disk.c: add CONFIG_PM_SLEEP to suspend/resume functions
mtip32xx: Workaround for unaligned writes
bcache: Make sure blocksize isn't smaller than device blocksize
bcache: Fix merge_bvec_fn usage for when it modifies the bvm
bcache: Correctly check against BIO_MAX_PAGES
bcache: Hack around stuff that clones up to bi_max_vecs
bcache: Set ra_pages based on backing device's ra_pages
bcache: Take data offset from the bdev superblock.
mtip32xx: mtip32xx: Disable TRIM support
mtip32xx: fix a smatch warning
bcache: Disable broken btree fuzz tester
bcache: Fix a format string overflow
bcache: Fix a minor memory leak on device teardown
bcache: Documentation updates
bcache: Use WARN_ONCE() instead of __WARN()
bcache: Add missing #include <linux/prefetch.h>
...
Diffstat (limited to 'drivers/md/bcache/bcache.h')
| -rw-r--r-- | drivers/md/bcache/bcache.h | 1259 |
1 file changed, 1259 insertions, 0 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
new file mode 100644
index 000000000000..340146d7c17f
--- /dev/null
+++ b/drivers/md/bcache/bcache.h
| @@ -0,0 +1,1259 @@ | |||
| 1 | #ifndef _BCACHE_H | ||
| 2 | #define _BCACHE_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * SOME HIGH LEVEL CODE DOCUMENTATION: | ||
| 6 | * | ||
| 7 | * Bcache mostly works with cache sets, cache devices, and backing devices. | ||
| 8 | * | ||
| 9 | * Support for multiple cache devices hasn't quite been finished off yet, but | ||
| 10 | * it's about 95% plumbed through. A cache set and its cache devices are sort of | ||
| 11 | * like an md raid array and its component devices. Most of the code doesn't care | ||
| 12 | * about individual cache devices; the main abstraction is the cache set. | ||
| 13 | * | ||
| 14 | * Multiple cache devices are intended to give us the ability to mirror dirty | ||
| 15 | * cached data and metadata, without mirroring clean cached data. | ||
| 16 | * | ||
| 17 | * Backing devices are different, in that they have a lifetime independent of a | ||
| 18 | * cache set. When you register a newly formatted backing device it'll come up | ||
| 19 | * in passthrough mode, and then you can attach and detach a backing device from | ||
| 20 | * a cache set at runtime - while it's mounted and in use. Detaching implicitly | ||
| 21 | * invalidates any cached data for that backing device. | ||
| 22 | * | ||
| 23 | * A cache set can have multiple (many) backing devices attached to it. | ||
| 24 | * | ||
| 25 | * There are also flash only volumes - this is the reason for the distinction | ||
| 26 | * between struct cached_dev and struct bcache_device. A flash only volume | ||
| 27 | * works much like a bcache device that has a backing device, except the | ||
| 28 | * "cached" data is always dirty. The end result is that we get thin | ||
| 29 | * provisioning with very little additional code. | ||
| 30 | * | ||
| 31 | * Flash only volumes work but they're not production ready because the moving | ||
| 32 | * garbage collector needs more work. More on that later. | ||
| 33 | * | ||
| 34 | * BUCKETS/ALLOCATION: | ||
| 35 | * | ||
| 36 | * Bcache is primarily designed for caching, which means that in normal | ||
| 37 | * operation all of our available space will be allocated. Thus, we need an | ||
| 38 | * efficient way of deleting things from the cache so we can write new things to | ||
| 39 | * it. | ||
| 40 | * | ||
| 41 | * To do this, we first divide the cache device up into buckets. A bucket is the | ||
| 42 | * unit of allocation; they're typically around 1 MB - anywhere from 128 KB to 2 MB+ | ||
| 43 | * works efficiently. | ||
| 44 | * | ||
| 45 | * Each bucket has a 16 bit priority, and an 8 bit generation associated with | ||
| 46 | * it. The gens and priorities for all the buckets are stored contiguously and | ||
| 47 | * packed on disk (in a linked list of buckets - aside from the superblock, all | ||
| 48 | * of bcache's metadata is stored in buckets). | ||
| 49 | * | ||
| 50 | * The priority is used to implement an LRU. We reset a bucket's priority when | ||
| 51 | * we allocate it or on a cache hit, and every so often we decrement the priority | ||
| 52 | * of each bucket. It could be used to implement something more sophisticated, | ||
| 53 | * if anyone ever gets around to it. | ||
| 54 | * | ||
| 55 | * The generation is used for invalidating buckets. Each pointer also has an 8 | ||
| 56 | * bit generation embedded in it; for a pointer to be considered valid, its gen | ||
| 57 | * must match the gen of the bucket it points into. Thus, to reuse a bucket all | ||
| 58 | * we have to do is increment its gen (and write its new gen to disk; we batch | ||
| 59 | * this up). | ||
| 60 | * | ||
| 61 | * Bcache is entirely COW - we never write twice to a bucket, even buckets that | ||
| 62 | * contain metadata (including btree nodes). | ||
| 63 | * | ||
| 64 | * THE BTREE: | ||
| 65 | * | ||
| 66 | * Bcache is in large part designed around the btree. | ||
| 67 | * | ||
| 68 | * At a high level, the btree is just an index of key -> ptr tuples. | ||
| 69 | * | ||
| 70 | * Keys represent extents, and thus have a size field. Keys also have a variable | ||
| 71 | * number of pointers attached to them (potentially zero, which is handy for | ||
| 72 | * invalidating the cache). | ||
| 73 | * | ||
| 74 | * The key itself is an inode:offset pair. The inode number corresponds to a | ||
| 75 | * backing device or a flash only volume. The offset is the ending offset of the | ||
| 76 | * extent within the inode - not the starting offset; this makes lookups | ||
| 77 | * slightly more convenient. | ||
| 78 | * | ||
| 79 | * Pointers contain the cache device id, the offset on that device, and an 8 bit | ||
| 80 | * generation number. More on the gen later. | ||
| 81 | * | ||
| 82 | * Index lookups are not fully abstracted - cache lookups in particular are | ||
| 83 | * still somewhat mixed in with the btree code, but things are headed in that | ||
| 84 | * direction. | ||
| 85 | * | ||
| 86 | * Updates are fairly well abstracted, though. There are two different ways of | ||
| 87 | * updating the btree; insert and replace. | ||
| 88 | * | ||
| 89 | * BTREE_INSERT will just take a list of keys and insert them into the btree - | ||
| 90 | * overwriting (possibly only partially) any extents they overlap with. This is | ||
| 91 | * used to update the index after a write. | ||
| 92 | * | ||
| 93 | * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is | ||
| 94 | * overwriting a key that matches another given key. This is used for inserting | ||
| 95 | * data into the cache after a cache miss, and for background writeback, and for | ||
| 96 | * the moving garbage collector. | ||
| 97 | * | ||
| 98 | * There is no "delete" operation; deleting things from the index is | ||
| 99 | * accomplished either by invalidating pointers (by incrementing a bucket's | ||
| 100 | * gen) or by inserting a key with 0 pointers - which will overwrite anything | ||
| 101 | * previously present at that location in the index. | ||
| 102 | * | ||
| 103 | * This means that there are always stale/invalid keys in the btree. They're | ||
| 104 | * filtered out by the code that iterates through a btree node, and removed when | ||
| 105 | * a btree node is rewritten. | ||
| 106 | * | ||
| 107 | * BTREE NODES: | ||
| 108 | * | ||
| 109 | * Our unit of allocation is a bucket, and we can't arbitrarily allocate and | ||
| 110 | * free smaller than a bucket - so, that's how big our btree nodes are. | ||
| 111 | * | ||
| 112 | * (If buckets are really big we'll only use part of the bucket for a btree node | ||
| 113 | * - no less than 1/4th - but a bucket still contains no more than a single | ||
| 114 | * btree node. I'd actually like to change this, but for now we rely on the | ||
| 115 | * bucket's gen for deleting btree nodes when we rewrite/split a node.) | ||
| 116 | * | ||
| 117 | * Anyways, btree nodes are big - big enough to be inefficient with a textbook | ||
| 118 | * btree implementation. | ||
| 119 | * | ||
| 120 | * The way this is solved is that btree nodes are internally log structured; we | ||
| 121 | * can append new keys to an existing btree node without rewriting it. This | ||
| 122 | * means each set of keys we write is sorted, but the node is not. | ||
| 123 | * | ||
| 124 | * We maintain this log structure in memory - keeping 1 MB of keys sorted would | ||
| 125 | * be expensive, and we have to distinguish between the keys we have written and | ||
| 126 | * the keys we haven't. So to do a lookup in a btree node, we have to search | ||
| 127 | * each sorted set. But we do merge written sets together lazily, so the cost of | ||
| 128 | * these extra searches is quite low (normally most of the keys in a btree node | ||
| 129 | * will be in one big set, and then there'll be one or two sets that are much | ||
| 130 | * smaller). | ||
| 131 | * | ||
| 132 | * This log structure makes bcache's btree more of a hybrid between a | ||
| 133 | * conventional btree and a compacting data structure, with some of the | ||
| 134 | * advantages of both. | ||
| 135 | * | ||
| 136 | * GARBAGE COLLECTION: | ||
| 137 | * | ||
| 138 | * We can't just invalidate any bucket - it might contain dirty data or | ||
| 139 | * metadata. If it once contained dirty data, other writes might overwrite it | ||
| 140 | * later, leaving no valid pointers into that bucket in the index. | ||
| 141 | * | ||
| 142 | * Thus, the primary purpose of garbage collection is to find buckets to reuse. | ||
| 143 | * It also counts how much valid data each bucket currently contains, so that | ||
| 144 | * allocation can reuse buckets sooner when they've been mostly overwritten. | ||
| 145 | * | ||
| 146 | * It also does some things that are really internal to the btree | ||
| 147 | * implementation. If a btree node contains pointers that are stale by more than | ||
| 148 | * some threshold, it rewrites the btree node to avoid the bucket's generation | ||
| 149 | * wrapping around. It also merges adjacent btree nodes if they're empty enough. | ||
| 150 | * | ||
| 151 | * THE JOURNAL: | ||
| 152 | * | ||
| 153 | * Bcache's journal is not necessary for consistency; we always strictly | ||
| 154 | * order metadata writes so that the btree and everything else is consistent on | ||
| 155 | * disk in the event of an unclean shutdown, and in fact bcache had writeback | ||
| 156 | * caching (with recovery from unclean shutdown) before journalling was | ||
| 157 | * implemented. | ||
| 158 | * | ||
| 159 | * Rather, the journal is purely a performance optimization; we can't complete a | ||
| 160 | * write until we've updated the index on disk, otherwise the cache would be | ||
| 161 | * inconsistent in the event of an unclean shutdown. This means that without the | ||
| 162 | * journal, on random write workloads we constantly have to update all the leaf | ||
| 163 | * nodes in the btree, and those writes will be mostly empty (appending at most | ||
| 164 | * a few keys each) - highly inefficient in terms of amount of metadata writes, | ||
| 165 | * and it puts more strain on the various btree resorting/compacting code. | ||
| 166 | * | ||
| 167 | * The journal is just a log of keys we've inserted; on startup we just reinsert | ||
| 168 | * all the keys in the open journal entries. That means that when we're updating | ||
| 169 | * a node in the btree, we can wait until a 4k block of keys fills up before | ||
| 170 | * writing them out. | ||
| 171 | * | ||
| 172 | * For simplicity, we only journal updates to leaf nodes; updates to parent | ||
| 173 | * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth | ||
| 174 | * the complexity to deal with journalling them (in particular, journal replay) | ||
| 175 | * - updates to non leaf nodes just happen synchronously (see btree_split()). | ||
| 176 | */ | ||
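Picking up the LRU description from the BUCKETS/ALLOCATION section above: the behaviour - reset a bucket's priority when it is allocated or hit, periodically decay every bucket's priority, invalidate the lowest-priority buckets first - can be sketched in a few lines. This is an illustration only, not code from this header; the `example_` names are made up, and only the 32768 starting value corresponds to a real constant (INITIAL_PRIO, defined further down).

```c
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_INITIAL_PRIO	32768	/* mirrors INITIAL_PRIO below */

struct example_bucket {
	uint16_t prio;
};

/* A cache hit or a fresh allocation makes the bucket "young" again. */
static void example_touch(struct example_bucket *b)
{
	b->prio = EXAMPLE_INITIAL_PRIO;
}

/* The periodic rescale ages every bucket; the allocator then invalidates the
 * buckets with the lowest (oldest) priorities first. */
static void example_rescale(struct example_bucket *buckets, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (buckets[i].prio)
			buckets[i].prio--;
}
```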
| 177 | |||
| 178 | #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ | ||
| 179 | |||
| 180 | #include <linux/bio.h> | ||
| 181 | #include <linux/blktrace_api.h> | ||
| 182 | #include <linux/kobject.h> | ||
| 183 | #include <linux/list.h> | ||
| 184 | #include <linux/mutex.h> | ||
| 185 | #include <linux/rbtree.h> | ||
| 186 | #include <linux/rwsem.h> | ||
| 187 | #include <linux/types.h> | ||
| 188 | #include <linux/workqueue.h> | ||
| 189 | |||
| 190 | #include "util.h" | ||
| 191 | #include "closure.h" | ||
| 192 | |||
| 193 | struct bucket { | ||
| 194 | atomic_t pin; | ||
| 195 | uint16_t prio; | ||
| 196 | uint8_t gen; | ||
| 197 | uint8_t disk_gen; | ||
| 198 | uint8_t last_gc; /* Most out of date gen in the btree */ | ||
| 199 | uint8_t gc_gen; | ||
| 200 | uint16_t gc_mark; | ||
| 201 | }; | ||
| 202 | |||
| 203 | /* | ||
| 204 | * I'd use bitfields for these, but I don't trust the compiler not to screw me | ||
| 205 | * as multiple threads touch struct bucket without locking | ||
| 206 | */ | ||
| 207 | |||
| 208 | BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); | ||
| 209 | #define GC_MARK_RECLAIMABLE 0 | ||
| 210 | #define GC_MARK_DIRTY 1 | ||
| 211 | #define GC_MARK_METADATA 2 | ||
| 212 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); | ||
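BITMASK() itself comes from util.h and isn't shown in this file. As a rough sketch of its shape (an assumption, not a verbatim copy - though the PTR_FIELD() macro near the end of this header follows the same pattern), it generates a getter/setter pair over bits [offset, offset + size) of the named field, so GC_MARK(b) and SET_GC_MARK(b, v) read and write two bits of gc_mark:

```c
/* Sketch only - the real BITMASK() lives in util.h. */
#define EXAMPLE_BITMASK(name, type, field, offset, size)		\
static inline uint64_t name(const type *k)				\
{ return (k->field >> (offset)) & ~(~0ULL << (size)); }		\
									\
static inline void SET_##name(type *k, uint64_t v)			\
{									\
	k->field &= ~(~(~0ULL << (size)) << (offset));			\
	k->field |= v << (offset);					\
}
```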
| 213 | |||
| 214 | struct bkey { | ||
| 215 | uint64_t high; | ||
| 216 | uint64_t low; | ||
| 217 | uint64_t ptr[]; | ||
| 218 | }; | ||
| 219 | |||
| 220 | /* Enough for a key with 6 pointers */ | ||
| 221 | #define BKEY_PAD 8 | ||
| 222 | |||
| 223 | #define BKEY_PADDED(key) \ | ||
| 224 | union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; } | ||
| 225 | |||
| 226 | /* Version 0: Cache device | ||
| 227 | * Version 1: Backing device | ||
| 228 | * Version 2: Seed pointer into btree node checksum | ||
| 229 | * Version 3: Cache device with new UUID format | ||
| 230 | * Version 4: Backing device with data offset | ||
| 231 | */ | ||
| 232 | #define BCACHE_SB_VERSION_CDEV 0 | ||
| 233 | #define BCACHE_SB_VERSION_BDEV 1 | ||
| 234 | #define BCACHE_SB_VERSION_CDEV_WITH_UUID 3 | ||
| 235 | #define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4 | ||
| 236 | #define BCACHE_SB_MAX_VERSION 4 | ||
| 237 | |||
| 238 | #define SB_SECTOR 8 | ||
| 239 | #define SB_SIZE 4096 | ||
| 240 | #define SB_LABEL_SIZE 32 | ||
| 241 | #define SB_JOURNAL_BUCKETS 256U | ||
| 242 | /* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */ | ||
| 243 | #define MAX_CACHES_PER_SET 8 | ||
| 244 | |||
| 245 | #define BDEV_DATA_START_DEFAULT 16 /* sectors */ | ||
| 246 | |||
| 247 | struct cache_sb { | ||
| 248 | uint64_t csum; | ||
| 249 | uint64_t offset; /* sector where this sb was written */ | ||
| 250 | uint64_t version; | ||
| 251 | |||
| 252 | uint8_t magic[16]; | ||
| 253 | |||
| 254 | uint8_t uuid[16]; | ||
| 255 | union { | ||
| 256 | uint8_t set_uuid[16]; | ||
| 257 | uint64_t set_magic; | ||
| 258 | }; | ||
| 259 | uint8_t label[SB_LABEL_SIZE]; | ||
| 260 | |||
| 261 | uint64_t flags; | ||
| 262 | uint64_t seq; | ||
| 263 | uint64_t pad[8]; | ||
| 264 | |||
| 265 | union { | ||
| 266 | struct { | ||
| 267 | /* Cache devices */ | ||
| 268 | uint64_t nbuckets; /* device size */ | ||
| 269 | |||
| 270 | uint16_t block_size; /* sectors */ | ||
| 271 | uint16_t bucket_size; /* sectors */ | ||
| 272 | |||
| 273 | uint16_t nr_in_set; | ||
| 274 | uint16_t nr_this_dev; | ||
| 275 | }; | ||
| 276 | struct { | ||
| 277 | /* Backing devices */ | ||
| 278 | uint64_t data_offset; | ||
| 279 | |||
| 280 | /* | ||
| 281 | * block_size from the cache device section is still used by | ||
| 282 | * backing devices, so don't add anything here until we fix | ||
| 283 | * things to not need it for backing devices anymore | ||
| 284 | */ | ||
| 285 | }; | ||
| 286 | }; | ||
| 287 | |||
| 288 | uint32_t last_mount; /* time_t */ | ||
| 289 | |||
| 290 | uint16_t first_bucket; | ||
| 291 | union { | ||
| 292 | uint16_t njournal_buckets; | ||
| 293 | uint16_t keys; | ||
| 294 | }; | ||
| 295 | uint64_t d[SB_JOURNAL_BUCKETS]; /* journal buckets */ | ||
| 296 | }; | ||
| 297 | |||
| 298 | BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1); | ||
| 299 | BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1); | ||
| 300 | BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3); | ||
| 301 | #define CACHE_REPLACEMENT_LRU 0U | ||
| 302 | #define CACHE_REPLACEMENT_FIFO 1U | ||
| 303 | #define CACHE_REPLACEMENT_RANDOM 2U | ||
| 304 | |||
| 305 | BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4); | ||
| 306 | #define CACHE_MODE_WRITETHROUGH 0U | ||
| 307 | #define CACHE_MODE_WRITEBACK 1U | ||
| 308 | #define CACHE_MODE_WRITEAROUND 2U | ||
| 309 | #define CACHE_MODE_NONE 3U | ||
| 310 | BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2); | ||
| 311 | #define BDEV_STATE_NONE 0U | ||
| 312 | #define BDEV_STATE_CLEAN 1U | ||
| 313 | #define BDEV_STATE_DIRTY 2U | ||
| 314 | #define BDEV_STATE_STALE 3U | ||
| 315 | |||
| 316 | /* Version 1: Seed pointer into btree node checksum | ||
| 317 | */ | ||
| 318 | #define BCACHE_BSET_VERSION 1 | ||
| 319 | |||
| 320 | /* | ||
| 321 | * This is the on disk format for btree nodes - a btree node on disk is a list | ||
| 322 | * of these; within each set the keys are sorted | ||
| 323 | */ | ||
| 324 | struct bset { | ||
| 325 | uint64_t csum; | ||
| 326 | uint64_t magic; | ||
| 327 | uint64_t seq; | ||
| 328 | uint32_t version; | ||
| 329 | uint32_t keys; | ||
| 330 | |||
| 331 | union { | ||
| 332 | struct bkey start[0]; | ||
| 333 | uint64_t d[0]; | ||
| 334 | }; | ||
| 335 | }; | ||
| 336 | |||
| 337 | /* | ||
| 338 | * On disk format for priorities and gens - see super.c near prio_write() for | ||
| 339 | * more. | ||
| 340 | */ | ||
| 341 | struct prio_set { | ||
| 342 | uint64_t csum; | ||
| 343 | uint64_t magic; | ||
| 344 | uint64_t seq; | ||
| 345 | uint32_t version; | ||
| 346 | uint32_t pad; | ||
| 347 | |||
| 348 | uint64_t next_bucket; | ||
| 349 | |||
| 350 | struct bucket_disk { | ||
| 351 | uint16_t prio; | ||
| 352 | uint8_t gen; | ||
| 353 | } __attribute((packed)) data[]; | ||
| 354 | }; | ||
| 355 | |||
| 356 | struct uuid_entry { | ||
| 357 | union { | ||
| 358 | struct { | ||
| 359 | uint8_t uuid[16]; | ||
| 360 | uint8_t label[32]; | ||
| 361 | uint32_t first_reg; | ||
| 362 | uint32_t last_reg; | ||
| 363 | uint32_t invalidated; | ||
| 364 | |||
| 365 | uint32_t flags; | ||
| 366 | /* Size of flash only volumes */ | ||
| 367 | uint64_t sectors; | ||
| 368 | }; | ||
| 369 | |||
| 370 | uint8_t pad[128]; | ||
| 371 | }; | ||
| 372 | }; | ||
| 373 | |||
| 374 | BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1); | ||
| 375 | |||
| 376 | #include "journal.h" | ||
| 377 | #include "stats.h" | ||
| 378 | struct search; | ||
| 379 | struct btree; | ||
| 380 | struct keybuf; | ||
| 381 | |||
| 382 | struct keybuf_key { | ||
| 383 | struct rb_node node; | ||
| 384 | BKEY_PADDED(key); | ||
| 385 | void *private; | ||
| 386 | }; | ||
| 387 | |||
| 388 | typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); | ||
| 389 | |||
| 390 | struct keybuf { | ||
| 391 | keybuf_pred_fn *key_predicate; | ||
| 392 | |||
| 393 | struct bkey last_scanned; | ||
| 394 | spinlock_t lock; | ||
| 395 | |||
| 396 | /* | ||
| 397 | * Beginning and end of range in rb tree - so that we can skip taking the | ||
| 398 | * lock and checking the rb tree when we need to check for overlapping | ||
| 399 | * keys. | ||
| 400 | */ | ||
| 401 | struct bkey start; | ||
| 402 | struct bkey end; | ||
| 403 | |||
| 404 | struct rb_root keys; | ||
| 405 | |||
| 406 | #define KEYBUF_NR 100 | ||
| 407 | DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR); | ||
| 408 | }; | ||
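A hedged example of the predicate shape above (`example_dirty_pred` is a made-up name, not something this header provides): a refill predicate that only admits keys pointing at dirty data, presumably close to what the writeback code wants when refilling writeback_keys. KEY_DIRTY() is defined further down in this header.

```c
/* Illustration only: admit a key into the keybuf iff it points at dirty data. */
static bool example_dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}
```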
| 409 | |||
| 410 | struct bio_split_pool { | ||
| 411 | struct bio_set *bio_split; | ||
| 412 | mempool_t *bio_split_hook; | ||
| 413 | }; | ||
| 414 | |||
| 415 | struct bio_split_hook { | ||
| 416 | struct closure cl; | ||
| 417 | struct bio_split_pool *p; | ||
| 418 | struct bio *bio; | ||
| 419 | bio_end_io_t *bi_end_io; | ||
| 420 | void *bi_private; | ||
| 421 | }; | ||
| 422 | |||
| 423 | struct bcache_device { | ||
| 424 | struct closure cl; | ||
| 425 | |||
| 426 | struct kobject kobj; | ||
| 427 | |||
| 428 | struct cache_set *c; | ||
| 429 | unsigned id; | ||
| 430 | #define BCACHEDEVNAME_SIZE 12 | ||
| 431 | char name[BCACHEDEVNAME_SIZE]; | ||
| 432 | |||
| 433 | struct gendisk *disk; | ||
| 434 | |||
| 435 | /* If nonzero, we're closing */ | ||
| 436 | atomic_t closing; | ||
| 437 | |||
| 438 | /* If nonzero, we're detaching/unregistering from cache set */ | ||
| 439 | atomic_t detaching; | ||
| 440 | |||
| 441 | atomic_long_t sectors_dirty; | ||
| 442 | unsigned long sectors_dirty_gc; | ||
| 443 | unsigned long sectors_dirty_last; | ||
| 444 | long sectors_dirty_derivative; | ||
| 445 | |||
| 446 | mempool_t *unaligned_bvec; | ||
| 447 | struct bio_set *bio_split; | ||
| 448 | |||
| 449 | unsigned data_csum:1; | ||
| 450 | |||
| 451 | int (*cache_miss)(struct btree *, struct search *, | ||
| 452 | struct bio *, unsigned); | ||
| 453 | int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long); | ||
| 454 | |||
| 455 | struct bio_split_pool bio_split_hook; | ||
| 456 | }; | ||
| 457 | |||
| 458 | struct io { | ||
| 459 | /* Used to track sequential IO so it can be skipped */ | ||
| 460 | struct hlist_node hash; | ||
| 461 | struct list_head lru; | ||
| 462 | |||
| 463 | unsigned long jiffies; | ||
| 464 | unsigned sequential; | ||
| 465 | sector_t last; | ||
| 466 | }; | ||
| 467 | |||
| 468 | struct cached_dev { | ||
| 469 | struct list_head list; | ||
| 470 | struct bcache_device disk; | ||
| 471 | struct block_device *bdev; | ||
| 472 | |||
| 473 | struct cache_sb sb; | ||
| 474 | struct bio sb_bio; | ||
| 475 | struct bio_vec sb_bv[1]; | ||
| 476 | struct closure_with_waitlist sb_write; | ||
| 477 | |||
| 478 | /* Refcount on the cache set. Always nonzero when we're caching. */ | ||
| 479 | atomic_t count; | ||
| 480 | struct work_struct detach; | ||
| 481 | |||
| 482 | /* | ||
| 483 | * Device might not be running if it's dirty and the cache set hasn't | ||
| 484 | * shown up yet. | ||
| 485 | */ | ||
| 486 | atomic_t running; | ||
| 487 | |||
| 488 | /* | ||
| 489 | * Writes take a shared lock from start to finish; scanning for dirty | ||
| 490 | * data to refill the rb tree requires an exclusive lock. | ||
| 491 | */ | ||
| 492 | struct rw_semaphore writeback_lock; | ||
| 493 | |||
| 494 | /* | ||
| 495 | * Nonzero, and writeback has a refcount (d->count), iff there is dirty | ||
| 496 | * data in the cache. Protected by writeback_lock; must have a | ||
| 497 | * shared lock to set and an exclusive lock to clear. | ||
| 498 | */ | ||
| 499 | atomic_t has_dirty; | ||
| 500 | |||
| 501 | struct ratelimit writeback_rate; | ||
| 502 | struct delayed_work writeback_rate_update; | ||
| 503 | |||
| 504 | /* | ||
| 505 | * Internal to the writeback code, so read_dirty() can keep track of | ||
| 506 | * where it's at. | ||
| 507 | */ | ||
| 508 | sector_t last_read; | ||
| 509 | |||
| 510 | /* Number of writeback bios in flight */ | ||
| 511 | atomic_t in_flight; | ||
| 512 | struct closure_with_timer writeback; | ||
| 513 | struct closure_waitlist writeback_wait; | ||
| 514 | |||
| 515 | struct keybuf writeback_keys; | ||
| 516 | |||
| 517 | /* For tracking sequential IO */ | ||
| 518 | #define RECENT_IO_BITS 7 | ||
| 519 | #define RECENT_IO (1 << RECENT_IO_BITS) | ||
| 520 | struct io io[RECENT_IO]; | ||
| 521 | struct hlist_head io_hash[RECENT_IO + 1]; | ||
| 522 | struct list_head io_lru; | ||
| 523 | spinlock_t io_lock; | ||
| 524 | |||
| 525 | struct cache_accounting accounting; | ||
| 526 | |||
| 527 | /* The rest of this all shows up in sysfs */ | ||
| 528 | unsigned sequential_cutoff; | ||
| 529 | unsigned readahead; | ||
| 530 | |||
| 531 | unsigned sequential_merge:1; | ||
| 532 | unsigned verify:1; | ||
| 533 | |||
| 534 | unsigned writeback_metadata:1; | ||
| 535 | unsigned writeback_running:1; | ||
| 536 | unsigned char writeback_percent; | ||
| 537 | unsigned writeback_delay; | ||
| 538 | |||
| 539 | int writeback_rate_change; | ||
| 540 | int64_t writeback_rate_derivative; | ||
| 541 | uint64_t writeback_rate_target; | ||
| 542 | |||
| 543 | unsigned writeback_rate_update_seconds; | ||
| 544 | unsigned writeback_rate_d_term; | ||
| 545 | unsigned writeback_rate_p_term_inverse; | ||
| 546 | unsigned writeback_rate_d_smooth; | ||
| 547 | }; | ||
| 548 | |||
| 549 | enum alloc_watermarks { | ||
| 550 | WATERMARK_PRIO, | ||
| 551 | WATERMARK_METADATA, | ||
| 552 | WATERMARK_MOVINGGC, | ||
| 553 | WATERMARK_NONE, | ||
| 554 | WATERMARK_MAX | ||
| 555 | }; | ||
| 556 | |||
| 557 | struct cache { | ||
| 558 | struct cache_set *set; | ||
| 559 | struct cache_sb sb; | ||
| 560 | struct bio sb_bio; | ||
| 561 | struct bio_vec sb_bv[1]; | ||
| 562 | |||
| 563 | struct kobject kobj; | ||
| 564 | struct block_device *bdev; | ||
| 565 | |||
| 566 | unsigned watermark[WATERMARK_MAX]; | ||
| 567 | |||
| 568 | struct closure alloc; | ||
| 569 | struct workqueue_struct *alloc_workqueue; | ||
| 570 | |||
| 571 | struct closure prio; | ||
| 572 | struct prio_set *disk_buckets; | ||
| 573 | |||
| 574 | /* | ||
| 575 | * When allocating new buckets, prio_write() gets first dibs - since we | ||
| 576 | * may not be able to allocate at all without writing priorities and gens. | ||
| 577 | * prio_buckets[] contains the last buckets we wrote priorities to (so | ||
| 578 | * gc can mark them as metadata), prio_next[] contains the buckets | ||
| 579 | * allocated for the next prio write. | ||
| 580 | */ | ||
| 581 | uint64_t *prio_buckets; | ||
| 582 | uint64_t *prio_last_buckets; | ||
| 583 | |||
| 584 | /* | ||
| 585 | * free: Buckets that are ready to be used | ||
| 586 | * | ||
| 587 | * free_inc: Incoming buckets - these are buckets that currently have | ||
| 588 | * cached data in them, and we can't reuse them until after we write | ||
| 589 | * their new gen to disk. After prio_write() finishes writing the new | ||
| 590 | * gens/prios, they'll be moved to the free list (and possibly discarded | ||
| 591 | * in the process) | ||
| 592 | * | ||
| 593 | * unused: GC found nothing pointing into these buckets (possibly | ||
| 594 | * because all the data they contained was overwritten), so we only | ||
| 595 | * need to discard them before they can be moved to the free list. | ||
| 596 | */ | ||
| 597 | DECLARE_FIFO(long, free); | ||
| 598 | DECLARE_FIFO(long, free_inc); | ||
| 599 | DECLARE_FIFO(long, unused); | ||
| 600 | |||
| 601 | size_t fifo_last_bucket; | ||
| 602 | |||
| 603 | /* Allocation stuff: */ | ||
| 604 | struct bucket *buckets; | ||
| 605 | |||
| 606 | DECLARE_HEAP(struct bucket *, heap); | ||
| 607 | |||
| 608 | /* | ||
| 609 | * max(gen - disk_gen) for all buckets. When it gets too big we have to | ||
| 610 | * call prio_write() to keep gens from wrapping. | ||
| 611 | */ | ||
| 612 | uint8_t need_save_prio; | ||
| 613 | unsigned gc_move_threshold; | ||
| 614 | |||
| 615 | /* | ||
| 616 | * If nonzero, we know we aren't going to find any buckets to invalidate | ||
| 617 | * until a gc finishes - otherwise we could pointlessly burn a ton of | ||
| 618 | * cpu | ||
| 619 | */ | ||
| 620 | unsigned invalidate_needs_gc:1; | ||
| 621 | |||
| 622 | bool discard; /* Get rid of? */ | ||
| 623 | |||
| 624 | /* | ||
| 625 | * We preallocate structs for issuing discards to buckets, and keep them | ||
| 626 | * on this list when they're not in use; do_discard() issues discards | ||
| 627 | * whenever there's work to do and is called by free_some_buckets() and | ||
| 628 | * when a discard finishes. | ||
| 629 | */ | ||
| 630 | atomic_t discards_in_flight; | ||
| 631 | struct list_head discards; | ||
| 632 | |||
| 633 | struct journal_device journal; | ||
| 634 | |||
| 635 | /* The rest of this all shows up in sysfs */ | ||
| 636 | #define IO_ERROR_SHIFT 20 | ||
| 637 | atomic_t io_errors; | ||
| 638 | atomic_t io_count; | ||
| 639 | |||
| 640 | atomic_long_t meta_sectors_written; | ||
| 641 | atomic_long_t btree_sectors_written; | ||
| 642 | atomic_long_t sectors_written; | ||
| 643 | |||
| 644 | struct bio_split_pool bio_split_hook; | ||
| 645 | }; | ||
| 646 | |||
| 647 | struct gc_stat { | ||
| 648 | size_t nodes; | ||
| 649 | size_t key_bytes; | ||
| 650 | |||
| 651 | size_t nkeys; | ||
| 652 | uint64_t data; /* sectors */ | ||
| 653 | uint64_t dirty; /* sectors */ | ||
| 654 | unsigned in_use; /* percent */ | ||
| 655 | }; | ||
| 656 | |||
| 657 | /* | ||
| 658 | * Flag bits, for how the cache set is shutting down, and what phase it's at: | ||
| 659 | * | ||
| 660 | * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching | ||
| 661 | * all the backing devices first (their cached data gets invalidated, and they | ||
| 662 | * won't automatically reattach). | ||
| 663 | * | ||
| 664 | * CACHE_SET_STOPPING always gets set first when we're closing down a cache set; | ||
| 665 | * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e. | ||
| 666 | * flushing dirty data). | ||
| 667 | * | ||
| 668 | * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down | ||
| 669 | * the allocation thread. | ||
| 670 | */ | ||
| 671 | #define CACHE_SET_UNREGISTERING 0 | ||
| 672 | #define CACHE_SET_STOPPING 1 | ||
| 673 | #define CACHE_SET_STOPPING_2 2 | ||
| 674 | |||
| 675 | struct cache_set { | ||
| 676 | struct closure cl; | ||
| 677 | |||
| 678 | struct list_head list; | ||
| 679 | struct kobject kobj; | ||
| 680 | struct kobject internal; | ||
| 681 | struct dentry *debug; | ||
| 682 | struct cache_accounting accounting; | ||
| 683 | |||
| 684 | unsigned long flags; | ||
| 685 | |||
| 686 | struct cache_sb sb; | ||
| 687 | |||
| 688 | struct cache *cache[MAX_CACHES_PER_SET]; | ||
| 689 | struct cache *cache_by_alloc[MAX_CACHES_PER_SET]; | ||
| 690 | int caches_loaded; | ||
| 691 | |||
| 692 | struct bcache_device **devices; | ||
| 693 | struct list_head cached_devs; | ||
| 694 | uint64_t cached_dev_sectors; | ||
| 695 | struct closure caching; | ||
| 696 | |||
| 697 | struct closure_with_waitlist sb_write; | ||
| 698 | |||
| 699 | mempool_t *search; | ||
| 700 | mempool_t *bio_meta; | ||
| 701 | struct bio_set *bio_split; | ||
| 702 | |||
| 703 | /* For the btree cache */ | ||
| 704 | struct shrinker shrink; | ||
| 705 | |||
| 706 | /* For the allocator itself */ | ||
| 707 | wait_queue_head_t alloc_wait; | ||
| 708 | |||
| 709 | /* For the btree cache and anything allocation related */ | ||
| 710 | struct mutex bucket_lock; | ||
| 711 | |||
| 712 | /* log2(bucket_size), in sectors */ | ||
| 713 | unsigned short bucket_bits; | ||
| 714 | |||
| 715 | /* log2(block_size), in sectors */ | ||
| 716 | unsigned short block_bits; | ||
| 717 | |||
| 718 | /* | ||
| 719 | * Default number of pages for a new btree node - may be less than a | ||
| 720 | * full bucket | ||
| 721 | */ | ||
| 722 | unsigned btree_pages; | ||
| 723 | |||
| 724 | /* | ||
| 725 | * Lists of struct btrees; lru is the list for structs that have memory | ||
| 726 | * allocated for an actual btree node; freed is for structs that do not. | ||
| 727 | * | ||
| 728 | * We never free a struct btree, except on shutdown - we just put it on | ||
| 729 | * the btree_cache_freed list and reuse it later. This simplifies the | ||
| 730 | * code, and it doesn't cost us much memory as the memory usage is | ||
| 731 | * dominated by buffers that hold the actual btree node data and those | ||
| 732 | * can be freed - and the number of struct btrees allocated is | ||
| 733 | * effectively bounded. | ||
| 734 | * | ||
| 735 | * btree_cache_freeable effectively is a small cache - we use it because | ||
| 736 | * high order page allocations can be rather expensive, and it's quite | ||
| 737 | * common to delete and allocate btree nodes in quick succession. It | ||
| 738 | * should never grow past ~2-3 nodes in practice. | ||
| 739 | */ | ||
| 740 | struct list_head btree_cache; | ||
| 741 | struct list_head btree_cache_freeable; | ||
| 742 | struct list_head btree_cache_freed; | ||
| 743 | |||
| 744 | /* Number of elements in btree_cache + btree_cache_freeable lists */ | ||
| 745 | unsigned bucket_cache_used; | ||
| 746 | |||
| 747 | /* | ||
| 748 | * If we need to allocate memory for a new btree node and that | ||
| 749 | * allocation fails, we can cannibalize another node in the btree cache | ||
| 750 | * to satisfy the allocation. However, only one thread can be doing this | ||
| 751 | * at a time, for obvious reasons - try_harder and try_wait are | ||
| 752 | * basically a lock for this that we can wait on asynchronously. The | ||
| 753 | * btree_root() macro releases the lock when it returns. | ||
| 754 | */ | ||
| 755 | struct closure *try_harder; | ||
| 756 | struct closure_waitlist try_wait; | ||
| 757 | uint64_t try_harder_start; | ||
| 758 | |||
| 759 | /* | ||
| 760 | * When we free a btree node, we increment the gen of the bucket the | ||
| 761 | * node is in - but we can't rewrite the prios and gens until we've | ||
| 762 | * finished whatever it is we were doing, otherwise after a crash the | ||
| 763 | * btree node would be freed but for say a split, we might not have the | ||
| 764 | * pointers to the new nodes inserted into the btree yet. | ||
| 765 | * | ||
| 766 | * This is a refcount that blocks prio_write() until the new keys are | ||
| 767 | * written. | ||
| 768 | */ | ||
| 769 | atomic_t prio_blocked; | ||
| 770 | struct closure_waitlist bucket_wait; | ||
| 771 | |||
| 772 | /* | ||
| 773 | * For any bio we don't skip we subtract the number of sectors from | ||
| 774 | * rescale; when it hits 0 we rescale all the bucket priorities. | ||
| 775 | */ | ||
| 776 | atomic_t rescale; | ||
| 777 | /* | ||
| 778 | * When we invalidate buckets, we use both the priority and the amount | ||
| 779 | * of good data to determine which buckets to reuse first - to weight | ||
| 780 | * those together consistently we keep track of the smallest nonzero | ||
| 781 | * priority of any bucket. | ||
| 782 | */ | ||
| 783 | uint16_t min_prio; | ||
| 784 | |||
| 785 | /* | ||
| 786 | * max(gen - gc_gen) for all buckets. When it gets too big we have to gc | ||
| 787 | * to keep gens from wrapping around. | ||
| 788 | */ | ||
| 789 | uint8_t need_gc; | ||
| 790 | struct gc_stat gc_stats; | ||
| 791 | size_t nbuckets; | ||
| 792 | |||
| 793 | struct closure_with_waitlist gc; | ||
| 794 | /* Where in the btree gc currently is */ | ||
| 795 | struct bkey gc_done; | ||
| 796 | |||
| 797 | /* | ||
| 798 | * The allocation code needs gc_mark in struct bucket to be correct, but | ||
| 799 | * it's not while a gc is in progress. Protected by bucket_lock. | ||
| 800 | */ | ||
| 801 | int gc_mark_valid; | ||
| 802 | |||
| 803 | /* Counts how many sectors bio_insert has added to the cache */ | ||
| 804 | atomic_t sectors_to_gc; | ||
| 805 | |||
| 806 | struct closure moving_gc; | ||
| 807 | struct closure_waitlist moving_gc_wait; | ||
| 808 | struct keybuf moving_gc_keys; | ||
| 809 | /* Number of moving GC bios in flight */ | ||
| 810 | atomic_t in_flight; | ||
| 811 | |||
| 812 | struct btree *root; | ||
| 813 | |||
| 814 | #ifdef CONFIG_BCACHE_DEBUG | ||
| 815 | struct btree *verify_data; | ||
| 816 | struct mutex verify_lock; | ||
| 817 | #endif | ||
| 818 | |||
| 819 | unsigned nr_uuids; | ||
| 820 | struct uuid_entry *uuids; | ||
| 821 | BKEY_PADDED(uuid_bucket); | ||
| 822 | struct closure_with_waitlist uuid_write; | ||
| 823 | |||
| 824 | /* | ||
| 825 | * A btree node on disk could have too many bsets for an iterator to fit | ||
| 826 | * on the stack - this is a single element mempool for btree_read_work() | ||
| 827 | */ | ||
| 828 | struct mutex fill_lock; | ||
| 829 | struct btree_iter *fill_iter; | ||
| 830 | |||
| 831 | /* | ||
| 832 | * btree_sort() is a merge sort and requires temporary space - single | ||
| 833 | * element mempool | ||
| 834 | */ | ||
| 835 | struct mutex sort_lock; | ||
| 836 | struct bset *sort; | ||
| 837 | |||
| 838 | /* List of buckets we're currently writing data to */ | ||
| 839 | struct list_head data_buckets; | ||
| 840 | spinlock_t data_bucket_lock; | ||
| 841 | |||
| 842 | struct journal journal; | ||
| 843 | |||
| 844 | #define CONGESTED_MAX 1024 | ||
| 845 | unsigned congested_last_us; | ||
| 846 | atomic_t congested; | ||
| 847 | |||
| 848 | /* The rest of this all shows up in sysfs */ | ||
| 849 | unsigned congested_read_threshold_us; | ||
| 850 | unsigned congested_write_threshold_us; | ||
| 851 | |||
| 852 | spinlock_t sort_time_lock; | ||
| 853 | struct time_stats sort_time; | ||
| 854 | struct time_stats btree_gc_time; | ||
| 855 | struct time_stats btree_split_time; | ||
| 856 | spinlock_t btree_read_time_lock; | ||
| 857 | struct time_stats btree_read_time; | ||
| 858 | struct time_stats try_harder_time; | ||
| 859 | |||
| 860 | atomic_long_t cache_read_races; | ||
| 861 | atomic_long_t writeback_keys_done; | ||
| 862 | atomic_long_t writeback_keys_failed; | ||
| 863 | unsigned error_limit; | ||
| 864 | unsigned error_decay; | ||
| 865 | unsigned short journal_delay_ms; | ||
| 866 | unsigned verify:1; | ||
| 867 | unsigned key_merging_disabled:1; | ||
| 868 | unsigned gc_always_rewrite:1; | ||
| 869 | unsigned shrinker_disabled:1; | ||
| 870 | unsigned copy_gc_enabled:1; | ||
| 871 | |||
| 872 | #define BUCKET_HASH_BITS 12 | ||
| 873 | struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS]; | ||
| 874 | }; | ||
| 875 | |||
| 876 | static inline bool key_merging_disabled(struct cache_set *c) | ||
| 877 | { | ||
| 878 | #ifdef CONFIG_BCACHE_DEBUG | ||
| 879 | return c->key_merging_disabled; | ||
| 880 | #else | ||
| 881 | return 0; | ||
| 882 | #endif | ||
| 883 | } | ||
| 884 | |||
| 885 | static inline bool SB_IS_BDEV(const struct cache_sb *sb) | ||
| 886 | { | ||
| 887 | return sb->version == BCACHE_SB_VERSION_BDEV | ||
| 888 | || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET; | ||
| 889 | } | ||
| 890 | |||
| 891 | struct bbio { | ||
| 892 | unsigned submit_time_us; | ||
| 893 | union { | ||
| 894 | struct bkey key; | ||
| 895 | uint64_t _pad[3]; | ||
| 896 | /* | ||
| 897 | * We only need pad = 3 here because we only ever carry around a | ||
| 898 | * single pointer - i.e. the pointer we're doing io to/from. | ||
| 899 | */ | ||
| 900 | }; | ||
| 901 | struct bio bio; | ||
| 902 | }; | ||
| 903 | |||
| 904 | static inline unsigned local_clock_us(void) | ||
| 905 | { | ||
| 906 | return local_clock() >> 10; | ||
| 907 | } | ||
| 908 | |||
| 909 | #define MAX_BSETS 4U | ||
| 910 | |||
| 911 | #define BTREE_PRIO USHRT_MAX | ||
| 912 | #define INITIAL_PRIO 32768 | ||
| 913 | |||
| 914 | #define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE) | ||
| 915 | #define btree_blocks(b) \ | ||
| 916 | ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits)) | ||
| 917 | |||
| 918 | #define btree_default_blocks(c) \ | ||
| 919 | ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits)) | ||
| 920 | |||
| 921 | #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS) | ||
| 922 | #define bucket_bytes(c) ((c)->sb.bucket_size << 9) | ||
| 923 | #define block_bytes(c) ((c)->sb.block_size << 9) | ||
| 924 | |||
| 925 | #define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t)) | ||
| 926 | #define set_bytes(i) __set_bytes(i, i->keys) | ||
| 927 | |||
| 928 | #define __set_blocks(i, k, c) DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c)) | ||
| 929 | #define set_blocks(i, c) __set_blocks(i, (i)->keys, c) | ||
| 930 | |||
| 931 | #define node(i, j) ((struct bkey *) ((i)->d + (j))) | ||
| 932 | #define end(i) node(i, (i)->keys) | ||
| 933 | |||
| 934 | #define index(i, b) \ | ||
| 935 | ((size_t) (((void *) i - (void *) (b)->sets[0].data) / \ | ||
| 936 | block_bytes(b->c))) | ||
| 937 | |||
| 938 | #define btree_data_space(b) (PAGE_SIZE << (b)->page_order) | ||
| 939 | |||
| 940 | #define prios_per_bucket(c) \ | ||
| 941 | ((bucket_bytes(c) - sizeof(struct prio_set)) / \ | ||
| 942 | sizeof(struct bucket_disk)) | ||
| 943 | #define prio_buckets(c) \ | ||
| 944 | DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c)) | ||
| 945 | |||
| 946 | #define JSET_MAGIC 0x245235c1a3625032ULL | ||
| 947 | #define PSET_MAGIC 0x6750e15f87337f91ULL | ||
| 948 | #define BSET_MAGIC 0x90135c78b99e07f5ULL | ||
| 949 | |||
| 950 | #define jset_magic(c) ((c)->sb.set_magic ^ JSET_MAGIC) | ||
| 951 | #define pset_magic(c) ((c)->sb.set_magic ^ PSET_MAGIC) | ||
| 952 | #define bset_magic(c) ((c)->sb.set_magic ^ BSET_MAGIC) | ||
| 953 | |||
| 954 | /* Bkey fields: all units are in sectors */ | ||
| 955 | |||
| 956 | #define KEY_FIELD(name, field, offset, size) \ | ||
| 957 | BITMASK(name, struct bkey, field, offset, size) | ||
| 958 | |||
| 959 | #define PTR_FIELD(name, offset, size) \ | ||
| 960 | static inline uint64_t name(const struct bkey *k, unsigned i) \ | ||
| 961 | { return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); } \ | ||
| 962 | \ | ||
| 963 | static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\ | ||
| 964 | { \ | ||
| 965 | k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset); \ | ||
| 966 | k->ptr[i] |= v << offset; \ | ||
| 967 | } | ||
| 968 | |||
| 969 | KEY_FIELD(KEY_PTRS, high, 60, 3) | ||
| 970 | KEY_FIELD(HEADER_SIZE, high, 58, 2) | ||
| 971 | KEY_FIELD(KEY_CSUM, high, 56, 2) | ||
| 972 | KEY_FIELD(KEY_PINNED, high, 55, 1) | ||
| 973 | KEY_FIELD(KEY_DIRTY, high, 36, 1) | ||
| 974 | |||
| 975 | KEY_FIELD(KEY_SIZE, high, 20, 16) | ||
| 976 | KEY_FIELD(KEY_INODE, high, 0, 20) | ||
| 977 | |||
| 978 | /* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */ | ||
| 979 | |||
| 980 | static inline uint64_t KEY_OFFSET(const struct bkey *k) | ||
| 981 | { | ||
| 982 | return k->low; | ||
| 983 | } | ||
| 984 | |||
| 985 | static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v) | ||
| 986 | { | ||
| 987 | k->low = v; | ||
| 988 | } | ||
| 989 | |||
| 990 | PTR_FIELD(PTR_DEV, 51, 12) | ||
| 991 | PTR_FIELD(PTR_OFFSET, 8, 43) | ||
| 992 | PTR_FIELD(PTR_GEN, 0, 8) | ||
| 993 | |||
| 994 | #define PTR_CHECK_DEV ((1 << 12) - 1) | ||
| 995 | |||
| 996 | #define PTR(gen, offset, dev) \ | ||
| 997 | ((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen) | ||
| 998 | |||
| 999 | static inline size_t sector_to_bucket(struct cache_set *c, sector_t s) | ||
| 1000 | { | ||
| 1001 | return s >> c->bucket_bits; | ||
| 1002 | } | ||
| 1003 | |||
| 1004 | static inline sector_t bucket_to_sector(struct cache_set *c, size_t b) | ||
| 1005 | { | ||
| 1006 | return ((sector_t) b) << c->bucket_bits; | ||
| 1007 | } | ||
| 1008 | |||
| 1009 | static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) | ||
| 1010 | { | ||
| 1011 | return s & (c->sb.bucket_size - 1); | ||
| 1012 | } | ||
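As a standalone, illustration-only example of the arithmetic behind sector_to_bucket(), bucket_to_sector() and bucket_remainder(): with 1024-sector (512 KiB) buckets, bucket_bits is 10, so sector 5000 lands in bucket 4 at an offset of 904 sectors within that bucket.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned bucket_bits = 10;	/* log2 of a 1024 sector bucket */
	uint64_t sector = 5000;

	printf("bucket %llu, offset %llu sectors\n",
	       (unsigned long long)(sector >> bucket_bits),		  /* 4   */
	       (unsigned long long)(sector & ((1u << bucket_bits) - 1))); /* 904 */
	return 0;
}
```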
| 1013 | |||
| 1014 | static inline struct cache *PTR_CACHE(struct cache_set *c, | ||
| 1015 | const struct bkey *k, | ||
| 1016 | unsigned ptr) | ||
| 1017 | { | ||
| 1018 | return c->cache[PTR_DEV(k, ptr)]; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | static inline size_t PTR_BUCKET_NR(struct cache_set *c, | ||
| 1022 | const struct bkey *k, | ||
| 1023 | unsigned ptr) | ||
| 1024 | { | ||
| 1025 | return sector_to_bucket(c, PTR_OFFSET(k, ptr)); | ||
| 1026 | } | ||
| 1027 | |||
| 1028 | static inline struct bucket *PTR_BUCKET(struct cache_set *c, | ||
| 1029 | const struct bkey *k, | ||
| 1030 | unsigned ptr) | ||
| 1031 | { | ||
| 1032 | return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr); | ||
| 1033 | } | ||
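The generation rule from the header comment - a pointer is only valid while its embedded gen matches its bucket's gen - reduces to a comparison like the one below. This is a sketch with a made-up name, not the helper bcache actually uses:

```c
/* Illustration only: once a bucket is reused its gen is bumped, so every
 * pointer still carrying the old gen becomes stale and can be ignored. */
static inline bool example_ptr_stale(struct cache_set *c, const struct bkey *k,
				     unsigned ptr)
{
	return PTR_BUCKET(c, k, ptr)->gen != PTR_GEN(k, ptr);
}
```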
| 1034 | |||
| 1035 | /* Btree key macros */ | ||
| 1036 | |||
| 1037 | /* | ||
| 1038 | * The high bit being set is a relic from when we used it to do binary | ||
| 1039 | * searches - it told you where a key started. It's not used anymore, | ||
| 1040 | * and can probably be safely dropped. | ||
| 1041 | */ | ||
| 1042 | #define KEY(dev, sector, len) \ | ||
| 1043 | ((struct bkey) { \ | ||
| 1044 | .high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \ | ||
| 1045 | .low = (sector) \ | ||
| 1046 | }) | ||
| 1047 | |||
| 1048 | static inline void bkey_init(struct bkey *k) | ||
| 1049 | { | ||
| 1050 | *k = KEY(0, 0, 0); | ||
| 1051 | } | ||
| 1052 | |||
| 1053 | #define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k)) | ||
| 1054 | #define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0) | ||
| 1055 | #define MAX_KEY KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0) | ||
| 1056 | #define ZERO_KEY KEY(0, 0, 0) | ||
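Putting the key macros together, a hedged usage sketch (example_key_usage() is made up; the BUG_ON() calls are only there to show the expected values): an extent of 8 sectors ending at offset 1008 in inode 5, read back through the accessors above.

```c
static inline void example_key_usage(void)
{
	BKEY_PADDED(k) tmp;		/* stack keys use the padded union */

	tmp.k = KEY(5, 1008, 8);	/* inode 5, *ending* offset 1008, 8 sectors */

	BUG_ON(KEY_INODE(&tmp.k) != 5);
	BUG_ON(KEY_OFFSET(&tmp.k) != 1008);
	BUG_ON(KEY_SIZE(&tmp.k) != 8);
	BUG_ON(KEY_START(&tmp.k) != 1000);	/* start = end - size */
}
```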
| 1057 | |||
| 1058 | /* | ||
| 1059 | * This is used for various on disk data structures - cache_sb, prio_set, bset, | ||
| 1060 | * jset: The checksum is _always_ the first 8 bytes of these structs | ||
| 1061 | */ | ||
| 1062 | #define csum_set(i) \ | ||
| 1063 | bch_crc64(((void *) (i)) + sizeof(uint64_t), \ | ||
| 1064 | ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t))) | ||
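csum_set() is typically the last thing stamped onto one of these structures before it is written out. A minimal sketch of preparing a bset for writing, assuming the insert path has already set i->keys (the example_ name is made up; bch_crc64() used by the macro is declared elsewhere, in util.h):

```c
/* Illustration only: fill in the self-describing fields, then checksum
 * everything that follows the csum field itself. */
static inline void example_prep_bset_for_write(struct cache_set *c,
					       struct bset *i)
{
	i->version = BCACHE_BSET_VERSION;
	i->magic = bset_magic(c);
	i->csum = csum_set(i);	/* covers magic..end(i), not csum itself */
}
```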
| 1065 | |||
| 1066 | /* Error handling macros */ | ||
| 1067 | |||
| 1068 | #define btree_bug(b, ...) \ | ||
| 1069 | do { \ | ||
| 1070 | if (bch_cache_set_error((b)->c, __VA_ARGS__)) \ | ||
| 1071 | dump_stack(); \ | ||
| 1072 | } while (0) | ||
| 1073 | |||
| 1074 | #define cache_bug(c, ...) \ | ||
| 1075 | do { \ | ||
| 1076 | if (bch_cache_set_error(c, __VA_ARGS__)) \ | ||
| 1077 | dump_stack(); \ | ||
| 1078 | } while (0) | ||
| 1079 | |||
| 1080 | #define btree_bug_on(cond, b, ...) \ | ||
| 1081 | do { \ | ||
| 1082 | if (cond) \ | ||
| 1083 | btree_bug(b, __VA_ARGS__); \ | ||
| 1084 | } while (0) | ||
| 1085 | |||
| 1086 | #define cache_bug_on(cond, c, ...) \ | ||
| 1087 | do { \ | ||
| 1088 | if (cond) \ | ||
| 1089 | cache_bug(c, __VA_ARGS__); \ | ||
| 1090 | } while (0) | ||
| 1091 | |||
| 1092 | #define cache_set_err_on(cond, c, ...) \ | ||
| 1093 | do { \ | ||
| 1094 | if (cond) \ | ||
| 1095 | bch_cache_set_error(c, __VA_ARGS__); \ | ||
| 1096 | } while (0) | ||
| 1097 | |||
| 1098 | /* Looping macros */ | ||
| 1099 | |||
| 1100 | #define for_each_cache(ca, cs, iter) \ | ||
| 1101 | for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++) | ||
| 1102 | |||
| 1103 | #define for_each_bucket(b, ca) \ | ||
| 1104 | for (b = (ca)->buckets + (ca)->sb.first_bucket; \ | ||
| 1105 | b < (ca)->buckets + (ca)->sb.nbuckets; b++) | ||
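A hedged usage sketch for the two looping macros (the function name is made up; bucket_lock is assumed to be held so the gc marks are stable): count the buckets currently marked as holding metadata across every cache in the set.

```c
static inline size_t example_count_metadata_buckets(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;
	size_t ret = 0;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (GC_MARK(b) == GC_MARK_METADATA)
				ret++;

	return ret;
}
```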
| 1106 | |||
| 1107 | static inline void __bkey_put(struct cache_set *c, struct bkey *k) | ||
| 1108 | { | ||
| 1109 | unsigned i; | ||
| 1110 | |||
| 1111 | for (i = 0; i < KEY_PTRS(k); i++) | ||
| 1112 | atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | /* Blktrace macros */ | ||
| 1116 | |||
| 1117 | #define blktrace_msg(c, fmt, ...) \ | ||
| 1118 | do { \ | ||
| 1119 | struct request_queue *q = bdev_get_queue(c->bdev); \ | ||
| 1120 | if (q) \ | ||
| 1121 | blk_add_trace_msg(q, fmt, ##__VA_ARGS__); \ | ||
| 1122 | } while (0) | ||
| 1123 | |||
| 1124 | #define blktrace_msg_all(s, fmt, ...) \ | ||
| 1125 | do { \ | ||
| 1126 | struct cache *_c; \ | ||
| 1127 | unsigned i; \ | ||
| 1128 | for_each_cache(_c, (s), i) \ | ||
| 1129 | blktrace_msg(_c, fmt, ##__VA_ARGS__); \ | ||
| 1130 | } while (0) | ||
| 1131 | |||
| 1132 | static inline void cached_dev_put(struct cached_dev *dc) | ||
| 1133 | { | ||
| 1134 | if (atomic_dec_and_test(&dc->count)) | ||
| 1135 | schedule_work(&dc->detach); | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | static inline bool cached_dev_get(struct cached_dev *dc) | ||
| 1139 | { | ||
| 1140 | if (!atomic_inc_not_zero(&dc->count)) | ||
| 1141 | return false; | ||
| 1142 | |||
| 1143 | /* Paired with the mb in cached_dev_attach */ | ||
| 1144 | smp_mb__after_atomic_inc(); | ||
| 1145 | return true; | ||
| 1146 | } | ||
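The intended pattern for the pair above, sketched with a made-up helper: take a reference before relying on the attached cache set, drop it when the IO is done; if the get fails the device is detaching and the caller should go straight to the backing device.

```c
/* Illustration only - not a helper this header provides. */
static inline void example_cached_io(struct cached_dev *dc)
{
	if (!cached_dev_get(dc))
		return;		/* detaching: fall back to passthrough */

	/* ... issue IO that depends on the cache set staying attached ... */

	cached_dev_put(dc);
}
```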
| 1147 | |||
| 1148 | /* | ||
| 1149 | * bucket_gc_gen() returns the difference between the bucket's current gen and | ||
| 1150 | * the oldest gen of any pointer into that bucket in the btree (last_gc). | ||
| 1151 | * | ||
| 1152 | * bucket_disk_gen() returns the difference between the current gen and the gen | ||
| 1153 | * on disk; they're both used to make sure gens don't wrap around. | ||
| 1154 | */ | ||
| 1155 | |||
| 1156 | static inline uint8_t bucket_gc_gen(struct bucket *b) | ||
| 1157 | { | ||
| 1158 | return b->gen - b->last_gc; | ||
| 1159 | } | ||
| 1160 | |||
| 1161 | static inline uint8_t bucket_disk_gen(struct bucket *b) | ||
| 1162 | { | ||
| 1163 | return b->gen - b->disk_gen; | ||
| 1164 | } | ||
| 1165 | |||
| 1166 | #define BUCKET_GC_GEN_MAX 96U | ||
| 1167 | #define BUCKET_DISK_GEN_MAX 64U | ||
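A sketch of how these limits are meant to be used (illustration only, made-up name): before bumping a bucket's gen again, make sure neither difference is about to wrap the 8 bit counters - if it is, gc (for gc_gen) or prio_write() (for disk_gen) has to run first.

```c
static inline bool example_can_inc_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
	       bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
}
```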
| 1168 | |||
| 1169 | #define kobj_attribute_write(n, fn) \ | ||
| 1170 | static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn) | ||
| 1171 | |||
| 1172 | #define kobj_attribute_rw(n, show, store) \ | ||
| 1173 | static struct kobj_attribute ksysfs_##n = \ | ||
| 1174 | __ATTR(n, S_IWUSR|S_IRUSR, show, store) | ||
| 1175 | |||
| 1176 | /* Forward declarations */ | ||
| 1177 | |||
| 1178 | void bch_writeback_queue(struct cached_dev *); | ||
| 1179 | void bch_writeback_add(struct cached_dev *, unsigned); | ||
| 1180 | |||
| 1181 | void bch_count_io_errors(struct cache *, int, const char *); | ||
| 1182 | void bch_bbio_count_io_errors(struct cache_set *, struct bio *, | ||
| 1183 | int, const char *); | ||
| 1184 | void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *); | ||
| 1185 | void bch_bbio_free(struct bio *, struct cache_set *); | ||
| 1186 | struct bio *bch_bbio_alloc(struct cache_set *); | ||
| 1187 | |||
| 1188 | struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *); | ||
| 1189 | void bch_generic_make_request(struct bio *, struct bio_split_pool *); | ||
| 1190 | void __bch_submit_bbio(struct bio *, struct cache_set *); | ||
| 1191 | void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); | ||
| 1192 | |||
| 1193 | uint8_t bch_inc_gen(struct cache *, struct bucket *); | ||
| 1194 | void bch_rescale_priorities(struct cache_set *, int); | ||
| 1195 | bool bch_bucket_add_unused(struct cache *, struct bucket *); | ||
| 1196 | void bch_allocator_thread(struct closure *); | ||
| 1197 | |||
| 1198 | long bch_bucket_alloc(struct cache *, unsigned, struct closure *); | ||
| 1199 | void bch_bucket_free(struct cache_set *, struct bkey *); | ||
| 1200 | |||
| 1201 | int __bch_bucket_alloc_set(struct cache_set *, unsigned, | ||
| 1202 | struct bkey *, int, struct closure *); | ||
| 1203 | int bch_bucket_alloc_set(struct cache_set *, unsigned, | ||
| 1204 | struct bkey *, int, struct closure *); | ||
| 1205 | |||
| 1206 | __printf(2, 3) | ||
| 1207 | bool bch_cache_set_error(struct cache_set *, const char *, ...); | ||
| 1208 | |||
| 1209 | void bch_prio_write(struct cache *); | ||
| 1210 | void bch_write_bdev_super(struct cached_dev *, struct closure *); | ||
| 1211 | |||
| 1212 | extern struct workqueue_struct *bcache_wq, *bch_gc_wq; | ||
| 1213 | extern const char * const bch_cache_modes[]; | ||
| 1214 | extern struct mutex bch_register_lock; | ||
| 1215 | extern struct list_head bch_cache_sets; | ||
| 1216 | |||
| 1217 | extern struct kobj_type bch_cached_dev_ktype; | ||
| 1218 | extern struct kobj_type bch_flash_dev_ktype; | ||
| 1219 | extern struct kobj_type bch_cache_set_ktype; | ||
| 1220 | extern struct kobj_type bch_cache_set_internal_ktype; | ||
| 1221 | extern struct kobj_type bch_cache_ktype; | ||
| 1222 | |||
| 1223 | void bch_cached_dev_release(struct kobject *); | ||
| 1224 | void bch_flash_dev_release(struct kobject *); | ||
| 1225 | void bch_cache_set_release(struct kobject *); | ||
| 1226 | void bch_cache_release(struct kobject *); | ||
| 1227 | |||
| 1228 | int bch_uuid_write(struct cache_set *); | ||
| 1229 | void bcache_write_super(struct cache_set *); | ||
| 1230 | |||
| 1231 | int bch_flash_dev_create(struct cache_set *c, uint64_t size); | ||
| 1232 | |||
| 1233 | int bch_cached_dev_attach(struct cached_dev *, struct cache_set *); | ||
| 1234 | void bch_cached_dev_detach(struct cached_dev *); | ||
| 1235 | void bch_cached_dev_run(struct cached_dev *); | ||
| 1236 | void bcache_device_stop(struct bcache_device *); | ||
| 1237 | |||
| 1238 | void bch_cache_set_unregister(struct cache_set *); | ||
| 1239 | void bch_cache_set_stop(struct cache_set *); | ||
| 1240 | |||
| 1241 | struct cache_set *bch_cache_set_alloc(struct cache_sb *); | ||
| 1242 | void bch_btree_cache_free(struct cache_set *); | ||
| 1243 | int bch_btree_cache_alloc(struct cache_set *); | ||
| 1244 | void bch_writeback_init_cached_dev(struct cached_dev *); | ||
| 1245 | void bch_moving_init_cache_set(struct cache_set *); | ||
| 1246 | |||
| 1247 | void bch_cache_allocator_exit(struct cache *ca); | ||
| 1248 | int bch_cache_allocator_init(struct cache *ca); | ||
| 1249 | |||
| 1250 | void bch_debug_exit(void); | ||
| 1251 | int bch_debug_init(struct kobject *); | ||
| 1252 | void bch_writeback_exit(void); | ||
| 1253 | int bch_writeback_init(void); | ||
| 1254 | void bch_request_exit(void); | ||
| 1255 | int bch_request_init(void); | ||
| 1256 | void bch_btree_exit(void); | ||
| 1257 | int bch_btree_init(void); | ||
| 1258 | |||
| 1259 | #endif /* _BCACHE_H */ | ||
