author		Kent Overstreet <koverstreet@google.com>	2013-03-23 19:11:31 -0400
committer	Kent Overstreet <koverstreet@google.com>	2013-03-23 19:11:31 -0400
commit		cafe563591446cf80bfbc2fe3bc72a2e36cf1060 (patch)
tree		c8ae27b13dcdb0219634376ca5e667df32b1173a /drivers/md/bcache/super.c
parent		ea6749c705d9e629ed03c7336cc929fc6014b834 (diff)
bcache: A block layer cache
Does writethrough and writeback caching, handles unclean shutdown, and
has a bunch of other nifty features motivated by real world usage.
See the wiki at http://bcache.evilpiepirate.org for more.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Diffstat (limited to 'drivers/md/bcache/super.c')
-rw-r--r--	drivers/md/bcache/super.c	1941
1 file changed, 1941 insertions, 0 deletions
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
new file mode 100644
index 000000000000..31ef47f1f3b6
--- /dev/null
+++ b/drivers/md/bcache/super.c
@@ -0,0 +1,1941 @@
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

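/*
 * Example of the "skip past it" convention above (a sketch, not part of
 * the original file): a backing device's on-disk mode field counts from
 * writethrough == 0, so name lookups for a struct cached_dev start the
 * table one past "default", leaving "default" at index -1. Assuming the
 * BDEV_CACHE_MODE() accessor from bcache.h:
 *
 *	const char *mode = (bch_cache_modes + 1)[BDEV_CACHE_MODE(&dc->sb)];
 */
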
struct uuid_entry_v0 {
	uint8_t		uuid[16];
	uint8_t		label[32];
	uint32_t	first_reg;
	uint32_t	last_reg;
	uint32_t	invalidated;
	uint32_t	pad;
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major, bcache_minor;
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)

static void bio_split_pool_free(struct bio_split_pool *p)
{
	/* Either half may be NULL if bio_split_pool_init() failed partway */
	if (p->bio_split_hook)
		mempool_destroy(p->bio_split_hook);

	if (p->bio_split)
		bioset_free(p->bio_split);
}

static int bio_split_pool_init(struct bio_split_pool *p)
{
	p->bio_split = bioset_create(4, 0);
	if (!p->bio_split)
		return -ENOMEM;

	p->bio_split_hook = mempool_create_kmalloc_pool(4,
				sizeof(struct bio_split_hook));
	if (!p->bio_split_hook)
		return -ENOMEM;

	return 0;
}

/* Superblock */

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);

	sb->nbuckets = le64_to_cpu(s->nbuckets);
	sb->block_size = le16_to_cpu(s->block_size);
	sb->bucket_size = le16_to_cpu(s->bucket_size);

	sb->nr_in_set = le16_to_cpu(s->nr_in_set);
	sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
	sb->last_mount = le32_to_cpu(s->last_mount);

	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (is_zero(sb->uuid, 16))
		goto err;

	err = "Unsupported superblock version";
	if (sb->version > BCACHE_SB_VERSION)
		goto err;

	err = "Bad block/bucket size";
	if (!is_power_of_2(sb->block_size) || sb->block_size > PAGE_SECTORS ||
	    !is_power_of_2(sb->bucket_size) || sb->bucket_size < PAGE_SECTORS)
		goto err;

	err = "Too many buckets";
	if (sb->nbuckets > LONG_MAX)
		goto err;

	err = "Not enough buckets";
	if (sb->nbuckets < 1 << 7)
		goto err;

	err = "Invalid superblock: device too small";
	if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
		goto err;

	if (sb->version == CACHE_BACKING_DEV)
		goto out;

	err = "Bad UUID";
	if (is_zero(sb->set_uuid, 16))
		goto err;

	err = "Bad cache device number in set";
	if (!sb->nr_in_set ||
	    sb->nr_in_set <= sb->nr_this_dev ||
	    sb->nr_in_set > MAX_CACHES_PER_SET)
		goto err;

	err = "Journal buckets not sequential";
	for (i = 0; i < sb->keys; i++)
		if (sb->d[i] != sb->first_bucket + i)
			goto err;

	err = "Too many journal buckets";
	if (sb->first_bucket + sb->keys > sb->nbuckets)
		goto err;

	err = "Invalid superblock: first bucket comes before end of super";
	if (sb->first_bucket * sb->bucket_size < 16)
		goto err;
out:
	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio, int error)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write.cl);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_sector = SB_SECTOR;
	bio->bi_rw = REQ_SYNC|REQ_META;
	bio->bi_size = SB_SIZE;
	bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(REQ_WRITE, bio);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write.cl;
	struct bio *bio = &dc->sb_bio;

	closure_lock(&dc->sb_write, parent);

	bio_reset(bio);
	bio->bi_bdev	= dc->bdev;
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return(cl);
}

static void write_super_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, error, "writing superblock");
	closure_put(&ca->set->sb_write.cl);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write.cl;
	struct cache *ca;
	unsigned i;

	closure_lock(&c->sb_write, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio->bi_bdev	= ca->bdev;
		bio->bi_end_io	= write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return(cl);
}

/* UUID io */

static void uuid_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);

	cache_set_err_on(error, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io(struct cache_set *c, unsigned long rw,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write.cl;
	struct uuid_entry *u;
	unsigned i;

	BUG_ON(!parent);
	closure_lock(&c->uuid_write, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_rw	= REQ_SYNC|REQ_META|rw;
		bio->bi_size	= KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;
		bio->bi_private = cl;
		bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (!(rw & WRITE))
			break;
	}

	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
		 pkey(&c->uuid_bucket));

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return(cl);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_ptr_invalid(c, 1, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, READ_SYNC, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1; i >= 0; --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_WRITE, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	__bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written; if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, in as many buckets as are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

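/*
 * For illustration, a sketch of the on-disk layout described above,
 * modelled on struct prio_set / struct bucket_disk from bcache.h (the
 * authoritative definitions live in that header; this is a paraphrase):
 */
#if 0	/* illustration only */
struct bucket_disk {
	uint16_t	prio;
	uint8_t		gen;
} __attribute((packed));

struct prio_set {
	uint64_t	csum;		/* crc64 of everything after it */
	uint64_t	magic;
	uint64_t	seq;
	uint32_t	version;
	uint32_t	pad;
	uint64_t	next_bucket;	/* next bucket in the on-disk list */
	struct bucket_disk data[];	/* packed gens/prios, fills bucket */
};
#endif
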
static void prio_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(error, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_sector	= bucket * ca->sb.bucket_size;
	bio->bi_bdev	= ca->bdev;
	bio->bi_rw	= REQ_SYNC|REQ_META|rw;
	bio->bi_size	= bucket_bytes(ca);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio, ca);
	closure_sync(cl);
}

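/*
 * Note: buckets_free() expands to a printf-style format string plus its
 * arguments, so it can be spliced into printf-like callers such as the
 * blktrace_msg() call in bch_prio_write() below.
 */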
#define buckets_free(c)	"free %zu, free_inc %zu, unused %zu", \
	fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets; b++)
		b->disk_gen = b->gen;

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
	blktrace_msg(ca, "Starting priorities: " buckets_free(ca));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data, *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(ca);
		p->csum = crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_WRITE);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	ca->need_save_prio = 0;

	/*
	 * Don't want the old priorities to get garbage collected until after
	 * we finish writing the new ones and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++)
		ca->prio_last_buckets[i] = ca->prio_buckets[i];
}

static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, READ_SYNC);

			if (p->csum != crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(ca))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (atomic_read(&d->closing))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static int release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
	return 0;
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!atomic_xchg(&d->closing, 1))
		closure_queue(&d->cl);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&d->detaching)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);

		atomic_set(&d->detaching, 0);
	}

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));

	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);

	if (d->disk)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk)
		put_disk(d->disk);

	bio_split_pool_free(&d->bio_split_hook);
	if (d->unaligned_bvec)
		mempool_destroy(d->unaligned_bvec);
	if (d->bio_split)
		bioset_free(d->bio_split);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned block_size)
{
	struct request_queue *q;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
	    bio_split_pool_init(&d->bio_split_hook))
		return -ENOMEM;

	d->disk = alloc_disk(1);
	if (!d->disk)
		return -ENOMEM;

	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= bcache_minor++;
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info.congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	q->limits.max_discard_sectors	= UINT_MAX;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;

	if (atomic_xchg(&dc->running, 1))
		return;

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
#if 0
	char *env[] = { "SYMLINK=label", NULL };
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
#endif
	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!atomic_read(&dc->disk.detaching));
	BUG_ON(atomic_read(&dc->count));

	sysfs_remove_link(&dc->disk.c->kobj, dc->disk.name);
	sysfs_remove_link(&dc->disk.kobj, "cache");

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&dc->disk.closing))
		return;

	if (atomic_xchg(&dc->disk.detaching, 1))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	*/

	if (is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	bcache_device_link(&dc->disk, c, "bdev");
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

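	/*
	 * Reader side of the barrier above, roughly (a sketch of
	 * cached_dev_get() from bcache.h): take a ref only if the count is
	 * already nonzero, then order later reads of dc->disk.c after it:
	 *
	 *	if (!atomic_inc_not_zero(&dc->count))
	 *		return false;
	 *	smp_mb__after_atomic_inc();
	 *	return true;
	 */
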
	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);

	mutex_lock(&bch_register_lock);

	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev)) {
		blk_sync_queue(bdev_get_queue(dc->bdev));
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int err;
	struct io *io;

	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);

	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	err = bcache_device_init(&dc->disk, block_size);
	if (err)
		goto err;

	spin_lock_init(&dc->io_lock);
	closure_init_unlocked(&dc->sb_write);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);

	dc->sequential_merge = true;
	dc->sequential_cutoff = 4 << 20;

	INIT_LIST_HEAD(&dc->io_lru);
	dc->sb_bio.bi_max_vecs = 1;
	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	bch_writeback_init_cached_dev(dc);
	return 0;
err:
	bcache_device_stop(&dc->disk);
	return err;
}

/* Cached device - bcache superblock */

static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
				 struct block_device *bdev,
				 struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct gendisk *g;
	struct cache_set *c;

	if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
		return err;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	g = dc->disk.disk;

	set_capacity(g, dc->bdev->bd_part->nr_sects - 16);

	bch_cached_dev_request_init(dc);

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return NULL;
err:
	kobject_put(&dc->disk.kobj);
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	/*
	 * Return NULL instead of an error because kobject_put() cleans
	 * everything up
	 */
	return NULL;
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	bcache_device_free(d);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	sysfs_remove_link(&d->c->kobj, d->name);
	sysfs_remove_link(&d->kobj, "cache");
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c)))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	set_capacity(d->disk, u->sectors);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca)
			kobject_put(&ca->kobj);

	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));

	kfree(c->fill_iter);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct btree *b;

	/* Shut down allocator threads */
	set_bit(CACHE_SET_STOPPING_2, &c->flags);
	wake_up(&c->alloc_wait);

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list)
		if (btree_node_dirty(b))
			bch_btree_write(b, true, NULL);

	closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc, *t;
	size_t i;

	mutex_lock(&bch_register_lock);

	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			bch_cached_dev_detach(dc);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
			bcache_device_stop(c->devices[i]);

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

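/* Allocate enough zeroed pages to hold one bucket's worth of data. */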
#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size	= sb->block_size;
	c->sb.bucket_size	= sb->bucket_size;
	c->sb.nr_in_set		= sb->nr_in_set;
	c->sb.last_mount	= sb->last_mount;
	c->bucket_bits		= ilog2(sb->bucket_size);
	c->block_bits		= ilog2(sb->block_size);
	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages = c->sb.bucket_size / PAGE_SECTORS;
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	init_waitqueue_head(&c->alloc_wait);
	mutex_init(&c->bucket_lock);
	mutex_init(&c->fill_lock);
	mutex_init(&c->sort_lock);
	spin_lock_init(&c->sort_time_lock);
	closure_init_unlocked(&c->sb_write);
	closure_init_unlocked(&c->uuid_write);
	spin_lock_init(&c->btree_read_time_lock);
	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c))
		goto err;

	c->fill_iter->size = sb->bucket_size / sb->block_size;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	unsigned i;

	struct btree_op op;
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal, &op))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_ptr_invalid(c, j->btree_level + 1, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, k, j->btree_level, &op);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &op.cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c, &op))
			goto err;

		bch_journal_mark(c, &journal);
		bch_btree_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     system_wq, &c->cl);

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal, &op);
	} else {
		pr_notice("invalidating existing data");
		/* Don't want invalidate_buckets() to queue a gc yet */
		closure_lock(&c->gc, NULL);

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_btree_gc_finish(c);

		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     ca->alloc_workqueue, &c->cl);

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		wake_up(&c->alloc_wait);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err_unlock_gc;

		err = "cannot allocate new btree root";
		c->root = bch_btree_node_alloc(c, 0, &op.cl);
		if (IS_ERR_OR_NULL(c->root))
			goto err_unlock_gc;

		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_write(c->root, true, &op);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &op.cl);

		/* Unlock */
		closure_set_stopped(&c->gc.cl);
		closure_put(&c->gc.cl);
	}

	closure_sync(&op.cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	return;
err_unlock_gc:
	closure_set_stopped(&c->gc.cl);
	closure_put(&c->gc.cl);
err:
	closure_sync(&op.cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, err);
}

static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
	       ca->sb.bucket_size == c->sb.bucket_size &&
	       ca->sb.nr_in_set == c->sb.nr_in_set;
}
1570 | |||
1571 | static const char *register_cache_set(struct cache *ca) | ||
1572 | { | ||
1573 | char buf[12]; | ||
1574 | const char *err = "cannot allocate memory"; | ||
1575 | struct cache_set *c; | ||
1576 | |||
1577 | list_for_each_entry(c, &bch_cache_sets, list) | ||
1578 | if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { | ||
1579 | if (c->cache[ca->sb.nr_this_dev]) | ||
1580 | return "duplicate cache set member"; | ||
1581 | |||
1582 | if (!can_attach_cache(ca, c)) | ||
1583 | return "cache sb does not match set"; | ||
1584 | |||
1585 | if (!CACHE_SYNC(&ca->sb)) | ||
1586 | SET_CACHE_SYNC(&c->sb, false); | ||
1587 | |||
1588 | goto found; | ||
1589 | } | ||
1590 | |||
1591 | c = bch_cache_set_alloc(&ca->sb); | ||
1592 | if (!c) | ||
1593 | return err; | ||
1594 | |||
1595 | err = "error creating kobject"; | ||
1596 | if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || | ||
1597 | kobject_add(&c->internal, &c->kobj, "internal")) | ||
1598 | goto err; | ||
1599 | |||
1600 | if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) | ||
1601 | goto err; | ||
1602 | |||
1603 | bch_debug_init_cache_set(c); | ||
1604 | |||
1605 | list_add(&c->list, &bch_cache_sets); | ||
1606 | found: | ||
1607 | sprintf(buf, "cache%i", ca->sb.nr_this_dev); | ||
1608 | if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || | ||
1609 | sysfs_create_link(&c->kobj, &ca->kobj, buf)) | ||
1610 | goto err; | ||
1611 | |||
1612 | if (ca->sb.seq > c->sb.seq) { | ||
1613 | c->sb.version = ca->sb.version; | ||
1614 | memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); | ||
1615 | c->sb.flags = ca->sb.flags; | ||
1616 | c->sb.seq = ca->sb.seq; | ||
1617 | pr_debug("set version = %llu", c->sb.version); | ||
1618 | } | ||
1619 | |||
1620 | ca->set = c; | ||
1621 | ca->set->cache[ca->sb.nr_this_dev] = ca; | ||
1622 | c->cache_by_alloc[c->caches_loaded++] = ca; | ||
1623 | |||
1624 | if (c->caches_loaded == c->sb.nr_in_set) | ||
1625 | run_cache_set(c); | ||
1626 | |||
1627 | return NULL; | ||
1628 | err: | ||
1629 | bch_cache_set_unregister(c); | ||
1630 | return err; | ||
1631 | } | ||
1632 | |||
1633 | /* Cache device */ | ||
1634 | |||
1635 | void bch_cache_release(struct kobject *kobj) | ||
1636 | { | ||
1637 | struct cache *ca = container_of(kobj, struct cache, kobj); | ||
1638 | |||
1639 | if (ca->set) | ||
1640 | ca->set->cache[ca->sb.nr_this_dev] = NULL; | ||
1641 | |||
1642 | bch_cache_allocator_exit(ca); | ||
1643 | |||
1644 | bio_split_pool_free(&ca->bio_split_hook); | ||
1645 | |||
1646 | if (ca->alloc_workqueue) | ||
1647 | destroy_workqueue(ca->alloc_workqueue); | ||
1648 | |||
1649 | free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); | ||
1650 | kfree(ca->prio_buckets); | ||
1651 | vfree(ca->buckets); | ||
1652 | |||
1653 | free_heap(&ca->heap); | ||
1654 | free_fifo(&ca->unused); | ||
1655 | free_fifo(&ca->free_inc); | ||
1656 | free_fifo(&ca->free); | ||
1657 | |||
1658 | if (ca->sb_bio.bi_inline_vecs[0].bv_page) | ||
1659 | put_page(ca->sb_bio.bi_io_vec[0].bv_page); | ||
1660 | |||
1661 | if (!IS_ERR_OR_NULL(ca->bdev)) { | ||
1662 | blk_sync_queue(bdev_get_queue(ca->bdev)); | ||
1663 | blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | ||
1664 | } | ||
1665 | |||
1666 | kfree(ca); | ||
1667 | module_put(THIS_MODULE); | ||
1668 | } | ||
1669 | |||
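/*
 * Fill in a zeroed struct cache from an already-read superblock. Takes its
 * own module reference; once kobject_init() has run, kobject_put() invokes
 * bch_cache_release() and is the only correct way to unwind.
 */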
1670 | static int cache_alloc(struct cache_sb *sb, struct cache *ca) | ||
1671 | { | ||
1672 | size_t free; | ||
1673 | struct bucket *b; | ||
1674 | |||
1675 | if (!ca) | ||
1676 | return -ENOMEM; | ||
1677 | |||
1678 | __module_get(THIS_MODULE); | ||
1679 | kobject_init(&ca->kobj, &bch_cache_ktype); | ||
1680 | |||
1681 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); | ||
1682 | |||
1683 | INIT_LIST_HEAD(&ca->discards); | ||
1684 | |||
1685 | bio_init(&ca->sb_bio); | ||
1686 | ca->sb_bio.bi_max_vecs = 1; | ||
1687 | ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; | ||
1688 | |||
1689 | bio_init(&ca->journal.bio); | ||
1690 | ca->journal.bio.bi_max_vecs = 8; | ||
1691 | ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; | ||
1692 | |||
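	/*
	 * Size the allocator's reserves: roughly one slot per 512 buckets
	 * (rounded up to a power of two), but never less than two full sets
	 * of prio buckets plus slack.
	 */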
1693 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 9; | ||
1694 | free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2); | ||
1695 | |||
1696 | if (!init_fifo(&ca->free, free, GFP_KERNEL) || | ||
1697 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || | ||
1698 | !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || | ||
1699 | !init_heap(&ca->heap, free << 3, GFP_KERNEL) || | ||
1700 | !(ca->buckets = vmalloc(sizeof(struct bucket) * | ||
1701 | ca->sb.nbuckets)) || | ||
1702 | !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * | ||
1703 | 2, GFP_KERNEL)) || | ||
1704 | !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || | ||
1705 | !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || | ||
1706 | bio_split_pool_init(&ca->bio_split_hook)) | ||
1707 | goto err; | ||
1708 | |||
1709 | ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); | ||
1710 | |||
1711 | memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket)); | ||
1712 | for_each_bucket(b, ca) | ||
1713 | atomic_set(&b->pin, 0); | ||
1714 | |||
1715 | if (bch_cache_allocator_init(ca)) | ||
1716 | goto err; | ||
1717 | |||
1718 | return 0; | ||
1719 | err: | ||
1720 | kobject_put(&ca->kobj); | ||
1721 | return -ENOMEM; | ||
1722 | } | ||
1723 | |||
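/*
 * Consumes sb_page and bdev: once cache_alloc() succeeds they belong to
 * the cache kobject, whose destructor puts them.
 */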
1724 | static const char *register_cache(struct cache_sb *sb, struct page *sb_page, | ||
1725 | struct block_device *bdev, struct cache *ca) | ||
1726 | { | ||
1727 | char name[BDEVNAME_SIZE]; | ||
1728 | const char *err = "cannot allocate memory"; | ||
1729 | |||
1730 | if (cache_alloc(sb, ca) != 0) | ||
1731 | return err; | ||
1732 | |||
1733 | ca->sb_bio.bi_io_vec[0].bv_page = sb_page; | ||
1734 | ca->bdev = bdev; | ||
1735 | ca->bdev->bd_holder = ca; | ||
1736 | |||
1737 | if (blk_queue_discard(bdev_get_queue(ca->bdev))) | ||
1738 | ca->discard = CACHE_DISCARD(&ca->sb); | ||
1739 | |||
1740 | err = "error creating kobject"; | ||
1741 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) | ||
1742 | goto err; | ||
1743 | |||
1744 | err = register_cache_set(ca); | ||
1745 | if (err) | ||
1746 | goto err; | ||
1747 | |||
1748 | pr_info("registered cache device %s", bdevname(bdev, name)); | ||
1749 | |||
1750 | return NULL; | ||
1751 | err: | ||
1752 | kobject_put(&ca->kobj); | ||
1753 | pr_info("error opening %s: %s", bdevname(bdev, name), err); | ||
1754 | /* Return NULL instead of an error because kobject_put() cleans | ||
1755 | * everything up | ||
1756 | */ | ||
1757 | return NULL; | ||
1758 | } | ||
1759 | |||
1760 | /* Global interfaces/init */ | ||
1761 | |||
1762 | static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, | ||
1763 | const char *, size_t); | ||
1764 | |||
1765 | kobj_attribute_write(register, register_bcache); | ||
1766 | kobj_attribute_write(register_quiet, register_bcache); | ||
1767 | |||
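/*
 * Userspace brings a formatted cache or backing device up by writing its
 * path to the register file, e.g.
 *
 *	echo /dev/sdb > /sys/fs/bcache/register
 *
 * register_quiet behaves identically but suppresses the error message on
 * failure.
 */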
1768 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | ||
1769 | const char *buffer, size_t size) | ||
1770 | { | ||
1771 | ssize_t ret = size; | ||
1772 | const char *err = "cannot allocate memory"; | ||
1773 | char *path = NULL; | ||
1774 | struct cache_sb *sb = NULL; | ||
1775 | struct block_device *bdev = NULL; | ||
1776 | struct page *sb_page = NULL; | ||
1777 | |||
1778 | if (!try_module_get(THIS_MODULE)) | ||
1779 | return -EBUSY; | ||
1780 | |||
1781 | mutex_lock(&bch_register_lock); | ||
1782 | |||
1783 | if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || | ||
1784 | !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) | ||
1785 | goto err; | ||
1786 | |||
1787 | err = "failed to open device"; | ||
1788 | bdev = blkdev_get_by_path(strim(path), | ||
1789 | FMODE_READ|FMODE_WRITE|FMODE_EXCL, | ||
1790 | sb); | ||
1791 | if (bdev == ERR_PTR(-EBUSY)) | ||
1792 | err = "device busy"; | ||
1793 | |||
	if (IS_ERR(bdev))
		goto err;

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;
1797 | |||
1798 | err = read_super(sb, bdev, &sb_page); | ||
1799 | if (err) | ||
1800 | goto err_close; | ||
1801 | |||
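	/*
	 * The same register file accepts both device types; the superblock
	 * version distinguishes a backing device from a cache device. A NULL
	 * from kzalloc() is passed down for the callee to reject.
	 */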
1802 | if (sb->version == CACHE_BACKING_DEV) { | ||
1803 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); | ||
1804 | |||
1805 | err = register_bdev(sb, sb_page, bdev, dc); | ||
1806 | } else { | ||
1807 | struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); | ||
1808 | |||
1809 | err = register_cache(sb, sb_page, bdev, ca); | ||
1810 | } | ||
1811 | |||
1812 | if (err) { | ||
1813 | /* register_(bdev|cache) will only return an error if they | ||
1814 | * didn't get far enough to create the kobject - if they did, | ||
1815 | * the kobject destructor will do this cleanup. | ||
1816 | */ | ||
1817 | put_page(sb_page); | ||
1818 | err_close: | ||
1819 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | ||
1820 | err: | ||
1821 | if (attr != &ksysfs_register_quiet) | ||
1822 | pr_info("error opening %s: %s", path, err); | ||
1823 | ret = -EINVAL; | ||
1824 | } | ||
1825 | |||
1826 | kfree(sb); | ||
1827 | kfree(path); | ||
1828 | mutex_unlock(&bch_register_lock); | ||
1829 | module_put(THIS_MODULE); | ||
1830 | return ret; | ||
1831 | } | ||
1832 | |||
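/*
 * Stop every cache set and backing device on shutdown so they close
 * cleanly before the block devices underneath them disappear.
 */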
1833 | static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) | ||
1834 | { | ||
1835 | if (code == SYS_DOWN || | ||
1836 | code == SYS_HALT || | ||
1837 | code == SYS_POWER_OFF) { | ||
1838 | DEFINE_WAIT(wait); | ||
1839 | unsigned long start = jiffies; | ||
1840 | bool stopped = false; | ||
1841 | |||
1842 | struct cache_set *c, *tc; | ||
1843 | struct cached_dev *dc, *tdc; | ||
1844 | |||
1845 | mutex_lock(&bch_register_lock); | ||
1846 | |||
1847 | if (list_empty(&bch_cache_sets) && | ||
1848 | list_empty(&uncached_devices)) | ||
1849 | goto out; | ||
1850 | |||
1851 | pr_info("Stopping all devices:"); | ||
1852 | |||
1853 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | ||
1854 | bch_cache_set_stop(c); | ||
1855 | |||
1856 | list_for_each_entry_safe(dc, tdc, &uncached_devices, list) | ||
1857 | bcache_device_stop(&dc->disk); | ||
1858 | |||
1859 | /* What's a condition variable? */ | ||
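		/*
		 * Open-coded wait: sleep on unregister_wait and recheck both
		 * lists on each wakeup, giving up two seconds after 'start'.
		 */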
1860 | while (1) { | ||
1861 | long timeout = start + 2 * HZ - jiffies; | ||
1862 | |||
1863 | stopped = list_empty(&bch_cache_sets) && | ||
1864 | list_empty(&uncached_devices); | ||
1865 | |||
1866 | if (timeout < 0 || stopped) | ||
1867 | break; | ||
1868 | |||
1869 | prepare_to_wait(&unregister_wait, &wait, | ||
1870 | TASK_UNINTERRUPTIBLE); | ||
1871 | |||
1872 | mutex_unlock(&bch_register_lock); | ||
1873 | schedule_timeout(timeout); | ||
1874 | mutex_lock(&bch_register_lock); | ||
1875 | } | ||
1876 | |||
1877 | finish_wait(&unregister_wait, &wait); | ||
1878 | |||
1879 | if (stopped) | ||
1880 | pr_info("All devices stopped"); | ||
1881 | else | ||
1882 | pr_notice("Timeout waiting for devices to be closed"); | ||
1883 | out: | ||
1884 | mutex_unlock(&bch_register_lock); | ||
1885 | } | ||
1886 | |||
1887 | return NOTIFY_DONE; | ||
1888 | } | ||
1889 | |||
1890 | static struct notifier_block reboot = { | ||
1891 | .notifier_call = bcache_reboot, | ||
1892 | .priority = INT_MAX, /* before any real devices */ | ||
1893 | }; | ||
1894 | |||
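/*
 * Tear down in reverse order of bcache_init(). The NULL checks let this
 * double as the cleanup path for a partially failed init.
 */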
1895 | static void bcache_exit(void) | ||
1896 | { | ||
1897 | bch_debug_exit(); | ||
1898 | bch_writeback_exit(); | ||
1899 | bch_request_exit(); | ||
1900 | bch_btree_exit(); | ||
1901 | if (bcache_kobj) | ||
1902 | kobject_put(bcache_kobj); | ||
1903 | if (bcache_wq) | ||
1904 | destroy_workqueue(bcache_wq); | ||
1905 | unregister_blkdev(bcache_major, "bcache"); | ||
1906 | unregister_reboot_notifier(&reboot); | ||
1907 | } | ||
1908 | |||
1909 | static int __init bcache_init(void) | ||
1910 | { | ||
1911 | static const struct attribute *files[] = { | ||
1912 | &ksysfs_register.attr, | ||
1913 | &ksysfs_register_quiet.attr, | ||
1914 | NULL | ||
1915 | }; | ||
1916 | |||
1917 | mutex_init(&bch_register_lock); | ||
1918 | init_waitqueue_head(&unregister_wait); | ||
1919 | register_reboot_notifier(&reboot); | ||
1920 | |||
1921 | bcache_major = register_blkdev(0, "bcache"); | ||
1922 | if (bcache_major < 0) | ||
1923 | return bcache_major; | ||
1924 | |||
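	/*
	 * Any failure past register_blkdev() unwinds through bcache_exit(),
	 * which tolerates the pieces that were never created.
	 */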
1925 | if (!(bcache_wq = create_workqueue("bcache")) || | ||
1926 | !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || | ||
1927 | sysfs_create_files(bcache_kobj, files) || | ||
1928 | bch_btree_init() || | ||
1929 | bch_request_init() || | ||
1930 | bch_writeback_init() || | ||
1931 | bch_debug_init(bcache_kobj)) | ||
1932 | goto err; | ||
1933 | |||
1934 | return 0; | ||
1935 | err: | ||
1936 | bcache_exit(); | ||
1937 | return -ENOMEM; | ||
1938 | } | ||
1939 | |||
1940 | module_exit(bcache_exit); | ||
1941 | module_init(bcache_init); | ||