Diffstat (limited to 'drivers/md/bcache/writeback.c')
-rw-r--r--  drivers/md/bcache/writeback.c  455
1 file changed, 223 insertions(+), 232 deletions(-)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index ba3ee48320f2..99053b1251be 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -11,18 +11,11 @@
 #include "debug.h"
 #include "writeback.h"
 
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <trace/events/bcache.h>
 
-static struct workqueue_struct *dirty_wq;
-
-static void read_dirty(struct closure *);
-
-struct dirty_io {
-	struct closure		cl;
-	struct cached_dev	*dc;
-	struct bio		bio;
-};
-
 /* Rate limiting */
 
 static void __update_writeback_rate(struct cached_dev *dc)
@@ -72,9 +65,6 @@ out:
 	dc->writeback_rate_derivative = derivative;
 	dc->writeback_rate_change = change;
 	dc->writeback_rate_target = target;
-
-	schedule_delayed_work(&dc->writeback_rate_update,
-			      dc->writeback_rate_update_seconds * HZ);
 }
 
 static void update_writeback_rate(struct work_struct *work)
@@ -90,13 +80,16 @@ static void update_writeback_rate(struct work_struct *work)
 	__update_writeback_rate(dc);
 
 	up_read(&dc->writeback_lock);
+
+	schedule_delayed_work(&dc->writeback_rate_update,
+			      dc->writeback_rate_update_seconds * HZ);
 }
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
 	uint64_t ret;
 
-	if (atomic_read(&dc->disk.detaching) ||
+	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 	    !dc->writeback_percent)
 		return 0;
 
@@ -105,37 +98,11 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 	return min_t(uint64_t, ret, HZ);
 }
 
-/* Background writeback */
-
-static bool dirty_pred(struct keybuf *buf, struct bkey *k)
-{
-	return KEY_DIRTY(k);
-}
-
-static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
-{
-	uint64_t stripe;
-	unsigned nr_sectors = KEY_SIZE(k);
-	struct cached_dev *dc = container_of(buf, struct cached_dev,
-					     writeback_keys);
-	unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
-
-	if (!KEY_DIRTY(k))
-		return false;
-
-	stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
-	while (1) {
-		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
-		    stripe_size)
-			return false;
-
-		if (nr_sectors <= stripe_size)
-			return true;
-
-		nr_sectors -= stripe_size;
-		stripe++;
-	}
-}
+struct dirty_io {
+	struct closure		cl;
+	struct cached_dev	*dc;
+	struct bio		bio;
+};
 
 static void dirty_init(struct keybuf_key *w)
 {
@@ -153,131 +120,6 @@ static void dirty_init(struct keybuf_key *w)
 	bch_bio_map(bio, NULL);
 }
 
-static void refill_dirty(struct closure *cl)
-{
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	struct keybuf *buf = &dc->writeback_keys;
-	bool searched_from_start = false;
-	struct bkey end = MAX_KEY;
-	SET_KEY_INODE(&end, dc->disk.id);
-
-	if (!atomic_read(&dc->disk.detaching) &&
-	    !dc->writeback_running)
-		closure_return(cl);
-
-	down_write(&dc->writeback_lock);
-
-	if (!atomic_read(&dc->has_dirty)) {
-		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
-		bch_write_bdev_super(dc, NULL);
-
-		up_write(&dc->writeback_lock);
-		closure_return(cl);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
-		buf->last_scanned = KEY(dc->disk.id, 0, 0);
-		searched_from_start = true;
-	}
-
-	if (dc->partial_stripes_expensive) {
-		uint64_t i;
-
-		for (i = 0; i < dc->disk.nr_stripes; i++)
-			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
-			    1 << dc->disk.stripe_size_bits)
-				goto full_stripes;
-
-		goto normal_refill;
-full_stripes:
-		bch_refill_keybuf(dc->disk.c, buf, &end,
-				  dirty_full_stripe_pred);
-	} else {
-normal_refill:
-		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
-		/* Searched the entire btree - delay awhile */
-
-		if (RB_EMPTY_ROOT(&buf->keys)) {
-			atomic_set(&dc->has_dirty, 0);
-			cached_dev_put(dc);
-		}
-
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-	}
-
-	up_write(&dc->writeback_lock);
-
-	bch_ratelimit_reset(&dc->writeback_rate);
-
-	/* Punt to workqueue only so we don't recurse and blow the stack */
-	continue_at(cl, read_dirty, dirty_wq);
-}
-
-void bch_writeback_queue(struct cached_dev *dc)
-{
-	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-
-		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
-	}
-}
-
-void bch_writeback_add(struct cached_dev *dc)
-{
-	if (!atomic_read(&dc->has_dirty) &&
-	    !atomic_xchg(&dc->has_dirty, 1)) {
-		atomic_inc(&dc->count);
-
-		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
-			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
-			/* XXX: should do this synchronously */
-			bch_write_bdev_super(dc, NULL);
-		}
-
-		bch_writeback_queue(dc);
-
-		if (dc->writeback_percent)
-			schedule_delayed_work(&dc->writeback_rate_update,
-				      dc->writeback_rate_update_seconds * HZ);
-	}
-}
-
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
-				  uint64_t offset, int nr_sectors)
-{
-	struct bcache_device *d = c->devices[inode];
-	unsigned stripe_size, stripe_offset;
-	uint64_t stripe;
-
-	if (!d)
-		return;
-
-	stripe_size = 1 << d->stripe_size_bits;
-	stripe = offset >> d->stripe_size_bits;
-	stripe_offset = offset & (stripe_size - 1);
-
-	while (nr_sectors) {
-		int s = min_t(unsigned, abs(nr_sectors),
-			      stripe_size - stripe_offset);
-
-		if (nr_sectors < 0)
-			s = -s;
-
-		atomic_add(s, d->stripe_sectors_dirty + stripe);
-		nr_sectors -= s;
-		stripe_offset = 0;
-		stripe++;
-	}
-}
-
-/* Background writeback - IO loop */
-
 static void dirty_io_destructor(struct closure *cl)
 {
 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
@@ -297,26 +139,25 @@ static void write_dirty_finish(struct closure *cl)
 
 	/* This is kind of a dumb way of signalling errors. */
 	if (KEY_DIRTY(&w->key)) {
+		int ret;
 		unsigned i;
-		struct btree_op op;
-		bch_btree_op_init_stack(&op);
+		struct keylist keys;
 
-		op.type = BTREE_REPLACE;
-		bkey_copy(&op.replace, &w->key);
+		bch_keylist_init(&keys);
 
-		SET_KEY_DIRTY(&w->key, false);
-		bch_keylist_add(&op.keys, &w->key);
+		bkey_copy(keys.top, &w->key);
+		SET_KEY_DIRTY(keys.top, false);
+		bch_keylist_push(&keys);
 
 		for (i = 0; i < KEY_PTRS(&w->key); i++)
 			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
-		bch_btree_insert(&op, dc->disk.c);
-		closure_sync(&op.cl);
+		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
 
-		if (op.insert_collision)
+		if (ret)
 			trace_bcache_writeback_collision(&w->key);
 
-		atomic_long_inc(op.insert_collision
+		atomic_long_inc(ret
 				? &dc->disk.c->writeback_keys_failed
 				: &dc->disk.c->writeback_keys_done);
 	}
@@ -374,30 +215,33 @@ static void read_dirty_submit(struct closure *cl)
 	continue_at(cl, write_dirty, system_wq);
 }
 
-static void read_dirty(struct closure *cl)
+static void read_dirty(struct cached_dev *dc)
 {
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	unsigned delay = writeback_delay(dc, 0);
+	unsigned delay = 0;
 	struct keybuf_key *w;
 	struct dirty_io *io;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	/*
 	 * XXX: if we error, background writeback just spins. Should use some
 	 * mempools.
 	 */
 
-	while (1) {
+	while (!kthread_should_stop()) {
+		try_to_freeze();
+
 		w = bch_keybuf_next(&dc->writeback_keys);
 		if (!w)
 			break;
 
 		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
 
-		if (delay > 0 &&
-		    (KEY_START(&w->key) != dc->last_read ||
-		     jiffies_to_msecs(delay) > 50))
-			delay = schedule_timeout_uninterruptible(delay);
+		if (KEY_START(&w->key) != dc->last_read ||
+		    jiffies_to_msecs(delay) > 50)
+			while (!kthread_should_stop() && delay)
+				delay = schedule_timeout_interruptible(delay);
 
 		dc->last_read = KEY_OFFSET(&w->key);
 
@@ -423,7 +267,7 @@ static void read_dirty(struct closure *cl)
 		trace_bcache_writeback(&w->key);
 
 		down(&dc->in_flight);
-		closure_call(&io->cl, read_dirty_submit, NULL, cl);
+		closure_call(&io->cl, read_dirty_submit, NULL, &cl);
 
 		delay = writeback_delay(dc, KEY_SIZE(&w->key));
 	}
@@ -439,52 +283,205 @@ err:
 	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
 	 * freed) before refilling again
 	 */
-	continue_at(cl, refill_dirty, dirty_wq);
+	closure_sync(&cl);
 }
 
-/* Init */
+/* Scan for dirty data */
+
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+				  uint64_t offset, int nr_sectors)
+{
+	struct bcache_device *d = c->devices[inode];
+	unsigned stripe_offset, stripe, sectors_dirty;
+
+	if (!d)
+		return;
+
+	stripe = offset_to_stripe(d, offset);
+	stripe_offset = offset & (d->stripe_size - 1);
+
+	while (nr_sectors) {
+		int s = min_t(unsigned, abs(nr_sectors),
+			      d->stripe_size - stripe_offset);
+
+		if (nr_sectors < 0)
+			s = -s;
+
+		if (stripe >= d->nr_stripes)
+			return;
+
+		sectors_dirty = atomic_add_return(s,
+					d->stripe_sectors_dirty + stripe);
+		if (sectors_dirty == d->stripe_size)
+			set_bit(stripe, d->full_dirty_stripes);
+		else
+			clear_bit(stripe, d->full_dirty_stripes);
+
+		nr_sectors -= s;
+		stripe_offset = 0;
+		stripe++;
+	}
+}
 
-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
-					struct cached_dev *dc)
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
 {
-	struct bkey *k;
-	struct btree_iter iter;
+	return KEY_DIRTY(k);
+}
 
-	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
-	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
-		if (!b->level) {
-			if (KEY_INODE(k) > dc->disk.id)
-				break;
+static void refill_full_stripes(struct cached_dev *dc)
+{
+	struct keybuf *buf = &dc->writeback_keys;
+	unsigned start_stripe, stripe, next_stripe;
+	bool wrapped = false;
 
-			if (KEY_DIRTY(k))
-				bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
-							     KEY_START(k),
-							     KEY_SIZE(k));
-		} else {
-			btree(sectors_dirty_init, k, b, op, dc);
-			if (KEY_INODE(k) > dc->disk.id)
-				break;
-
-			cond_resched();
+	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+
+	if (stripe >= dc->disk.nr_stripes)
+		stripe = 0;
+
+	start_stripe = stripe;
+
+	while (1) {
+		stripe = find_next_bit(dc->disk.full_dirty_stripes,
+				       dc->disk.nr_stripes, stripe);
+
+		if (stripe == dc->disk.nr_stripes)
+			goto next;
+
+		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
+						 dc->disk.nr_stripes, stripe);
+
+		buf->last_scanned = KEY(dc->disk.id,
+					stripe * dc->disk.stripe_size, 0);
+
+		bch_refill_keybuf(dc->disk.c, buf,
+				  &KEY(dc->disk.id,
+				       next_stripe * dc->disk.stripe_size, 0),
+				  dirty_pred);
+
+		if (array_freelist_empty(&buf->freelist))
+			return;
+
+		stripe = next_stripe;
+next:
+		if (wrapped && stripe > start_stripe)
+			return;
+
+		if (stripe == dc->disk.nr_stripes) {
+			stripe = 0;
+			wrapped = true;
 		}
+	}
+}
+
+static bool refill_dirty(struct cached_dev *dc)
+{
+	struct keybuf *buf = &dc->writeback_keys;
+	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+	bool searched_from_start = false;
+
+	if (dc->partial_stripes_expensive) {
+		refill_full_stripes(dc);
+		if (array_freelist_empty(&buf->freelist))
+			return false;
+	}
+
+	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+		buf->last_scanned = KEY(dc->disk.id, 0, 0);
+		searched_from_start = true;
+	}
+
+	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+	struct cached_dev *dc = arg;
+	bool searched_full_index;
+
+	while (!kthread_should_stop()) {
+		down_write(&dc->writeback_lock);
+		if (!atomic_read(&dc->has_dirty) ||
+		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+		     !dc->writeback_running)) {
+			up_write(&dc->writeback_lock);
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			if (kthread_should_stop())
+				return 0;
+
+			try_to_freeze();
+			schedule();
+			continue;
+		}
+
+		searched_full_index = refill_dirty(dc);
+
+		if (searched_full_index &&
+		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+			atomic_set(&dc->has_dirty, 0);
+			cached_dev_put(dc);
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		up_write(&dc->writeback_lock);
+
+		bch_ratelimit_reset(&dc->writeback_rate);
+		read_dirty(dc);
+
+		if (searched_full_index) {
+			unsigned delay = dc->writeback_delay * HZ;
+
+			while (delay &&
+			       !kthread_should_stop() &&
+			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+				delay = schedule_timeout_interruptible(delay);
+		}
+	}
 
 	return 0;
 }
 
+/* Init */
+
+struct sectors_dirty_init {
+	struct btree_op	op;
+	unsigned	inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+				 struct bkey *k)
+{
+	struct sectors_dirty_init *op = container_of(_op,
+					struct sectors_dirty_init, op);
+	if (KEY_INODE(k) > op->inode)
+		return MAP_DONE;
+
+	if (KEY_DIRTY(k))
+		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+					     KEY_START(k), KEY_SIZE(k));
+
+	return MAP_CONTINUE;
+}
+
 void bch_sectors_dirty_init(struct cached_dev *dc)
 {
-	struct btree_op op;
+	struct sectors_dirty_init op;
+
+	bch_btree_op_init(&op.op, -1);
+	op.inode = dc->disk.id;
 
-	bch_btree_op_init_stack(&op);
-	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+			   sectors_dirty_init_fn, 0);
 }
 
-void bch_cached_dev_writeback_init(struct cached_dev *dc)
+int bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
 	sema_init(&dc->in_flight, 64);
-	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);
-
 	bch_keybuf_init(&dc->writeback_keys);
 
 	dc->writeback_metadata = true;
@@ -498,22 +495,16 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
 	dc->writeback_rate_p_term_inverse = 64;
 	dc->writeback_rate_d_smooth = 8;
 
+	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+					      "bcache_writeback");
+	if (IS_ERR(dc->writeback_thread))
+		return PTR_ERR(dc->writeback_thread);
+
+	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
+
 	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 	schedule_delayed_work(&dc->writeback_rate_update,
 			      dc->writeback_rate_update_seconds * HZ);
-}
-
-void bch_writeback_exit(void)
-{
-	if (dirty_wq)
-		destroy_workqueue(dirty_wq);
-}
-
-int __init bch_writeback_init(void)
-{
-	dirty_wq = create_workqueue("bcache_writeback");
-	if (!dirty_wq)
-		return -ENOMEM;
 
 	return 0;
 }
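
For readers skimming the diff: the patch replaces the closure-and-workqueue driven writeback (refill_dirty()/read_dirty() punted to the dirty_wq workqueue) with a per-device kernel thread, bch_writeback_thread(), created in bch_cached_dev_writeback_init(). Below is a minimal, self-contained sketch of that kthread lifecycle — a hypothetical demo module, not bcache code, with made-up names — showing the create/wake step, the kthread_should_stop() checks inside and around sleeps, and the kthread_stop() teardown that the new code relies on.

	/*
	 * Minimal sketch (assumed demo module, not part of bcache) of the
	 * kthread pattern this patch adopts.
	 */
	#include <linux/err.h>
	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/module.h>
	#include <linux/sched.h>

	static struct task_struct *demo_thread;

	static int demo_threadfn(void *arg)
	{
		while (!kthread_should_stop()) {
			try_to_freeze();

			/* One pass of background work would go here. */

			/*
			 * Sleep between passes; kthread_stop() wakes the thread,
			 * so an interruptible wait returns early and the loop
			 * condition is rechecked.
			 */
			schedule_timeout_interruptible(HZ);
		}

		return 0;
	}

	static int __init demo_init(void)
	{
		demo_thread = kthread_create(demo_threadfn, NULL, "demo_writeback");
		if (IS_ERR(demo_thread))
			return PTR_ERR(demo_thread);

		/* kthread_create() does not run the thread until it is woken. */
		wake_up_process(demo_thread);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		/* Blocks until demo_threadfn() returns. */
		kthread_stop(demo_thread);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");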