author		Kent Overstreet <kmo@daterainc.com>	2013-07-24 20:50:06 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-11 00:56:05 -0500
commit		5e6926daac267dd99552ae613f041a9e88bbf258
tree		0d9ebb689cccc61bc3bfca19c48ac83c5591df28
parent		72a44517f3ca3725dc86081d105457df46448679
bcache: Convert writeback to a kthread
This simplifies the writeback flow control quite a bit - previously, it
was conceptually two coroutines, refill_dirty() and read_dirty(). This
makes the code quite a bit more straightforward.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
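For readers coming from the closure-based code, the new control flow reduces to a single kernel thread. The sketch below is a condensed paraphrase of the bch_writeback_thread() added in this patch, not the literal hunk: locking, freezer and detach handling, and the partial-stripe refill are elided, and nothing_to_do() is a hypothetical stand-in for the has_dirty / writeback_running checks in the real function.

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;

	while (!kthread_should_stop()) {
		if (nothing_to_do(dc)) {
			/* sleep until bch_writeback_add() wakes the thread */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			continue;
		}

		refill_dirty(dc);	/* scan the btree for dirty keys */
		read_dirty(dc);		/* read them and write them back */
	}

	return 0;
}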
-rw-r--r--	drivers/md/bcache/bcache.h	 10
-rw-r--r--	drivers/md/bcache/super.c	  3
-rw-r--r--	drivers/md/bcache/writeback.c	371
-rw-r--r--	drivers/md/bcache/writeback.h	 25
4 files changed, 203 insertions, 206 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 09410eb07d82..674e2f42e778 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -509,7 +509,7 @@ struct cached_dev {
 
 	/* Limit number of writeback bios in flight */
 	struct semaphore	in_flight;
-	struct closure_with_timer writeback;
+	struct task_struct	*writeback_thread;
 
 	struct keybuf		writeback_keys;
 
@@ -1038,7 +1038,11 @@ static inline void bkey_init(struct bkey *k)
 
 #define KEY_START(k)		(KEY_OFFSET(k) - KEY_SIZE(k))
 #define START_KEY(k)		KEY(KEY_INODE(k), KEY_START(k), 0)
-#define MAX_KEY			KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
+
+#define MAX_KEY_INODE		(~(~0 << 20))
+#define MAX_KEY_OFFSET		(((uint64_t) ~0) >> 1)
+#define MAX_KEY			KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
+
 #define ZERO_KEY		KEY(0, 0, 0)
 
 /*
@@ -1214,8 +1218,6 @@ int bch_cache_allocator_init(struct cache *ca);
 
 void bch_debug_exit(void);
 int bch_debug_init(struct kobject *);
-void bch_writeback_exit(void);
-int bch_writeback_init(void);
void bch_request_exit(void);
 int bch_request_init(void);
 void bch_btree_exit(void);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f89e2296bde1..b79dd5a6679e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1029,6 +1029,7 @@ static void cached_dev_free(struct closure *cl)
 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 
 	cancel_delayed_work_sync(&dc->writeback_rate_update);
+	kthread_stop(dc->writeback_thread);
 
 	mutex_lock(&bch_register_lock);
 
@@ -2006,7 +2007,6 @@ static struct notifier_block reboot = {
 static void bcache_exit(void)
 {
 	bch_debug_exit();
-	bch_writeback_exit();
 	bch_request_exit();
 	bch_btree_exit();
 	if (bcache_kobj)
@@ -2039,7 +2039,6 @@ static int __init bcache_init(void)
 	    sysfs_create_files(bcache_kobj, files) ||
 	    bch_btree_init() ||
 	    bch_request_init() ||
-	    bch_writeback_init() ||
 	    bch_debug_init(bcache_kobj))
 		goto err;
 
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 51dc709c9bf7..4392f3f38d62 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -11,18 +11,11 @@
 #include "debug.h"
 #include "writeback.h"
 
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <trace/events/bcache.h>
 
-static struct workqueue_struct *dirty_wq;
-
-static void read_dirty(struct closure *);
-
-struct dirty_io {
-	struct closure		cl;
-	struct cached_dev	*dc;
-	struct bio		bio;
-};
-
 /* Rate limiting */
 
 static void __update_writeback_rate(struct cached_dev *dc)
@@ -72,9 +65,6 @@ out:
 	dc->writeback_rate_derivative = derivative;
 	dc->writeback_rate_change = change;
 	dc->writeback_rate_target = target;
-
-	schedule_delayed_work(&dc->writeback_rate_update,
-			      dc->writeback_rate_update_seconds * HZ);
 }
 
 static void update_writeback_rate(struct work_struct *work)
@@ -90,6 +80,9 @@ static void update_writeback_rate(struct work_struct *work)
 	__update_writeback_rate(dc);
 
 	up_read(&dc->writeback_lock);
+
+	schedule_delayed_work(&dc->writeback_rate_update,
+			      dc->writeback_rate_update_seconds * HZ);
 }
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
@@ -105,37 +98,11 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 	return min_t(uint64_t, ret, HZ);
 }
 
-/* Background writeback */
-
-static bool dirty_pred(struct keybuf *buf, struct bkey *k)
-{
-	return KEY_DIRTY(k);
-}
-
-static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
-{
-	uint64_t stripe = KEY_START(k);
-	unsigned nr_sectors = KEY_SIZE(k);
-	struct cached_dev *dc = container_of(buf, struct cached_dev,
-					     writeback_keys);
-
-	if (!KEY_DIRTY(k))
-		return false;
-
-	do_div(stripe, dc->disk.stripe_size);
-
-	while (1) {
-		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
-		    dc->disk.stripe_size)
-			return true;
-
-		if (nr_sectors <= dc->disk.stripe_size)
-			return false;
-
-		nr_sectors -= dc->disk.stripe_size;
-		stripe++;
-	}
-}
+struct dirty_io {
+	struct closure		cl;
+	struct cached_dev	*dc;
+	struct bio		bio;
+};
 
 static void dirty_init(struct keybuf_key *w)
 {
@@ -153,132 +120,6 @@ static void dirty_init(struct keybuf_key *w)
 	bch_bio_map(bio, NULL);
 }
 
-static void refill_dirty(struct closure *cl)
-{
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	struct keybuf *buf = &dc->writeback_keys;
-	bool searched_from_start = false;
-	struct bkey end = MAX_KEY;
-	SET_KEY_INODE(&end, dc->disk.id);
-
-	if (!atomic_read(&dc->disk.detaching) &&
-	    !dc->writeback_running)
-		closure_return(cl);
-
-	down_write(&dc->writeback_lock);
-
-	if (!atomic_read(&dc->has_dirty)) {
-		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
-		bch_write_bdev_super(dc, NULL);
-
-		up_write(&dc->writeback_lock);
-		closure_return(cl);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
-		buf->last_scanned = KEY(dc->disk.id, 0, 0);
-		searched_from_start = true;
-	}
-
-	if (dc->partial_stripes_expensive) {
-		uint64_t i;
-
-		for (i = 0; i < dc->disk.nr_stripes; i++)
-			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
-			    dc->disk.stripe_size)
-				goto full_stripes;
-
-		goto normal_refill;
-full_stripes:
-		searched_from_start = false;	/* not searching entire btree */
-		bch_refill_keybuf(dc->disk.c, buf, &end,
-				  dirty_full_stripe_pred);
-	} else {
-normal_refill:
-		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
-		/* Searched the entire btree - delay awhile */
-
-		if (RB_EMPTY_ROOT(&buf->keys)) {
-			atomic_set(&dc->has_dirty, 0);
-			cached_dev_put(dc);
-		}
-
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-	}
-
-	up_write(&dc->writeback_lock);
-
-	bch_ratelimit_reset(&dc->writeback_rate);
-
-	/* Punt to workqueue only so we don't recurse and blow the stack */
-	continue_at(cl, read_dirty, dirty_wq);
-}
-
-void bch_writeback_queue(struct cached_dev *dc)
-{
-	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-
-		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
-	}
-}
-
-void bch_writeback_add(struct cached_dev *dc)
-{
-	if (!atomic_read(&dc->has_dirty) &&
-	    !atomic_xchg(&dc->has_dirty, 1)) {
-		atomic_inc(&dc->count);
-
-		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
-			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
-			/* XXX: should do this synchronously */
-			bch_write_bdev_super(dc, NULL);
-		}
-
-		bch_writeback_queue(dc);
-
-		if (dc->writeback_percent)
-			schedule_delayed_work(&dc->writeback_rate_update,
-					      dc->writeback_rate_update_seconds * HZ);
-	}
-}
-
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
-				  uint64_t offset, int nr_sectors)
-{
-	struct bcache_device *d = c->devices[inode];
-	unsigned stripe_offset;
-	uint64_t stripe = offset;
-
-	if (!d)
-		return;
-
-	do_div(stripe, d->stripe_size);
-
-	stripe_offset = offset & (d->stripe_size - 1);
-
-	while (nr_sectors) {
-		int s = min_t(unsigned, abs(nr_sectors),
-			      d->stripe_size - stripe_offset);
-
-		if (nr_sectors < 0)
-			s = -s;
-
-		atomic_add(s, d->stripe_sectors_dirty + stripe);
-		nr_sectors -= s;
-		stripe_offset = 0;
-		stripe++;
-	}
-}
-
-/* Background writeback - IO loop */
-
 static void dirty_io_destructor(struct closure *cl)
 {
 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
@@ -378,30 +219,33 @@ static void read_dirty_submit(struct closure *cl)
 	continue_at(cl, write_dirty, system_wq);
 }
 
-static void read_dirty(struct closure *cl)
+static void read_dirty(struct cached_dev *dc)
 {
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	unsigned delay = writeback_delay(dc, 0);
+	unsigned delay = 0;
 	struct keybuf_key *w;
 	struct dirty_io *io;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	/*
 	 * XXX: if we error, background writeback just spins. Should use some
 	 * mempools.
 	 */
 
-	while (1) {
+	while (!kthread_should_stop()) {
+		try_to_freeze();
+
 		w = bch_keybuf_next(&dc->writeback_keys);
 		if (!w)
 			break;
 
 		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
 
-		if (delay > 0 &&
-		    (KEY_START(&w->key) != dc->last_read ||
-		     jiffies_to_msecs(delay) > 50))
-			delay = schedule_timeout_uninterruptible(delay);
+		if (KEY_START(&w->key) != dc->last_read ||
+		    jiffies_to_msecs(delay) > 50)
+			while (!kthread_should_stop() && delay)
+				delay = schedule_timeout_interruptible(delay);
 
 		dc->last_read	= KEY_OFFSET(&w->key);
 
@@ -427,7 +271,7 @@ static void read_dirty(struct closure *cl)
 		trace_bcache_writeback(&w->key);
 
 		down(&dc->in_flight);
-		closure_call(&io->cl, read_dirty_submit, NULL, cl);
+		closure_call(&io->cl, read_dirty_submit, NULL, &cl);
 
 		delay = writeback_delay(dc, KEY_SIZE(&w->key));
 	}
@@ -443,7 +287,148 @@ err:
 	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
 	 * freed) before refilling again
 	 */
-	continue_at(cl, refill_dirty, dirty_wq);
+	closure_sync(&cl);
+}
+
+/* Scan for dirty data */
+
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+				  uint64_t offset, int nr_sectors)
+{
+	struct bcache_device *d = c->devices[inode];
+	unsigned stripe_offset;
+	uint64_t stripe = offset;
+
+	if (!d)
+		return;
+
+	do_div(stripe, d->stripe_size);
+
+	stripe_offset = offset & (d->stripe_size - 1);
+
+	while (nr_sectors) {
+		int s = min_t(unsigned, abs(nr_sectors),
+			      d->stripe_size - stripe_offset);
+
+		if (nr_sectors < 0)
+			s = -s;
+
+		atomic_add(s, d->stripe_sectors_dirty + stripe);
+		nr_sectors -= s;
+		stripe_offset = 0;
+		stripe++;
+	}
+}
+
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+{
+	return KEY_DIRTY(k);
+}
+
+static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
+{
+	uint64_t stripe = KEY_START(k);
+	unsigned nr_sectors = KEY_SIZE(k);
+	struct cached_dev *dc = container_of(buf, struct cached_dev,
+					     writeback_keys);
+
+	if (!KEY_DIRTY(k))
+		return false;
+
+	do_div(stripe, dc->disk.stripe_size);
+
+	while (1) {
+		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
+		    dc->disk.stripe_size)
+			return true;
+
+		if (nr_sectors <= dc->disk.stripe_size)
+			return false;
+
+		nr_sectors -= dc->disk.stripe_size;
+		stripe++;
+	}
+}
+
+static bool refill_dirty(struct cached_dev *dc)
+{
+	struct keybuf *buf = &dc->writeback_keys;
+	bool searched_from_start = false;
+	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+
+	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+		buf->last_scanned = KEY(dc->disk.id, 0, 0);
+		searched_from_start = true;
+	}
+
+	if (dc->partial_stripes_expensive) {
+		uint64_t i;
+
+		for (i = 0; i < dc->disk.nr_stripes; i++)
+			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
+			    dc->disk.stripe_size)
+				goto full_stripes;
+
+		goto normal_refill;
+full_stripes:
+		searched_from_start = false;	/* not searching entire btree */
+		bch_refill_keybuf(dc->disk.c, buf, &end,
+				  dirty_full_stripe_pred);
+	} else {
+normal_refill:
+		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+	}
+
+	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+	struct cached_dev *dc = arg;
+	bool searched_full_index;
+
+	while (!kthread_should_stop()) {
+		down_write(&dc->writeback_lock);
+		if (!atomic_read(&dc->has_dirty) ||
+		    (!atomic_read(&dc->disk.detaching) &&
+		     !dc->writeback_running)) {
+			up_write(&dc->writeback_lock);
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			if (kthread_should_stop())
+				return 0;
+
+			try_to_freeze();
+			schedule();
+			continue;
+		}
+
+		searched_full_index = refill_dirty(dc);
+
+		if (searched_full_index &&
+		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+			atomic_set(&dc->has_dirty, 0);
+			cached_dev_put(dc);
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		up_write(&dc->writeback_lock);
+
+		bch_ratelimit_reset(&dc->writeback_rate);
+		read_dirty(dc);
+
+		if (searched_full_index) {
+			unsigned delay = dc->writeback_delay * HZ;
+
+			while (delay &&
+			       !kthread_should_stop() &&
+			       !atomic_read(&dc->disk.detaching))
+				delay = schedule_timeout_interruptible(delay);
+		}
+	}
+
+	return 0;
 }
 
 /* Init */
@@ -483,12 +468,10 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
 }
 
-void bch_cached_dev_writeback_init(struct cached_dev *dc)
+int bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
 	sema_init(&dc->in_flight, 64);
-	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);
-
 	bch_keybuf_init(&dc->writeback_keys);
 
 	dc->writeback_metadata = true;
@@ -502,22 +485,16 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
 	dc->writeback_rate_p_term_inverse = 64;
 	dc->writeback_rate_d_smooth = 8;
 
+	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+					      "bcache_writeback");
+	if (IS_ERR(dc->writeback_thread))
+		return PTR_ERR(dc->writeback_thread);
+
+	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
+
 	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 	schedule_delayed_work(&dc->writeback_rate_update,
 			      dc->writeback_rate_update_seconds * HZ);
-}
-
-void bch_writeback_exit(void)
-{
-	if (dirty_wq)
-		destroy_workqueue(dirty_wq);
-}
-
-int __init bch_writeback_init(void)
-{
-	dirty_wq = create_workqueue("bcache_writeback");
-	if (!dirty_wq)
-		return -ENOMEM;
 
 	return 0;
 }
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 34961888b5a9..60516bfa6052 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -56,11 +56,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	    in_use <= CUTOFF_WRITEBACK;
 }
 
+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+	wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+	if (!atomic_read(&dc->has_dirty) &&
+	    !atomic_xchg(&dc->has_dirty, 1)) {
+		atomic_inc(&dc->count);
+
+		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+			/* XXX: should do this synchronously */
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		bch_writeback_queue(dc);
+	}
+}
+
 void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *);
 
 void bch_sectors_dirty_init(struct cached_dev *dc);
-void bch_cached_dev_writeback_init(struct cached_dev *);
+int bch_cached_dev_writeback_init(struct cached_dev *);
 
 #endif
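Taken together, the hunks above give writeback a conventional kthread lifecycle in place of the old workqueue/closure plumbing. A condensed paraphrase of how the patch wires it up (fragments only, error handling elided; each comment names the function the call actually lives in):

/* bch_cached_dev_writeback_init(): create the thread but leave it asleep
 * in TASK_INTERRUPTIBLE until there is dirty data to write back */
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
				      "bcache_writeback");

/* bch_writeback_queue(), now an inline in writeback.h: wake the thread
 * when bch_writeback_add() sees the first dirty key */
wake_up_process(dc->writeback_thread);

/* cached_dev_free(): stop the thread when the cached device goes away */
kthread_stop(dc->writeback_thread);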