Diffstat (limited to 'drivers/md/dm-bufio.c')
-rw-r--r--	drivers/md/dm-bufio.c	| 1699
1 file changed, 1699 insertions, 0 deletions

diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
new file mode 100644
index 00000000000..cb246667dd5
--- /dev/null
+++ b/drivers/md/dm-bufio.c
@@ -0,0 +1,1699 @@
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <linux/shrinker.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 * dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
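
/*
 * Worked example (illustrative only, not used by the code): on a machine
 * with 4 GiB of directly mapped memory, DM_BUFIO_MEMORY_PERCENT gives a
 * default cache size of about 4096 MiB * 2 / 100 ~= 82 MiB, shared among
 * all clients.  With 4 KiB blocks that is roughly 21000 buffers, and
 * background writeback starts once about 75% of them (~15700) are dirty.
 */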

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	10

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	60

/*
 * The number of bvec entries that are embedded directly in the buffer.
47 | */ | ||
 */
#define DM_BUFIO_INLINE_VECS		16

/*
 * Buffer hash
 */
#define DM_BUFIO_HASH_BITS	20
#define DM_BUFIO_HASH(block) \
	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
	 ((1 << DM_BUFIO_HASH_BITS) - 1))
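
/*
 * Example (illustrative): for block 0x123456789 the hash folds the high
 * bits into the low ones: ((0x123456789 >> 20) ^ 0x123456789) & 0xfffff
 * = (0x1234 ^ 0x56789) = 0x575bd, so the buffer lands in bucket 0x575bd
 * of the 2^20-entry hash table.
 */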

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 * All buffers are linked to cache_hash with their hash_list field.
 *
 * Clean buffers that are not being written (B_WRITING not set)
 * are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 * Dirty and clean buffers that are being written are linked to
 * lru[LIST_DIRTY] with their lru_list field. When the write
 * finishes, the buffer cannot be relinked immediately (because we
 * are in an interrupt context and relinking requires process
 * context), so some clean-not-writing buffers can be held on
 * dirty_lru too. They are later added to lru in the process
 * context.
 */
struct dm_bufio_client {
        struct mutex lock;

        struct list_head lru[LIST_SIZE];
        unsigned long n_buffers[LIST_SIZE];

        struct block_device *bdev;
        unsigned block_size;
        unsigned char sectors_per_block_bits;
        unsigned char pages_per_block_bits;
        unsigned char blocks_per_page_bits;
        unsigned aux_size;
        void (*alloc_callback)(struct dm_buffer *);
        void (*write_callback)(struct dm_buffer *);

        struct dm_io_client *dm_io;

        struct list_head reserved_buffers;
        unsigned need_reserved_buffers;

        struct hlist_head *cache_hash;
        wait_queue_head_t free_buffer_wait;

        int async_write_error;

        struct list_head client_list;
        struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
        DATA_MODE_SLAB = 0,
        DATA_MODE_GET_FREE_PAGES = 1,
        DATA_MODE_VMALLOC = 2,
        DATA_MODE_LIMIT = 3
};

struct dm_buffer {
        struct hlist_node hash_list;
        struct list_head lru_list;
        sector_t block;
        void *data;
        enum data_mode data_mode;
        unsigned char list_mode;	/* LIST_* */
        unsigned hold_count;
        int read_error;
        int write_error;
        unsigned long state;
        unsigned long last_accessed;
        struct dm_bufio_client *c;
        struct bio bio;
        struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
        unsigned ret = c->blocks_per_page_bits - 1;

        BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

        return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
        mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
        return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
        mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
# define dm_bufio_cond_resched()		\
do {						\
        if (unlikely(need_resched()))		\
                _cond_resched();		\
} while (0)
#else
# define dm_bufio_cond_resched()	do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
        static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
                &dm_bufio_allocated_kmem_cache,
                &dm_bufio_allocated_get_free_pages,
                &dm_bufio_allocated_vmalloc,
        };

        spin_lock(&param_spinlock);

        *class_ptr[data_mode] += diff;

        dm_bufio_current_allocated += diff;

        if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
                dm_bufio_peak_allocated = dm_bufio_current_allocated;

        spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
        BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
        BUG_ON(dm_bufio_client_count < 0);

        dm_bufio_cache_size_latch = dm_bufio_cache_size;

        barrier();

        /*
         * Use default if set to 0 and report the actual cache size used.
         */
        if (!dm_bufio_cache_size_latch) {
                (void)cmpxchg(&dm_bufio_cache_size, 0,
                              dm_bufio_default_cache_size);
                dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
        }

        dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
                                         (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               enum data_mode *data_mode)
{
        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
        }

        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
            gfp_mask & __GFP_NORETRY) {
                *data_mode = DATA_MODE_GET_FREE_PAGES;
                return (void *)__get_free_pages(gfp_mask,
                                                c->pages_per_block_bits);
        }

        *data_mode = DATA_MODE_VMALLOC;
        return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}
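
/*
 * Example (illustrative): with 4 KiB pages, a 512-byte block fits under
 * DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (2 KiB) and comes from the slab cache.
 * A 64 KiB block with __GFP_NORETRY set is taken from __get_free_pages
 * at order 4 (pages_per_block_bits); without __GFP_NORETRY (the reserve
 * allocation) it falls through to __vmalloc.
 */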

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
                             void *data, enum data_mode data_mode)
{
        switch (data_mode) {
        case DATA_MODE_SLAB:
                kmem_cache_free(DM_BUFIO_CACHE(c), data);
                break;

        case DATA_MODE_GET_FREE_PAGES:
                free_pages((unsigned long)data, c->pages_per_block_bits);
                break;

        case DATA_MODE_VMALLOC:
                vfree(data);
                break;

        default:
                DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
                       data_mode);
                BUG();
        }
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
        struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
                                      gfp_mask);

        if (!b)
                return NULL;

        b->c = c;

        b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
        if (!b->data) {
                kfree(b);
                return NULL;
        }

        adjust_total_allocated(b->data_mode, (long)c->block_size);

        return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        adjust_total_allocated(b->data_mode, -(long)c->block_size);

        free_buffer_data(c, b->data, b->data_mode);
        kfree(b);
}

/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
        struct dm_bufio_client *c = b->c;

        c->n_buffers[dirty]++;
        b->block = block;
        b->list_mode = dirty;
        list_add(&b->lru_list, &c->lru[dirty]);
        hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
        b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        hlist_del(&b->hash_list);
        list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        c->n_buffers[dirty]++;
        b->list_mode = dirty;
        list_del(&b->lru_list);
        list_add(&b->lru_list, &c->lru[dirty]);
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
        struct dm_buffer *b = context;

        b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
                     bio_end_io_t *end_io)
{
        int r;
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .notify.fn = dmio_complete,
                .notify.context = b,
                .client = b->c->dm_io,
        };
        struct dm_io_region region = {
                .bdev = b->c->bdev,
                .sector = block << b->c->sectors_per_block_bits,
                .count = b->c->block_size >> SECTOR_SHIFT,
        };

        if (b->data_mode != DATA_MODE_VMALLOC) {
                io_req.mem.type = DM_IO_KMEM;
                io_req.mem.ptr.addr = b->data;
        } else {
                io_req.mem.type = DM_IO_VMA;
                io_req.mem.ptr.vma = b->data;
        }

        b->bio.bi_end_io = end_io;

        r = dm_io(&io_req, 1, &region, NULL);
        if (r)
                end_io(&b->bio, r);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
                           bio_end_io_t *end_io)
{
        char *ptr;
        int len;

        bio_init(&b->bio);
        b->bio.bi_io_vec = b->bio_vec;
        b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
        b->bio.bi_sector = block << b->c->sectors_per_block_bits;
        b->bio.bi_bdev = b->c->bdev;
        b->bio.bi_end_io = end_io;

        /*
         * We assume that if len >= PAGE_SIZE ptr is page-aligned.
         * If len < PAGE_SIZE the buffer doesn't cross page boundary.
         */
        ptr = b->data;
        len = b->c->block_size;

        if (len >= PAGE_SIZE)
                BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
        else
                BUG_ON((unsigned long)ptr & (len - 1));

        do {
                if (!bio_add_page(&b->bio, virt_to_page(ptr),
                                  len < PAGE_SIZE ? len : PAGE_SIZE,
                                  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
                        BUG_ON(b->c->block_size <= PAGE_SIZE);
                        use_dmio(b, rw, block, end_io);
                        return;
                }

                len -= PAGE_SIZE;
                ptr += PAGE_SIZE;
        } while (len > 0);

        submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
                      bio_end_io_t *end_io)
{
        if (rw == WRITE && b->c->write_callback)
                b->c->write_callback(b);

        if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
            b->data_mode != DATA_MODE_VMALLOC)
                use_inline_bio(b, rw, block, end_io);
        else
                use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

        b->write_error = error;
        if (error) {
                struct dm_bufio_client *c = b->c;
                (void)cmpxchg(&c->async_write_error, 0, error);
        }

        BUG_ON(!test_bit(B_WRITING, &b->state));

        smp_mb__before_clear_bit();
        clear_bit(B_WRITING, &b->state);
        smp_mb__after_clear_bit();

        wake_up_bit(&b->state, B_WRITING);
}

/*
 * This function is called when wait_on_bit is actually waiting.
 */
static int do_io_schedule(void *word)
{
        io_schedule();

        return 0;
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b)
{
        if (!test_bit(B_DIRTY, &b->state))
                return;

        clear_bit(B_DIRTY, &b->state);
        wait_on_bit_lock(&b->state, B_WRITING,
                         do_io_schedule, TASK_UNINTERRUPTIBLE);

        submit_io(b, WRITE, b->block, write_endio);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
        BUG_ON(b->hold_count);

        if (!b->state)	/* fast case */
                return;

        wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
        __write_dirty_buffer(b);
        wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
        struct dm_buffer *b;

        list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
                BUG_ON(test_bit(B_WRITING, &b->state));
                BUG_ON(test_bit(B_DIRTY, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                dm_bufio_cond_resched();
        }

        list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                dm_bufio_cond_resched();
        }

        return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&c->free_buffer_wait, &wait);
        set_task_state(current, TASK_UNINTERRUPTIBLE);
        dm_bufio_unlock(c);

        io_schedule();

        set_task_state(current, TASK_RUNNING);
        remove_wait_queue(&c->free_buffer_wait, &wait);

        dm_bufio_lock(c);
}

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c)
{
        struct dm_buffer *b;

        /*
         * dm-bufio is resistant to allocation failures (it just keeps
711 | * dm-bufio is resistant to allocation failures (it just keeps | ||
712 | * one buffer reserved in cases all the allocations fail). | ||
713 | * So set flags to not try too hard: | ||
714 | * GFP_NOIO: don't recurse into the I/O layer | ||
715 | * __GFP_NORETRY: don't retry and rather return failure | ||
716 | * __GFP_NOMEMALLOC: don't use emergency reserves | ||
717 | * __GFP_NOWARN: don't print a warning in case of failure | ||
718 | * | ||
719 | * For debugging, if we set the cache size to 1, no new buffers will | ||
720 | * be allocated. | ||
721 | */ | ||
722 | while (1) { | ||
723 | if (dm_bufio_cache_size_latch != 1) { | ||
724 | b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); | ||
725 | if (b) | ||
726 | return b; | ||
727 | } | ||
728 | |||
729 | if (!list_empty(&c->reserved_buffers)) { | ||
730 | b = list_entry(c->reserved_buffers.next, | ||
731 | struct dm_buffer, lru_list); | ||
732 | list_del(&b->lru_list); | ||
733 | c->need_reserved_buffers++; | ||
734 | |||
735 | return b; | ||
736 | } | ||
737 | |||
738 | b = __get_unclaimed_buffer(c); | ||
739 | if (b) | ||
740 | return b; | ||
741 | |||
742 | __wait_for_free_buffer(c); | ||
743 | } | ||
744 | } | ||
745 | |||
746 | static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c) | ||
747 | { | ||
748 | struct dm_buffer *b = __alloc_buffer_wait_no_callback(c); | ||
749 | |||
750 | if (c->alloc_callback) | ||
751 | c->alloc_callback(b); | ||
752 | |||
753 | return b; | ||
754 | } | ||
755 | |||
756 | /* | ||
757 | * Free a buffer and wake other threads waiting for free buffers. | ||
758 | */ | ||
759 | static void __free_buffer_wake(struct dm_buffer *b) | ||
760 | { | ||
761 | struct dm_bufio_client *c = b->c; | ||
762 | |||
763 | if (!c->need_reserved_buffers) | ||
764 | free_buffer(b); | ||
765 | else { | ||
766 | list_add(&b->lru_list, &c->reserved_buffers); | ||
767 | c->need_reserved_buffers--; | ||
768 | } | ||
769 | |||
770 | wake_up(&c->free_buffer_wait); | ||
771 | } | ||
772 | |||
773 | static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait) | ||
774 | { | ||
775 | struct dm_buffer *b, *tmp; | ||
776 | |||
777 | list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { | ||
778 | BUG_ON(test_bit(B_READING, &b->state)); | ||
779 | |||
780 | if (!test_bit(B_DIRTY, &b->state) && | ||
781 | !test_bit(B_WRITING, &b->state)) { | ||
782 | __relink_lru(b, LIST_CLEAN); | ||
783 | continue; | ||
784 | } | ||
785 | |||
786 | if (no_wait && test_bit(B_WRITING, &b->state)) | ||
787 | return; | ||
788 | |||
789 | __write_dirty_buffer(b); | ||
790 | dm_bufio_cond_resched(); | ||
791 | } | ||
792 | } | ||
793 | |||
794 | /* | ||
795 | * Get writeback threshold and buffer limit for a given client. | ||
796 | */ | ||
797 | static void __get_memory_limit(struct dm_bufio_client *c, | ||
798 | unsigned long *threshold_buffers, | ||
799 | unsigned long *limit_buffers) | ||
800 | { | ||
801 | unsigned long buffers; | ||
802 | |||
803 | if (dm_bufio_cache_size != dm_bufio_cache_size_latch) { | ||
804 | mutex_lock(&dm_bufio_clients_lock); | ||
805 | __cache_size_refresh(); | ||
806 | mutex_unlock(&dm_bufio_clients_lock); | ||
807 | } | ||
808 | |||
809 | buffers = dm_bufio_cache_size_per_client >> | ||
810 | (c->sectors_per_block_bits + SECTOR_SHIFT); | ||
811 | |||
812 | if (buffers < DM_BUFIO_MIN_BUFFERS) | ||
813 | buffers = DM_BUFIO_MIN_BUFFERS; | ||
814 | |||
815 | *limit_buffers = buffers; | ||
816 | *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; | ||
817 | } | ||
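
/*
 * Worked example (illustrative): with a 100 MiB per-client share and 4 KiB
 * blocks (sectors_per_block_bits == 3), buffers = 100 MiB >> (3 + 9) =
 * 25600, so the limit is 25600 buffers and background writeback starts at
 * 25600 * 75 / 100 = 19200 dirty buffers.
 */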

/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c)
{
        unsigned long threshold_buffers, limit_buffers;

        __get_memory_limit(c, &threshold_buffers, &limit_buffers);

        while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
               limit_buffers) {

                struct dm_buffer *b = __get_unclaimed_buffer(c);

                if (!b)
                        return;

                __free_buffer_wake(b);
                dm_bufio_cond_resched();
        }

        if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
                __write_dirty_buffers_async(c, 1);
}

/*
 * Find a buffer in the hash.
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
        struct dm_buffer *b;
        struct hlist_node *hn;

        hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
                             hash_list) {
                dm_bufio_cond_resched();
                if (b->block == block)
                        return b;
        }

        return NULL;
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

enum new_flag {
        NF_FRESH = 0,
        NF_READ = 1,
        NF_GET = 2
};

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
                                     enum new_flag nf, struct dm_buffer **bp,
                                     int *need_submit)
{
        struct dm_buffer *b, *new_b = NULL;

        *need_submit = 0;

        b = __find(c, block);
        if (b) {
                b->hold_count++;
                __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
                             test_bit(B_WRITING, &b->state));
                return b;
        }

        if (nf == NF_GET)
                return NULL;

        new_b = __alloc_buffer_wait(c);

        /*
         * We've had a period where the mutex was unlocked, so we need to
         * recheck the hash table.
         */
        b = __find(c, block);
        if (b) {
                __free_buffer_wake(new_b);
                b->hold_count++;
                __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
                             test_bit(B_WRITING, &b->state));
                return b;
        }

        __check_watermark(c);

        b = new_b;
        b->hold_count = 1;
        b->read_error = 0;
        b->write_error = 0;
        __link_buffer(b, block, LIST_CLEAN);

        if (nf == NF_FRESH) {
                b->state = 0;
                return b;
        }

        b->state = 1 << B_READING;
        *need_submit = 1;

        return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

        b->read_error = error;

        BUG_ON(!test_bit(B_READING, &b->state));

        smp_mb__before_clear_bit();
        clear_bit(B_READING, &b->state);
        smp_mb__after_clear_bit();

        wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
                      enum new_flag nf, struct dm_buffer **bp)
{
        int need_submit;
        struct dm_buffer *b;

        dm_bufio_lock(c);
        b = __bufio_new(c, block, nf, bp, &need_submit);
        dm_bufio_unlock(c);

        if (!b || IS_ERR(b))
                return b;

        if (need_submit)
                submit_io(b, READ, b->block, read_endio);

        wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);

        if (b->read_error) {
                int error = b->read_error;

                dm_bufio_release(b);

                return ERR_PTR(error);
        }

        *bp = b;

        return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
                    struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

void dm_bufio_release(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        BUG_ON(test_bit(B_READING, &b->state));
        BUG_ON(!b->hold_count);

        b->hold_count--;
        if (!b->hold_count) {
                wake_up(&c->free_buffer_wait);

                /*
                 * If there were errors on the buffer, and the buffer is not
                 * to be written, free the buffer. There is no point in caching
                 * an invalid buffer.
                 */
                if ((b->read_error || b->write_error) &&
                    !test_bit(B_WRITING, &b->state) &&
                    !test_bit(B_DIRTY, &b->state)) {
                        __unlink_buffer(b);
                        __free_buffer_wake(b);
                }
        }

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        if (!test_and_set_bit(B_DIRTY, &b->state))
                __relink_lru(b, LIST_DIRTY);

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0);
        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
        int a, f;
        unsigned long buffers_processed = 0;
        struct dm_buffer *b, *tmp;

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0);

again:
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                int dropped_lock = 0;

                if (buffers_processed < c->n_buffers[LIST_DIRTY])
                        buffers_processed++;

                BUG_ON(test_bit(B_READING, &b->state));

                if (test_bit(B_WRITING, &b->state)) {
                        if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
                                dropped_lock = 1;
                                b->hold_count++;
                                dm_bufio_unlock(c);
                                wait_on_bit(&b->state, B_WRITING,
                                            do_io_schedule,
                                            TASK_UNINTERRUPTIBLE);
                                dm_bufio_lock(c);
                                b->hold_count--;
                        } else
                                wait_on_bit(&b->state, B_WRITING,
                                            do_io_schedule,
                                            TASK_UNINTERRUPTIBLE);
                }

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state))
                        __relink_lru(b, LIST_CLEAN);

                dm_bufio_cond_resched();

                /*
                 * If we dropped the lock, the list is no longer consistent,
                 * so we must restart the search.
                 *
                 * In the most common case, the buffer just processed is
                 * relinked to the clean list, so we won't loop scanning the
                 * same buffer again and again.
                 *
                 * This may livelock if there is another thread simultaneously
                 * dirtying buffers, so we count the number of buffers walked
                 * and if it exceeds the total number of buffers, it means that
                 * someone is doing some writes simultaneously with us. In
                 * this case, stop, dropping the lock.
                 */
                if (dropped_lock)
                        goto again;
        }
        wake_up(&c->free_buffer_wait);
        dm_bufio_unlock(c);

        a = xchg(&c->async_write_error, 0);
        f = dm_bufio_issue_flush(c);
        if (a)
                return a;

        return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
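
/*
 * Typical use of the interface above (an illustrative sketch, not part of
 * the driver; "c", "block" and "new_contents" are assumed to come from the
 * caller):
 *
 *	struct dm_buffer *b;
 *	void *data = dm_bufio_read(c, block, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memcpy(data, new_contents, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(b);
 *	dm_bufio_release(b);
 *	return dm_bufio_write_dirty_buffers(c);
 */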

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
        struct dm_io_request io_req = {
                .bi_rw = REQ_FLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
        };
        struct dm_io_region io_reg = {
                .bdev = c->bdev,
                .sector = 0,
                .count = 0,
        };

        BUG_ON(dm_bufio_in_request());

        return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
        struct dm_bufio_client *c = b->c;
        struct dm_buffer *new;

        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);

retry:
        new = __find(c, new_block);
        if (new) {
                if (new->hold_count) {
                        __wait_for_free_buffer(c);
                        goto retry;
                }

                /*
                 * FIXME: Is there any point waiting for a write that's going
                 * to be overwritten in a bit?
                 */
                __make_buffer_clean(new);
                __unlink_buffer(new);
                __free_buffer_wake(new);
        }

        BUG_ON(!b->hold_count);
        BUG_ON(test_bit(B_READING, &b->state));

        __write_dirty_buffer(b);
        if (b->hold_count == 1) {
                wait_on_bit(&b->state, B_WRITING,
                            do_io_schedule, TASK_UNINTERRUPTIBLE);
                set_bit(B_DIRTY, &b->state);
                __unlink_buffer(b);
                __link_buffer(b, new_block, LIST_DIRTY);
        } else {
                sector_t old_block;
                wait_on_bit_lock(&b->state, B_WRITING,
                                 do_io_schedule, TASK_UNINTERRUPTIBLE);
                /*
                 * Relink buffer to "new_block" so that write_callback
                 * sees "new_block" as a block number.
                 * After the write, link the buffer back to old_block.
                 * All this must be done in bufio lock, so that block number
                 * change isn't visible to other threads.
                 */
                old_block = b->block;
                __unlink_buffer(b);
                __link_buffer(b, new_block, b->list_mode);
                submit_io(b, WRITE, new_block, write_endio);
                wait_on_bit(&b->state, B_WRITING,
                            do_io_schedule, TASK_UNINTERRUPTIBLE);
                __unlink_buffer(b);
                __link_buffer(b, old_block, b->list_mode);
        }

        dm_bufio_unlock(c);
        dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
        return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
        return i_size_read(c->bdev->bd_inode) >>
               (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
        return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
        return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
        return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
        return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
        struct dm_buffer *b;
        int i;

        BUG_ON(dm_bufio_in_request());

        /*
         * An optimization so that the buffers are not written one-by-one.
         */
        dm_bufio_write_dirty_buffers_async(c);

        dm_bufio_lock(c);

        while ((b = __get_unclaimed_buffer(c)))
                __free_buffer_wake(b);

        for (i = 0; i < LIST_SIZE; i++)
                list_for_each_entry(b, &c->lru[i], lru_list)
                        DMERR("leaked buffer %llx, hold count %u, list %d",
                              (unsigned long long)b->block, b->hold_count, i);

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(!list_empty(&c->lru[i]));

        dm_bufio_unlock(c);
}

/*
 * Test if the buffer is unused and too old, and commit it.
 * If noio is set, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
 * a different bufio client.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
                                unsigned long max_jiffies)
{
        if (jiffies - b->last_accessed < max_jiffies)
                return 1;

        if (!(gfp & __GFP_IO)) {
                if (test_bit(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
                        return 1;
        }

        if (b->hold_count)
                return 1;

        __make_buffer_clean(b);
        __unlink_buffer(b);
        __free_buffer_wake(b);

        return 0;
}

static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
                   struct shrink_control *sc)
{
        int l;
        struct dm_buffer *b, *tmp;

        for (l = 0; l < LIST_SIZE; l++) {
                list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
                        if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
                            !--nr_to_scan)
                                return;
                dm_bufio_cond_resched();
        }
}

static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct dm_bufio_client *c =
            container_of(shrinker, struct dm_bufio_client, shrinker);
        unsigned long r;
        unsigned long nr_to_scan = sc->nr_to_scan;

        if (sc->gfp_mask & __GFP_IO)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
                return !nr_to_scan ? 0 : -1;

        if (nr_to_scan)
                __scan(c, nr_to_scan, sc);

        r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
        if (r > INT_MAX)
                r = INT_MAX;

        dm_bufio_unlock(c);

        return r;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
                                               unsigned reserved_buffers, unsigned aux_size,
                                               void (*alloc_callback)(struct dm_buffer *),
                                               void (*write_callback)(struct dm_buffer *))
{
        int r;
        struct dm_bufio_client *c;
        unsigned i;

        BUG_ON(block_size < 1 << SECTOR_SHIFT ||
               (block_size & (block_size - 1)));

        c = kmalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                r = -ENOMEM;
                goto bad_client;
        }
        c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
        if (!c->cache_hash) {
                r = -ENOMEM;
                goto bad_hash;
        }

        c->bdev = bdev;
        c->block_size = block_size;
        c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
        c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
                                  ffs(block_size) - 1 - PAGE_SHIFT : 0;
        c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
                                   PAGE_SHIFT - (ffs(block_size) - 1) : 0);

        c->aux_size = aux_size;
        c->alloc_callback = alloc_callback;
        c->write_callback = write_callback;

        for (i = 0; i < LIST_SIZE; i++) {
                INIT_LIST_HEAD(&c->lru[i]);
                c->n_buffers[i] = 0;
        }

        for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
                INIT_HLIST_HEAD(&c->cache_hash[i]);

        mutex_init(&c->lock);
        INIT_LIST_HEAD(&c->reserved_buffers);
        c->need_reserved_buffers = reserved_buffers;

        init_waitqueue_head(&c->free_buffer_wait);
        c->async_write_error = 0;

        c->dm_io = dm_io_client_create();
        if (IS_ERR(c->dm_io)) {
                r = PTR_ERR(c->dm_io);
                goto bad_dm_io;
        }

        mutex_lock(&dm_bufio_clients_lock);
        if (c->blocks_per_page_bits) {
                if (!DM_BUFIO_CACHE_NAME(c)) {
                        DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
                        if (!DM_BUFIO_CACHE_NAME(c)) {
                                r = -ENOMEM;
                                mutex_unlock(&dm_bufio_clients_lock);
                                goto bad_cache;
                        }
                }

                if (!DM_BUFIO_CACHE(c)) {
                        DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
                                                              c->block_size,
                                                              c->block_size, 0, NULL);
                        if (!DM_BUFIO_CACHE(c)) {
                                r = -ENOMEM;
                                mutex_unlock(&dm_bufio_clients_lock);
                                goto bad_cache;
                        }
                }
        }
        mutex_unlock(&dm_bufio_clients_lock);

        while (c->need_reserved_buffers) {
                struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

                if (!b) {
                        r = -ENOMEM;
                        goto bad_buffer;
                }
                __free_buffer_wake(b);
        }

        mutex_lock(&dm_bufio_clients_lock);
        dm_bufio_client_count++;
        list_add(&c->client_list, &dm_bufio_all_clients);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

        c->shrinker.shrink = shrink;
        c->shrinker.seeks = 1;
        c->shrinker.batch = 0;
        register_shrinker(&c->shrinker);

        return c;

bad_buffer:
bad_cache:
        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }
        dm_io_client_destroy(c->dm_io);
bad_dm_io:
        vfree(c->cache_hash);
bad_hash:
        kfree(c);
bad_client:
        return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
        unsigned i;

        drop_buffers(c);

        unregister_shrinker(&c->shrinker);

        mutex_lock(&dm_bufio_clients_lock);

        list_del(&c->client_list);
        dm_bufio_client_count--;
        __cache_size_refresh();

        mutex_unlock(&dm_bufio_clients_lock);

        for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
                BUG_ON(!hlist_empty(&c->cache_hash[i]));

        BUG_ON(c->need_reserved_buffers);

        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }

        for (i = 0; i < LIST_SIZE; i++)
                if (c->n_buffers[i])
                        DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(c->n_buffers[i]);

        dm_io_client_destroy(c->dm_io);
        vfree(c->cache_hash);
        kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
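
/*
 * Client lifecycle sketch (illustrative; the 4096-byte block size, the
 * single reserved buffer and "bdev" are assumptions made for the example):
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...access blocks with dm_bufio_read/dm_bufio_new/dm_bufio_get...
 *	dm_bufio_client_destroy(c);
 */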

static void cleanup_old_buffers(void)
{
        unsigned long max_age = dm_bufio_max_age;
        struct dm_bufio_client *c;

        barrier();

        if (max_age > ULONG_MAX / HZ)
                max_age = ULONG_MAX / HZ;

        mutex_lock(&dm_bufio_clients_lock);
        list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
                if (!dm_bufio_trylock(c))
                        continue;

                while (!list_empty(&c->lru[LIST_CLEAN])) {
                        struct dm_buffer *b;
                        b = list_entry(c->lru[LIST_CLEAN].prev,
                                       struct dm_buffer, lru_list);
                        if (__cleanup_old_buffer(b, 0, max_age * HZ))
                                break;
                        dm_bufio_cond_resched();
                }

                dm_bufio_unlock(c);
                dm_bufio_cond_resched();
        }
        mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
        cleanup_old_buffers();

        queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
        __u64 mem;

        memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
        memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

        mem = (__u64)((totalram_pages - totalhigh_pages) *
                      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

        if (mem > ULONG_MAX)
                mem = ULONG_MAX;

#ifdef CONFIG_MMU
        /*
         * Get the size of vmalloc space the same way as VMALLOC_TOTAL
         * in fs/proc/internal.h
         */
        if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
                mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

        dm_bufio_default_cache_size = mem;

        mutex_lock(&dm_bufio_clients_lock);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

        dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
        if (!dm_bufio_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
        queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);

        return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
        int bug = 0;
        int i;

        cancel_delayed_work_sync(&dm_bufio_work);
        destroy_workqueue(dm_bufio_wq);

        for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
                struct kmem_cache *kc = dm_bufio_caches[i];

                if (kc)
                        kmem_cache_destroy(kc);
        }

        for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
                kfree(dm_bufio_cache_names[i]);

        if (dm_bufio_client_count) {
                DMCRIT("%s: dm_bufio_client_count leaked: %d",
                       __func__, dm_bufio_client_count);
                bug = 1;
        }

        if (dm_bufio_current_allocated) {
                DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
                       __func__, dm_bufio_current_allocated);
                bug = 1;
        }

        if (dm_bufio_allocated_get_free_pages) {
                DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
                       __func__, dm_bufio_allocated_get_free_pages);
                bug = 1;
        }

        if (dm_bufio_allocated_vmalloc) {
                DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
                       __func__, dm_bufio_allocated_vmalloc);
                bug = 1;
        }

        if (bug)
                BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");