author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-12 21:57:57 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-12 21:57:57 -0400
commit     79c63eeb805d086f52e5efda9c8d321beeed0b2b
tree       8efd4619d384a7296f5a7f08149dc7d468f63aa2 /drivers
parent     6a5a3d6a4adde0c66f3be29bbd7c0d6ffb7e1a40
parent     dba141601d1327146c84b575bd581ea8730e901c
Merge tag 'dm-3.7-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm
Pull device-mapper changes from Alasdair G Kergon:
"Remove the power-of-2 block size constraint on discards in dm thin
provisioning and factor the bio_prison code out into a separate module
(for sharing with the forthcoming cache target).
Use struct bio's front_pad to eliminate the use of one separate
mempool by bio-based devices.
A few other tiny clean-ups."
* tag 'dm-3.7-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm:
dm: store dm_target_io in bio front_pad
dm thin: move bio_prison code to separate module
dm thin: prepare to separate bio_prison code
dm thin: support discard with non power of two block size
dm persistent data: convert to use le32_add_cpu
dm: use ACCESS_ONCE for sysfs values
dm bufio: use list_move
dm mpath: fix check for null mpio in end_io fn
Diffstat (limited to 'drivers')
 drivers/md/Kconfig                                 |   8
 drivers/md/Makefile                                |   1
 drivers/md/dm-bio-prison.c                         | 415
 drivers/md/dm-bio-prison.h                         |  72
 drivers/md/dm-bufio.c                              |  13
 drivers/md/dm-mpath.c                              |   3
 drivers/md/dm-thin.c                               | 521
 drivers/md/dm-verity.c                             |   2
 drivers/md/dm.c                                    | 108
 drivers/md/persistent-data/dm-space-map-common.c   |   4
 10 files changed, 636 insertions, 511 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index d949b781f6f8..91a02eeeb319 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -216,6 +216,13 @@ config DM_BUFIO
 	  as a cache, holding recently-read blocks in memory and performing
 	  delayed writes.
 
+config DM_BIO_PRISON
+	tristate
+	depends on BLK_DEV_DM && EXPERIMENTAL
+	---help---
+	 Some bio locking schemes used by other device-mapper targets
+	 including thin provisioning.
+
 source "drivers/md/persistent-data/Kconfig"
 
 config DM_CRYPT
@@ -247,6 +254,7 @@ config DM_THIN_PROVISIONING
 	tristate "Thin provisioning target (EXPERIMENTAL)"
 	depends on BLK_DEV_DM && EXPERIMENTAL
 	select DM_PERSISTENT_DATA
+	select DM_BIO_PRISON
 	---help---
 	  Provides thin provisioning and snapshots that share a data store.
 
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 8b2e0dffe82e..94dce8b49324 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
 obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
 obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
 obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
+obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
 obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
 obj-$(CONFIG_DM_DELAY) += dm-delay.o
 obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
new file mode 100644
index 000000000000..e4e841567459
--- /dev/null
+++ b/drivers/md/dm-bio-prison.c
@@ -0,0 +1,415 @@
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat, Inc. | ||
3 | * | ||
4 | * This file is released under the GPL. | ||
5 | */ | ||
6 | |||
7 | #include "dm.h" | ||
8 | #include "dm-bio-prison.h" | ||
9 | |||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/mempool.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/slab.h> | ||
14 | |||
15 | /*----------------------------------------------------------------*/ | ||
16 | |||
17 | struct dm_bio_prison_cell { | ||
18 | struct hlist_node list; | ||
19 | struct dm_bio_prison *prison; | ||
20 | struct dm_cell_key key; | ||
21 | struct bio *holder; | ||
22 | struct bio_list bios; | ||
23 | }; | ||
24 | |||
25 | struct dm_bio_prison { | ||
26 | spinlock_t lock; | ||
27 | mempool_t *cell_pool; | ||
28 | |||
29 | unsigned nr_buckets; | ||
30 | unsigned hash_mask; | ||
31 | struct hlist_head *cells; | ||
32 | }; | ||
33 | |||
34 | /*----------------------------------------------------------------*/ | ||
35 | |||
36 | static uint32_t calc_nr_buckets(unsigned nr_cells) | ||
37 | { | ||
38 | uint32_t n = 128; | ||
39 | |||
40 | nr_cells /= 4; | ||
41 | nr_cells = min(nr_cells, 8192u); | ||
42 | |||
43 | while (n < nr_cells) | ||
44 | n <<= 1; | ||
45 | |||
46 | return n; | ||
47 | } | ||
48 | |||
49 | static struct kmem_cache *_cell_cache; | ||
50 | |||
51 | /* | ||
52 | * @nr_cells should be the number of cells you want in use _concurrently_. | ||
53 | * Don't confuse it with the number of distinct keys. | ||
54 | */ | ||
55 | struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells) | ||
56 | { | ||
57 | unsigned i; | ||
58 | uint32_t nr_buckets = calc_nr_buckets(nr_cells); | ||
59 | size_t len = sizeof(struct dm_bio_prison) + | ||
60 | (sizeof(struct hlist_head) * nr_buckets); | ||
61 | struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL); | ||
62 | |||
63 | if (!prison) | ||
64 | return NULL; | ||
65 | |||
66 | spin_lock_init(&prison->lock); | ||
67 | prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache); | ||
68 | if (!prison->cell_pool) { | ||
69 | kfree(prison); | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | prison->nr_buckets = nr_buckets; | ||
74 | prison->hash_mask = nr_buckets - 1; | ||
75 | prison->cells = (struct hlist_head *) (prison + 1); | ||
76 | for (i = 0; i < nr_buckets; i++) | ||
77 | INIT_HLIST_HEAD(prison->cells + i); | ||
78 | |||
79 | return prison; | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(dm_bio_prison_create); | ||
82 | |||
83 | void dm_bio_prison_destroy(struct dm_bio_prison *prison) | ||
84 | { | ||
85 | mempool_destroy(prison->cell_pool); | ||
86 | kfree(prison); | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(dm_bio_prison_destroy); | ||
89 | |||
90 | static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key) | ||
91 | { | ||
92 | const unsigned long BIG_PRIME = 4294967291UL; | ||
93 | uint64_t hash = key->block * BIG_PRIME; | ||
94 | |||
95 | return (uint32_t) (hash & prison->hash_mask); | ||
96 | } | ||
97 | |||
98 | static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs) | ||
99 | { | ||
100 | return (lhs->virtual == rhs->virtual) && | ||
101 | (lhs->dev == rhs->dev) && | ||
102 | (lhs->block == rhs->block); | ||
103 | } | ||
104 | |||
105 | static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket, | ||
106 | struct dm_cell_key *key) | ||
107 | { | ||
108 | struct dm_bio_prison_cell *cell; | ||
109 | struct hlist_node *tmp; | ||
110 | |||
111 | hlist_for_each_entry(cell, tmp, bucket, list) | ||
112 | if (keys_equal(&cell->key, key)) | ||
113 | return cell; | ||
114 | |||
115 | return NULL; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * This may block if a new cell needs allocating. You must ensure that | ||
120 | * cells will be unlocked even if the calling thread is blocked. | ||
121 | * | ||
122 | * Returns 1 if the cell was already held, 0 if @inmate is the new holder. | ||
123 | */ | ||
124 | int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key, | ||
125 | struct bio *inmate, struct dm_bio_prison_cell **ref) | ||
126 | { | ||
127 | int r = 1; | ||
128 | unsigned long flags; | ||
129 | uint32_t hash = hash_key(prison, key); | ||
130 | struct dm_bio_prison_cell *cell, *cell2; | ||
131 | |||
132 | BUG_ON(hash > prison->nr_buckets); | ||
133 | |||
134 | spin_lock_irqsave(&prison->lock, flags); | ||
135 | |||
136 | cell = __search_bucket(prison->cells + hash, key); | ||
137 | if (cell) { | ||
138 | bio_list_add(&cell->bios, inmate); | ||
139 | goto out; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Allocate a new cell | ||
144 | */ | ||
145 | spin_unlock_irqrestore(&prison->lock, flags); | ||
146 | cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO); | ||
147 | spin_lock_irqsave(&prison->lock, flags); | ||
148 | |||
149 | /* | ||
150 | * We've been unlocked, so we have to double check that | ||
151 | * nobody else has inserted this cell in the meantime. | ||
152 | */ | ||
153 | cell = __search_bucket(prison->cells + hash, key); | ||
154 | if (cell) { | ||
155 | mempool_free(cell2, prison->cell_pool); | ||
156 | bio_list_add(&cell->bios, inmate); | ||
157 | goto out; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * Use new cell. | ||
162 | */ | ||
163 | cell = cell2; | ||
164 | |||
165 | cell->prison = prison; | ||
166 | memcpy(&cell->key, key, sizeof(cell->key)); | ||
167 | cell->holder = inmate; | ||
168 | bio_list_init(&cell->bios); | ||
169 | hlist_add_head(&cell->list, prison->cells + hash); | ||
170 | |||
171 | r = 0; | ||
172 | |||
173 | out: | ||
174 | spin_unlock_irqrestore(&prison->lock, flags); | ||
175 | |||
176 | *ref = cell; | ||
177 | |||
178 | return r; | ||
179 | } | ||
180 | EXPORT_SYMBOL_GPL(dm_bio_detain); | ||
181 | |||
182 | /* | ||
183 | * @inmates must have been initialised prior to this call | ||
184 | */ | ||
185 | static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates) | ||
186 | { | ||
187 | struct dm_bio_prison *prison = cell->prison; | ||
188 | |||
189 | hlist_del(&cell->list); | ||
190 | |||
191 | if (inmates) { | ||
192 | bio_list_add(inmates, cell->holder); | ||
193 | bio_list_merge(inmates, &cell->bios); | ||
194 | } | ||
195 | |||
196 | mempool_free(cell, prison->cell_pool); | ||
197 | } | ||
198 | |||
199 | void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios) | ||
200 | { | ||
201 | unsigned long flags; | ||
202 | struct dm_bio_prison *prison = cell->prison; | ||
203 | |||
204 | spin_lock_irqsave(&prison->lock, flags); | ||
205 | __cell_release(cell, bios); | ||
206 | spin_unlock_irqrestore(&prison->lock, flags); | ||
207 | } | ||
208 | EXPORT_SYMBOL_GPL(dm_cell_release); | ||
209 | |||
210 | /* | ||
211 | * There are a couple of places where we put a bio into a cell briefly | ||
212 | * before taking it out again. In these situations we know that no other | ||
213 | * bio may be in the cell. This function releases the cell, and also does | ||
214 | * a sanity check. | ||
215 | */ | ||
216 | static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) | ||
217 | { | ||
218 | BUG_ON(cell->holder != bio); | ||
219 | BUG_ON(!bio_list_empty(&cell->bios)); | ||
220 | |||
221 | __cell_release(cell, NULL); | ||
222 | } | ||
223 | |||
224 | void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) | ||
225 | { | ||
226 | unsigned long flags; | ||
227 | struct dm_bio_prison *prison = cell->prison; | ||
228 | |||
229 | spin_lock_irqsave(&prison->lock, flags); | ||
230 | __cell_release_singleton(cell, bio); | ||
231 | spin_unlock_irqrestore(&prison->lock, flags); | ||
232 | } | ||
233 | EXPORT_SYMBOL_GPL(dm_cell_release_singleton); | ||
234 | |||
235 | /* | ||
236 | * Sometimes we don't want the holder, just the additional bios. | ||
237 | */ | ||
238 | static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates) | ||
239 | { | ||
240 | struct dm_bio_prison *prison = cell->prison; | ||
241 | |||
242 | hlist_del(&cell->list); | ||
243 | bio_list_merge(inmates, &cell->bios); | ||
244 | |||
245 | mempool_free(cell, prison->cell_pool); | ||
246 | } | ||
247 | |||
248 | void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates) | ||
249 | { | ||
250 | unsigned long flags; | ||
251 | struct dm_bio_prison *prison = cell->prison; | ||
252 | |||
253 | spin_lock_irqsave(&prison->lock, flags); | ||
254 | __cell_release_no_holder(cell, inmates); | ||
255 | spin_unlock_irqrestore(&prison->lock, flags); | ||
256 | } | ||
257 | EXPORT_SYMBOL_GPL(dm_cell_release_no_holder); | ||
258 | |||
259 | void dm_cell_error(struct dm_bio_prison_cell *cell) | ||
260 | { | ||
261 | struct dm_bio_prison *prison = cell->prison; | ||
262 | struct bio_list bios; | ||
263 | struct bio *bio; | ||
264 | unsigned long flags; | ||
265 | |||
266 | bio_list_init(&bios); | ||
267 | |||
268 | spin_lock_irqsave(&prison->lock, flags); | ||
269 | __cell_release(cell, &bios); | ||
270 | spin_unlock_irqrestore(&prison->lock, flags); | ||
271 | |||
272 | while ((bio = bio_list_pop(&bios))) | ||
273 | bio_io_error(bio); | ||
274 | } | ||
275 | EXPORT_SYMBOL_GPL(dm_cell_error); | ||
276 | |||
277 | /*----------------------------------------------------------------*/ | ||
278 | |||
279 | #define DEFERRED_SET_SIZE 64 | ||
280 | |||
281 | struct dm_deferred_entry { | ||
282 | struct dm_deferred_set *ds; | ||
283 | unsigned count; | ||
284 | struct list_head work_items; | ||
285 | }; | ||
286 | |||
287 | struct dm_deferred_set { | ||
288 | spinlock_t lock; | ||
289 | unsigned current_entry; | ||
290 | unsigned sweeper; | ||
291 | struct dm_deferred_entry entries[DEFERRED_SET_SIZE]; | ||
292 | }; | ||
293 | |||
294 | struct dm_deferred_set *dm_deferred_set_create(void) | ||
295 | { | ||
296 | int i; | ||
297 | struct dm_deferred_set *ds; | ||
298 | |||
299 | ds = kmalloc(sizeof(*ds), GFP_KERNEL); | ||
300 | if (!ds) | ||
301 | return NULL; | ||
302 | |||
303 | spin_lock_init(&ds->lock); | ||
304 | ds->current_entry = 0; | ||
305 | ds->sweeper = 0; | ||
306 | for (i = 0; i < DEFERRED_SET_SIZE; i++) { | ||
307 | ds->entries[i].ds = ds; | ||
308 | ds->entries[i].count = 0; | ||
309 | INIT_LIST_HEAD(&ds->entries[i].work_items); | ||
310 | } | ||
311 | |||
312 | return ds; | ||
313 | } | ||
314 | EXPORT_SYMBOL_GPL(dm_deferred_set_create); | ||
315 | |||
316 | void dm_deferred_set_destroy(struct dm_deferred_set *ds) | ||
317 | { | ||
318 | kfree(ds); | ||
319 | } | ||
320 | EXPORT_SYMBOL_GPL(dm_deferred_set_destroy); | ||
321 | |||
322 | struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds) | ||
323 | { | ||
324 | unsigned long flags; | ||
325 | struct dm_deferred_entry *entry; | ||
326 | |||
327 | spin_lock_irqsave(&ds->lock, flags); | ||
328 | entry = ds->entries + ds->current_entry; | ||
329 | entry->count++; | ||
330 | spin_unlock_irqrestore(&ds->lock, flags); | ||
331 | |||
332 | return entry; | ||
333 | } | ||
334 | EXPORT_SYMBOL_GPL(dm_deferred_entry_inc); | ||
335 | |||
336 | static unsigned ds_next(unsigned index) | ||
337 | { | ||
338 | return (index + 1) % DEFERRED_SET_SIZE; | ||
339 | } | ||
340 | |||
341 | static void __sweep(struct dm_deferred_set *ds, struct list_head *head) | ||
342 | { | ||
343 | while ((ds->sweeper != ds->current_entry) && | ||
344 | !ds->entries[ds->sweeper].count) { | ||
345 | list_splice_init(&ds->entries[ds->sweeper].work_items, head); | ||
346 | ds->sweeper = ds_next(ds->sweeper); | ||
347 | } | ||
348 | |||
349 | if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count) | ||
350 | list_splice_init(&ds->entries[ds->sweeper].work_items, head); | ||
351 | } | ||
352 | |||
353 | void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head) | ||
354 | { | ||
355 | unsigned long flags; | ||
356 | |||
357 | spin_lock_irqsave(&entry->ds->lock, flags); | ||
358 | BUG_ON(!entry->count); | ||
359 | --entry->count; | ||
360 | __sweep(entry->ds, head); | ||
361 | spin_unlock_irqrestore(&entry->ds->lock, flags); | ||
362 | } | ||
363 | EXPORT_SYMBOL_GPL(dm_deferred_entry_dec); | ||
364 | |||
365 | /* | ||
366 | * Returns 1 if deferred or 0 if no pending items to delay job. | ||
367 | */ | ||
368 | int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work) | ||
369 | { | ||
370 | int r = 1; | ||
371 | unsigned long flags; | ||
372 | unsigned next_entry; | ||
373 | |||
374 | spin_lock_irqsave(&ds->lock, flags); | ||
375 | if ((ds->sweeper == ds->current_entry) && | ||
376 | !ds->entries[ds->current_entry].count) | ||
377 | r = 0; | ||
378 | else { | ||
379 | list_add(work, &ds->entries[ds->current_entry].work_items); | ||
380 | next_entry = ds_next(ds->current_entry); | ||
381 | if (!ds->entries[next_entry].count) | ||
382 | ds->current_entry = next_entry; | ||
383 | } | ||
384 | spin_unlock_irqrestore(&ds->lock, flags); | ||
385 | |||
386 | return r; | ||
387 | } | ||
388 | EXPORT_SYMBOL_GPL(dm_deferred_set_add_work); | ||
389 | |||
390 | /*----------------------------------------------------------------*/ | ||
391 | |||
392 | static int __init dm_bio_prison_init(void) | ||
393 | { | ||
394 | _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0); | ||
395 | if (!_cell_cache) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | static void __exit dm_bio_prison_exit(void) | ||
402 | { | ||
403 | kmem_cache_destroy(_cell_cache); | ||
404 | _cell_cache = NULL; | ||
405 | } | ||
406 | |||
407 | /* | ||
408 | * module hooks | ||
409 | */ | ||
410 | module_init(dm_bio_prison_init); | ||
411 | module_exit(dm_bio_prison_exit); | ||
412 | |||
413 | MODULE_DESCRIPTION(DM_NAME " bio prison"); | ||
414 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | ||
415 | MODULE_LICENSE("GPL"); | ||
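The exported interface above is consumed by bio-based targets roughly as follows. This is a hypothetical caller written purely for illustration (example_start_io and example_finish_io are not functions from this patch); the thin target's real call sites appear in the dm-thin.c hunks further down.

/*
 * Hypothetical caller of the bio prison API.  A bio is detained against a
 * key built from the block it touches; the first bio through becomes the
 * holder and does the work, later bios queue in the same cell until the
 * holder releases it.
 */
#include "dm-bio-prison.h"

static struct dm_bio_prison *example_prison;	/* assumed created elsewhere */

static int example_start_io(struct bio *bio, dm_thin_id dev, dm_block_t block)
{
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key = {
		.virtual = 0,
		.dev = dev,
		.block = block,
	};

	if (dm_bio_detain(example_prison, &key, bio, &cell))
		return 0;	/* already held: bio is parked in the cell */

	/* We are the holder: perform the copy/zero/provision work here. */
	return 1;
}

static void example_finish_io(struct dm_bio_prison_cell *cell,
			      struct bio_list *deferred)
{
	/* Hand the holder and any parked bios back for reissue. */
	dm_cell_release(cell, deferred);
}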
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
new file mode 100644
index 000000000000..4e0ac376700a
--- /dev/null
+++ b/drivers/md/dm-bio-prison.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2011-2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_BIO_PRISON_H
+#define DM_BIO_PRISON_H
+
+#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
+#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
+
+#include <linux/list.h>
+#include <linux/bio.h>
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Sometimes we can't deal with a bio straight away.  We put them in prison
+ * where they can't cause any mischief.  Bios are put in a cell identified
+ * by a key, multiple bios can be in the same cell.  When the cell is
+ * subsequently unlocked the bios become available.
+ */
+struct dm_bio_prison;
+struct dm_bio_prison_cell;
+
+/* FIXME: this needs to be more abstract */
+struct dm_cell_key {
+	int virtual;
+	dm_thin_id dev;
+	dm_block_t block;
+};
+
+struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
+void dm_bio_prison_destroy(struct dm_bio_prison *prison);
+
+/*
+ * This may block if a new cell needs allocating.  You must ensure that
+ * cells will be unlocked even if the calling thread is blocked.
+ *
+ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ */
+int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
+		  struct bio *inmate, struct dm_bio_prison_cell **ref);
+
+void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
+void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
+void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
+void dm_cell_error(struct dm_bio_prison_cell *cell);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We use the deferred set to keep track of pending reads to shared blocks.
+ * We do this to ensure the new mapping caused by a write isn't performed
+ * until these prior reads have completed.  Otherwise the insertion of the
+ * new mapping could free the old block that the read bios are mapped to.
+ */
+
+struct dm_deferred_set;
+struct dm_deferred_entry;
+
+struct dm_deferred_set *dm_deferred_set_create(void);
+void dm_deferred_set_destroy(struct dm_deferred_set *ds);
+
+struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
+void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
+int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
+
+/*----------------------------------------------------------------*/
+
+#endif
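The deferred-set half of this header is a quiescing mechanism: each in-flight read against a shared block takes an entry, and work that must wait for those reads is parked with dm_deferred_set_add_work(). A hedged sketch of that pattern follows, with made-up example_* names rather than the thin target's real code.

/*
 * Hypothetical usage of the deferred set.  Reads pin the current
 * "generation"; a remap queues its work item and only gets it back once
 * every entry taken before it has been dropped.
 */
#include "dm-bio-prison.h"

static struct dm_deferred_set *example_ds;	/* assumed created at load time */

static struct dm_deferred_entry *example_read_begin(void)
{
	return dm_deferred_entry_inc(example_ds);
}

static void example_read_end(struct dm_deferred_entry *entry)
{
	LIST_HEAD(quiesced);

	/* Drop our reference; any work now unblocked is spliced onto the list. */
	dm_deferred_entry_dec(entry, &quiesced);
	/* ... process the quiesced work items ... */
}

static void example_remap(struct list_head *work)
{
	if (!dm_deferred_set_add_work(example_ds, work)) {
		/* Nothing in flight: safe to perform the remap immediately. */
	}
	/* Otherwise the work item comes back via a later dm_deferred_entry_dec(). */
}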
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cc06a1e52423..651ca79881dd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -280,9 +280,7 @@ static void __cache_size_refresh(void)
 	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
 	BUG_ON(dm_bufio_client_count < 0);
 
-	dm_bufio_cache_size_latch = dm_bufio_cache_size;
-
-	barrier();
+	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
 
 	/*
 	 * Use default if set to 0 and report the actual cache size used.
@@ -441,8 +439,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
 	c->n_buffers[b->list_mode]--;
 	c->n_buffers[dirty]++;
 	b->list_mode = dirty;
-	list_del(&b->lru_list);
-	list_add(&b->lru_list, &c->lru[dirty]);
+	list_move(&b->lru_list, &c->lru[dirty]);
 }
 
 /*----------------------------------------------------------------
@@ -813,7 +810,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
 {
 	unsigned long buffers;
 
-	if (dm_bufio_cache_size != dm_bufio_cache_size_latch) {
+	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
 		mutex_lock(&dm_bufio_clients_lock);
 		__cache_size_refresh();
 		mutex_unlock(&dm_bufio_clients_lock);
@@ -1591,11 +1588,9 @@ EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
 
 static void cleanup_old_buffers(void)
 {
-	unsigned long max_age = dm_bufio_max_age;
+	unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
 	struct dm_bufio_client *c;
 
-	barrier();
-
 	if (max_age > ULONG_MAX / HZ)
 		max_age = ULONG_MAX / HZ;
 
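The dm-bufio hunks replace an open-coded load followed by barrier() with ACCESS_ONCE(), which tells the compiler to read the sysfs-tunable value exactly once rather than possibly re-fetching it mid-function. A small illustration of the idiom; the "tunable" parameter is invented for the example.

/*
 * Sketch of the ACCESS_ONCE() snapshot idiom for a module parameter that
 * can be rewritten through sysfs at any time.
 */
#include <linux/compiler.h>
#include <linux/module.h>

static unsigned long tunable;			/* hypothetical writable knob */
module_param(tunable, ulong, S_IRUGO | S_IWUSR);

static void use_tunable(void)
{
	/* One read that the compiler may not re-fetch or merge. */
	unsigned long snapshot = ACCESS_ONCE(tunable);

	if (snapshot > 1024)
		snapshot = 1024;
	/* ... work with the stable snapshot ... */
}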
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d778563a4ffd..573bd04591bf 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1309,13 +1309,14 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 {
 	struct multipath *m = ti->private;
 	struct dm_mpath_io *mpio = map_context->ptr;
-	struct pgpath *pgpath = mpio->pgpath;
+	struct pgpath *pgpath;
 	struct path_selector *ps;
 	int r;
 
 	BUG_ON(!mpio);
 
 	r = do_end_io(m, clone, error, mpio);
+	pgpath = mpio->pgpath;
 	if (pgpath) {
 		ps = &pgpath->pg->ps;
 		if (ps->type->end_io)
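The multipath fix above defers reading mpio->pgpath until after the BUG_ON(!mpio) check. Dereferencing a pointer before testing it renders the test useless, and compilers may delete such a check outright; a minimal stand-alone illustration (not dm code) follows.

/* Illustration only: why the load is moved after the NULL check. */
struct example_ctx { int value; };

int example_broken(struct example_ctx *c)
{
	int v = c->value;	/* dereference happens first ... */

	if (!c)			/* ... so this check may be optimized away */
		return -1;
	return v;
}

int example_fixed(struct example_ctx *c)
{
	if (!c)
		return -1;
	return c->value;	/* only touched after the check */
}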
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c29410af1e22..058acf3a5ba7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -5,6 +5,7 @@
  */
 
 #include "dm-thin-metadata.h"
+#include "dm-bio-prison.h"
 #include "dm.h"
 
 #include <linux/device-mapper.h>
@@ -21,7 +22,6 @@
  * Tunable constants
  */
 #define ENDIO_HOOK_POOL_SIZE 1024
-#define DEFERRED_SET_SIZE 64
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
@@ -58,7 +58,7 @@
  * i) plug io further to this physical block. (see bio_prison code).
  *
  * ii) quiesce any read io to that shared data block. Obviously
- * including all devices that share this block. (see deferred_set code)
+ * including all devices that share this block. (see dm_deferred_set code)
  *
  * iii) copy the data block to a newly allocate block. This step can be
  * missed out if the io covers the block. (schedule_copy).
@@ -99,381 +99,10 @@ | |||
99 | /*----------------------------------------------------------------*/ | 99 | /*----------------------------------------------------------------*/ |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * Sometimes we can't deal with a bio straight away. We put them in prison | ||
103 | * where they can't cause any mischief. Bios are put in a cell identified | ||
104 | * by a key, multiple bios can be in the same cell. When the cell is | ||
105 | * subsequently unlocked the bios become available. | ||
106 | */ | ||
107 | struct bio_prison; | ||
108 | |||
109 | struct cell_key { | ||
110 | int virtual; | ||
111 | dm_thin_id dev; | ||
112 | dm_block_t block; | ||
113 | }; | ||
114 | |||
115 | struct dm_bio_prison_cell { | ||
116 | struct hlist_node list; | ||
117 | struct bio_prison *prison; | ||
118 | struct cell_key key; | ||
119 | struct bio *holder; | ||
120 | struct bio_list bios; | ||
121 | }; | ||
122 | |||
123 | struct bio_prison { | ||
124 | spinlock_t lock; | ||
125 | mempool_t *cell_pool; | ||
126 | |||
127 | unsigned nr_buckets; | ||
128 | unsigned hash_mask; | ||
129 | struct hlist_head *cells; | ||
130 | }; | ||
131 | |||
132 | static uint32_t calc_nr_buckets(unsigned nr_cells) | ||
133 | { | ||
134 | uint32_t n = 128; | ||
135 | |||
136 | nr_cells /= 4; | ||
137 | nr_cells = min(nr_cells, 8192u); | ||
138 | |||
139 | while (n < nr_cells) | ||
140 | n <<= 1; | ||
141 | |||
142 | return n; | ||
143 | } | ||
144 | |||
145 | static struct kmem_cache *_cell_cache; | ||
146 | |||
147 | /* | ||
148 | * @nr_cells should be the number of cells you want in use _concurrently_. | ||
149 | * Don't confuse it with the number of distinct keys. | ||
150 | */ | ||
151 | static struct bio_prison *prison_create(unsigned nr_cells) | ||
152 | { | ||
153 | unsigned i; | ||
154 | uint32_t nr_buckets = calc_nr_buckets(nr_cells); | ||
155 | size_t len = sizeof(struct bio_prison) + | ||
156 | (sizeof(struct hlist_head) * nr_buckets); | ||
157 | struct bio_prison *prison = kmalloc(len, GFP_KERNEL); | ||
158 | |||
159 | if (!prison) | ||
160 | return NULL; | ||
161 | |||
162 | spin_lock_init(&prison->lock); | ||
163 | prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache); | ||
164 | if (!prison->cell_pool) { | ||
165 | kfree(prison); | ||
166 | return NULL; | ||
167 | } | ||
168 | |||
169 | prison->nr_buckets = nr_buckets; | ||
170 | prison->hash_mask = nr_buckets - 1; | ||
171 | prison->cells = (struct hlist_head *) (prison + 1); | ||
172 | for (i = 0; i < nr_buckets; i++) | ||
173 | INIT_HLIST_HEAD(prison->cells + i); | ||
174 | |||
175 | return prison; | ||
176 | } | ||
177 | |||
178 | static void prison_destroy(struct bio_prison *prison) | ||
179 | { | ||
180 | mempool_destroy(prison->cell_pool); | ||
181 | kfree(prison); | ||
182 | } | ||
183 | |||
184 | static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key) | ||
185 | { | ||
186 | const unsigned long BIG_PRIME = 4294967291UL; | ||
187 | uint64_t hash = key->block * BIG_PRIME; | ||
188 | |||
189 | return (uint32_t) (hash & prison->hash_mask); | ||
190 | } | ||
191 | |||
192 | static int keys_equal(struct cell_key *lhs, struct cell_key *rhs) | ||
193 | { | ||
194 | return (lhs->virtual == rhs->virtual) && | ||
195 | (lhs->dev == rhs->dev) && | ||
196 | (lhs->block == rhs->block); | ||
197 | } | ||
198 | |||
199 | static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket, | ||
200 | struct cell_key *key) | ||
201 | { | ||
202 | struct dm_bio_prison_cell *cell; | ||
203 | struct hlist_node *tmp; | ||
204 | |||
205 | hlist_for_each_entry(cell, tmp, bucket, list) | ||
206 | if (keys_equal(&cell->key, key)) | ||
207 | return cell; | ||
208 | |||
209 | return NULL; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * This may block if a new cell needs allocating. You must ensure that | ||
214 | * cells will be unlocked even if the calling thread is blocked. | ||
215 | * | ||
216 | * Returns 1 if the cell was already held, 0 if @inmate is the new holder. | ||
217 | */ | ||
218 | static int bio_detain(struct bio_prison *prison, struct cell_key *key, | ||
219 | struct bio *inmate, struct dm_bio_prison_cell **ref) | ||
220 | { | ||
221 | int r = 1; | ||
222 | unsigned long flags; | ||
223 | uint32_t hash = hash_key(prison, key); | ||
224 | struct dm_bio_prison_cell *cell, *cell2; | ||
225 | |||
226 | BUG_ON(hash > prison->nr_buckets); | ||
227 | |||
228 | spin_lock_irqsave(&prison->lock, flags); | ||
229 | |||
230 | cell = __search_bucket(prison->cells + hash, key); | ||
231 | if (cell) { | ||
232 | bio_list_add(&cell->bios, inmate); | ||
233 | goto out; | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * Allocate a new cell | ||
238 | */ | ||
239 | spin_unlock_irqrestore(&prison->lock, flags); | ||
240 | cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO); | ||
241 | spin_lock_irqsave(&prison->lock, flags); | ||
242 | |||
243 | /* | ||
244 | * We've been unlocked, so we have to double check that | ||
245 | * nobody else has inserted this cell in the meantime. | ||
246 | */ | ||
247 | cell = __search_bucket(prison->cells + hash, key); | ||
248 | if (cell) { | ||
249 | mempool_free(cell2, prison->cell_pool); | ||
250 | bio_list_add(&cell->bios, inmate); | ||
251 | goto out; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Use new cell. | ||
256 | */ | ||
257 | cell = cell2; | ||
258 | |||
259 | cell->prison = prison; | ||
260 | memcpy(&cell->key, key, sizeof(cell->key)); | ||
261 | cell->holder = inmate; | ||
262 | bio_list_init(&cell->bios); | ||
263 | hlist_add_head(&cell->list, prison->cells + hash); | ||
264 | |||
265 | r = 0; | ||
266 | |||
267 | out: | ||
268 | spin_unlock_irqrestore(&prison->lock, flags); | ||
269 | |||
270 | *ref = cell; | ||
271 | |||
272 | return r; | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * @inmates must have been initialised prior to this call | ||
277 | */ | ||
278 | static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates) | ||
279 | { | ||
280 | struct bio_prison *prison = cell->prison; | ||
281 | |||
282 | hlist_del(&cell->list); | ||
283 | |||
284 | if (inmates) { | ||
285 | bio_list_add(inmates, cell->holder); | ||
286 | bio_list_merge(inmates, &cell->bios); | ||
287 | } | ||
288 | |||
289 | mempool_free(cell, prison->cell_pool); | ||
290 | } | ||
291 | |||
292 | static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios) | ||
293 | { | ||
294 | unsigned long flags; | ||
295 | struct bio_prison *prison = cell->prison; | ||
296 | |||
297 | spin_lock_irqsave(&prison->lock, flags); | ||
298 | __cell_release(cell, bios); | ||
299 | spin_unlock_irqrestore(&prison->lock, flags); | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * There are a couple of places where we put a bio into a cell briefly | ||
304 | * before taking it out again. In these situations we know that no other | ||
305 | * bio may be in the cell. This function releases the cell, and also does | ||
306 | * a sanity check. | ||
307 | */ | ||
308 | static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) | ||
309 | { | ||
310 | BUG_ON(cell->holder != bio); | ||
311 | BUG_ON(!bio_list_empty(&cell->bios)); | ||
312 | |||
313 | __cell_release(cell, NULL); | ||
314 | } | ||
315 | |||
316 | static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) | ||
317 | { | ||
318 | unsigned long flags; | ||
319 | struct bio_prison *prison = cell->prison; | ||
320 | |||
321 | spin_lock_irqsave(&prison->lock, flags); | ||
322 | __cell_release_singleton(cell, bio); | ||
323 | spin_unlock_irqrestore(&prison->lock, flags); | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * Sometimes we don't want the holder, just the additional bios. | ||
328 | */ | ||
329 | static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, | ||
330 | struct bio_list *inmates) | ||
331 | { | ||
332 | struct bio_prison *prison = cell->prison; | ||
333 | |||
334 | hlist_del(&cell->list); | ||
335 | bio_list_merge(inmates, &cell->bios); | ||
336 | |||
337 | mempool_free(cell, prison->cell_pool); | ||
338 | } | ||
339 | |||
340 | static void cell_release_no_holder(struct dm_bio_prison_cell *cell, | ||
341 | struct bio_list *inmates) | ||
342 | { | ||
343 | unsigned long flags; | ||
344 | struct bio_prison *prison = cell->prison; | ||
345 | |||
346 | spin_lock_irqsave(&prison->lock, flags); | ||
347 | __cell_release_no_holder(cell, inmates); | ||
348 | spin_unlock_irqrestore(&prison->lock, flags); | ||
349 | } | ||
350 | |||
351 | static void cell_error(struct dm_bio_prison_cell *cell) | ||
352 | { | ||
353 | struct bio_prison *prison = cell->prison; | ||
354 | struct bio_list bios; | ||
355 | struct bio *bio; | ||
356 | unsigned long flags; | ||
357 | |||
358 | bio_list_init(&bios); | ||
359 | |||
360 | spin_lock_irqsave(&prison->lock, flags); | ||
361 | __cell_release(cell, &bios); | ||
362 | spin_unlock_irqrestore(&prison->lock, flags); | ||
363 | |||
364 | while ((bio = bio_list_pop(&bios))) | ||
365 | bio_io_error(bio); | ||
366 | } | ||
367 | |||
368 | /*----------------------------------------------------------------*/ | ||
369 | |||
370 | /* | ||
371 | * We use the deferred set to keep track of pending reads to shared blocks. | ||
372 | * We do this to ensure the new mapping caused by a write isn't performed | ||
373 | * until these prior reads have completed. Otherwise the insertion of the | ||
374 | * new mapping could free the old block that the read bios are mapped to. | ||
375 | */ | ||
376 | |||
377 | struct deferred_set; | ||
378 | struct deferred_entry { | ||
379 | struct deferred_set *ds; | ||
380 | unsigned count; | ||
381 | struct list_head work_items; | ||
382 | }; | ||
383 | |||
384 | struct deferred_set { | ||
385 | spinlock_t lock; | ||
386 | unsigned current_entry; | ||
387 | unsigned sweeper; | ||
388 | struct deferred_entry entries[DEFERRED_SET_SIZE]; | ||
389 | }; | ||
390 | |||
391 | static void ds_init(struct deferred_set *ds) | ||
392 | { | ||
393 | int i; | ||
394 | |||
395 | spin_lock_init(&ds->lock); | ||
396 | ds->current_entry = 0; | ||
397 | ds->sweeper = 0; | ||
398 | for (i = 0; i < DEFERRED_SET_SIZE; i++) { | ||
399 | ds->entries[i].ds = ds; | ||
400 | ds->entries[i].count = 0; | ||
401 | INIT_LIST_HEAD(&ds->entries[i].work_items); | ||
402 | } | ||
403 | } | ||
404 | |||
405 | static struct deferred_entry *ds_inc(struct deferred_set *ds) | ||
406 | { | ||
407 | unsigned long flags; | ||
408 | struct deferred_entry *entry; | ||
409 | |||
410 | spin_lock_irqsave(&ds->lock, flags); | ||
411 | entry = ds->entries + ds->current_entry; | ||
412 | entry->count++; | ||
413 | spin_unlock_irqrestore(&ds->lock, flags); | ||
414 | |||
415 | return entry; | ||
416 | } | ||
417 | |||
418 | static unsigned ds_next(unsigned index) | ||
419 | { | ||
420 | return (index + 1) % DEFERRED_SET_SIZE; | ||
421 | } | ||
422 | |||
423 | static void __sweep(struct deferred_set *ds, struct list_head *head) | ||
424 | { | ||
425 | while ((ds->sweeper != ds->current_entry) && | ||
426 | !ds->entries[ds->sweeper].count) { | ||
427 | list_splice_init(&ds->entries[ds->sweeper].work_items, head); | ||
428 | ds->sweeper = ds_next(ds->sweeper); | ||
429 | } | ||
430 | |||
431 | if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count) | ||
432 | list_splice_init(&ds->entries[ds->sweeper].work_items, head); | ||
433 | } | ||
434 | |||
435 | static void ds_dec(struct deferred_entry *entry, struct list_head *head) | ||
436 | { | ||
437 | unsigned long flags; | ||
438 | |||
439 | spin_lock_irqsave(&entry->ds->lock, flags); | ||
440 | BUG_ON(!entry->count); | ||
441 | --entry->count; | ||
442 | __sweep(entry->ds, head); | ||
443 | spin_unlock_irqrestore(&entry->ds->lock, flags); | ||
444 | } | ||
445 | |||
446 | /* | ||
447 | * Returns 1 if deferred or 0 if no pending items to delay job. | ||
448 | */ | ||
449 | static int ds_add_work(struct deferred_set *ds, struct list_head *work) | ||
450 | { | ||
451 | int r = 1; | ||
452 | unsigned long flags; | ||
453 | unsigned next_entry; | ||
454 | |||
455 | spin_lock_irqsave(&ds->lock, flags); | ||
456 | if ((ds->sweeper == ds->current_entry) && | ||
457 | !ds->entries[ds->current_entry].count) | ||
458 | r = 0; | ||
459 | else { | ||
460 | list_add(work, &ds->entries[ds->current_entry].work_items); | ||
461 | next_entry = ds_next(ds->current_entry); | ||
462 | if (!ds->entries[next_entry].count) | ||
463 | ds->current_entry = next_entry; | ||
464 | } | ||
465 | spin_unlock_irqrestore(&ds->lock, flags); | ||
466 | |||
467 | return r; | ||
468 | } | ||
469 | |||
470 | /*----------------------------------------------------------------*/ | ||
471 | |||
472 | /* | ||
473 | * Key building. | 102 | * Key building. |
474 | */ | 103 | */ |
475 | static void build_data_key(struct dm_thin_device *td, | 104 | static void build_data_key(struct dm_thin_device *td, |
476 | dm_block_t b, struct cell_key *key) | 105 | dm_block_t b, struct dm_cell_key *key) |
477 | { | 106 | { |
478 | key->virtual = 0; | 107 | key->virtual = 0; |
479 | key->dev = dm_thin_dev_id(td); | 108 | key->dev = dm_thin_dev_id(td); |
@@ -481,7 +110,7 @@ static void build_data_key(struct dm_thin_device *td, | |||
481 | } | 110 | } |
482 | 111 | ||
483 | static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, | 112 | static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, |
484 | struct cell_key *key) | 113 | struct dm_cell_key *key) |
485 | { | 114 | { |
486 | key->virtual = 1; | 115 | key->virtual = 1; |
487 | key->dev = dm_thin_dev_id(td); | 116 | key->dev = dm_thin_dev_id(td); |
@@ -534,7 +163,7 @@ struct pool { | |||
534 | unsigned low_water_triggered:1; /* A dm event has been sent */ | 163 | unsigned low_water_triggered:1; /* A dm event has been sent */ |
535 | unsigned no_free_space:1; /* A -ENOSPC warning has been issued */ | 164 | unsigned no_free_space:1; /* A -ENOSPC warning has been issued */ |
536 | 165 | ||
537 | struct bio_prison *prison; | 166 | struct dm_bio_prison *prison; |
538 | struct dm_kcopyd_client *copier; | 167 | struct dm_kcopyd_client *copier; |
539 | 168 | ||
540 | struct workqueue_struct *wq; | 169 | struct workqueue_struct *wq; |
@@ -552,8 +181,8 @@ struct pool { | |||
552 | 181 | ||
553 | struct bio_list retry_on_resume_list; | 182 | struct bio_list retry_on_resume_list; |
554 | 183 | ||
555 | struct deferred_set shared_read_ds; | 184 | struct dm_deferred_set *shared_read_ds; |
556 | struct deferred_set all_io_ds; | 185 | struct dm_deferred_set *all_io_ds; |
557 | 186 | ||
558 | struct dm_thin_new_mapping *next_mapping; | 187 | struct dm_thin_new_mapping *next_mapping; |
559 | mempool_t *mapping_pool; | 188 | mempool_t *mapping_pool; |
@@ -660,8 +289,8 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev | |||
660 | 289 | ||
661 | struct dm_thin_endio_hook { | 290 | struct dm_thin_endio_hook { |
662 | struct thin_c *tc; | 291 | struct thin_c *tc; |
663 | struct deferred_entry *shared_read_entry; | 292 | struct dm_deferred_entry *shared_read_entry; |
664 | struct deferred_entry *all_io_entry; | 293 | struct dm_deferred_entry *all_io_entry; |
665 | struct dm_thin_new_mapping *overwrite_mapping; | 294 | struct dm_thin_new_mapping *overwrite_mapping; |
666 | }; | 295 | }; |
667 | 296 | ||
@@ -877,7 +506,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell, | |||
877 | unsigned long flags; | 506 | unsigned long flags; |
878 | 507 | ||
879 | spin_lock_irqsave(&pool->lock, flags); | 508 | spin_lock_irqsave(&pool->lock, flags); |
880 | cell_release(cell, &pool->deferred_bios); | 509 | dm_cell_release(cell, &pool->deferred_bios); |
881 | spin_unlock_irqrestore(&tc->pool->lock, flags); | 510 | spin_unlock_irqrestore(&tc->pool->lock, flags); |
882 | 511 | ||
883 | wake_worker(pool); | 512 | wake_worker(pool); |
@@ -896,7 +525,7 @@ static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell | |||
896 | bio_list_init(&bios); | 525 | bio_list_init(&bios); |
897 | 526 | ||
898 | spin_lock_irqsave(&pool->lock, flags); | 527 | spin_lock_irqsave(&pool->lock, flags); |
899 | cell_release_no_holder(cell, &pool->deferred_bios); | 528 | dm_cell_release_no_holder(cell, &pool->deferred_bios); |
900 | spin_unlock_irqrestore(&pool->lock, flags); | 529 | spin_unlock_irqrestore(&pool->lock, flags); |
901 | 530 | ||
902 | wake_worker(pool); | 531 | wake_worker(pool); |
@@ -906,7 +535,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) | |||
906 | { | 535 | { |
907 | if (m->bio) | 536 | if (m->bio) |
908 | m->bio->bi_end_io = m->saved_bi_end_io; | 537 | m->bio->bi_end_io = m->saved_bi_end_io; |
909 | cell_error(m->cell); | 538 | dm_cell_error(m->cell); |
910 | list_del(&m->list); | 539 | list_del(&m->list); |
911 | mempool_free(m, m->tc->pool->mapping_pool); | 540 | mempool_free(m, m->tc->pool->mapping_pool); |
912 | } | 541 | } |
@@ -921,7 +550,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
921 | bio->bi_end_io = m->saved_bi_end_io; | 550 | bio->bi_end_io = m->saved_bi_end_io; |
922 | 551 | ||
923 | if (m->err) { | 552 | if (m->err) { |
924 | cell_error(m->cell); | 553 | dm_cell_error(m->cell); |
925 | goto out; | 554 | goto out; |
926 | } | 555 | } |
927 | 556 | ||
@@ -933,7 +562,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
933 | r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); | 562 | r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); |
934 | if (r) { | 563 | if (r) { |
935 | DMERR("dm_thin_insert_block() failed"); | 564 | DMERR("dm_thin_insert_block() failed"); |
936 | cell_error(m->cell); | 565 | dm_cell_error(m->cell); |
937 | goto out; | 566 | goto out; |
938 | } | 567 | } |
939 | 568 | ||
@@ -1067,7 +696,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, | |||
1067 | m->err = 0; | 696 | m->err = 0; |
1068 | m->bio = NULL; | 697 | m->bio = NULL; |
1069 | 698 | ||
1070 | if (!ds_add_work(&pool->shared_read_ds, &m->list)) | 699 | if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) |
1071 | m->quiesced = 1; | 700 | m->quiesced = 1; |
1072 | 701 | ||
1073 | /* | 702 | /* |
@@ -1099,7 +728,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, | |||
1099 | if (r < 0) { | 728 | if (r < 0) { |
1100 | mempool_free(m, pool->mapping_pool); | 729 | mempool_free(m, pool->mapping_pool); |
1101 | DMERR("dm_kcopyd_copy() failed"); | 730 | DMERR("dm_kcopyd_copy() failed"); |
1102 | cell_error(cell); | 731 | dm_cell_error(cell); |
1103 | } | 732 | } |
1104 | } | 733 | } |
1105 | } | 734 | } |
@@ -1164,7 +793,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, | |||
1164 | if (r < 0) { | 793 | if (r < 0) { |
1165 | mempool_free(m, pool->mapping_pool); | 794 | mempool_free(m, pool->mapping_pool); |
1166 | DMERR("dm_kcopyd_zero() failed"); | 795 | DMERR("dm_kcopyd_zero() failed"); |
1167 | cell_error(cell); | 796 | dm_cell_error(cell); |
1168 | } | 797 | } |
1169 | } | 798 | } |
1170 | } | 799 | } |
@@ -1276,7 +905,7 @@ static void no_space(struct dm_bio_prison_cell *cell) | |||
1276 | struct bio_list bios; | 905 | struct bio_list bios; |
1277 | 906 | ||
1278 | bio_list_init(&bios); | 907 | bio_list_init(&bios); |
1279 | cell_release(cell, &bios); | 908 | dm_cell_release(cell, &bios); |
1280 | 909 | ||
1281 | while ((bio = bio_list_pop(&bios))) | 910 | while ((bio = bio_list_pop(&bios))) |
1282 | retry_on_resume(bio); | 911 | retry_on_resume(bio); |
@@ -1288,13 +917,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
1288 | unsigned long flags; | 917 | unsigned long flags; |
1289 | struct pool *pool = tc->pool; | 918 | struct pool *pool = tc->pool; |
1290 | struct dm_bio_prison_cell *cell, *cell2; | 919 | struct dm_bio_prison_cell *cell, *cell2; |
1291 | struct cell_key key, key2; | 920 | struct dm_cell_key key, key2; |
1292 | dm_block_t block = get_bio_block(tc, bio); | 921 | dm_block_t block = get_bio_block(tc, bio); |
1293 | struct dm_thin_lookup_result lookup_result; | 922 | struct dm_thin_lookup_result lookup_result; |
1294 | struct dm_thin_new_mapping *m; | 923 | struct dm_thin_new_mapping *m; |
1295 | 924 | ||
1296 | build_virtual_key(tc->td, block, &key); | 925 | build_virtual_key(tc->td, block, &key); |
1297 | if (bio_detain(tc->pool->prison, &key, bio, &cell)) | 926 | if (dm_bio_detain(tc->pool->prison, &key, bio, &cell)) |
1298 | return; | 927 | return; |
1299 | 928 | ||
1300 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); | 929 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); |
@@ -1306,8 +935,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
1306 | * on this block. | 935 | * on this block. |
1307 | */ | 936 | */ |
1308 | build_data_key(tc->td, lookup_result.block, &key2); | 937 | build_data_key(tc->td, lookup_result.block, &key2); |
1309 | if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) { | 938 | if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) { |
1310 | cell_release_singleton(cell, bio); | 939 | dm_cell_release_singleton(cell, bio); |
1311 | break; | 940 | break; |
1312 | } | 941 | } |
1313 | 942 | ||
@@ -1326,7 +955,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
1326 | m->err = 0; | 955 | m->err = 0; |
1327 | m->bio = bio; | 956 | m->bio = bio; |
1328 | 957 | ||
1329 | if (!ds_add_work(&pool->all_io_ds, &m->list)) { | 958 | if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) { |
1330 | spin_lock_irqsave(&pool->lock, flags); | 959 | spin_lock_irqsave(&pool->lock, flags); |
1331 | list_add(&m->list, &pool->prepared_discards); | 960 | list_add(&m->list, &pool->prepared_discards); |
1332 | spin_unlock_irqrestore(&pool->lock, flags); | 961 | spin_unlock_irqrestore(&pool->lock, flags); |
@@ -1338,8 +967,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
1338 | * a block boundary. So we submit the discard of a | 967 | * a block boundary. So we submit the discard of a |
1339 | * partial block appropriately. | 968 | * partial block appropriately. |
1340 | */ | 969 | */ |
1341 | cell_release_singleton(cell, bio); | 970 | dm_cell_release_singleton(cell, bio); |
1342 | cell_release_singleton(cell2, bio); | 971 | dm_cell_release_singleton(cell2, bio); |
1343 | if ((!lookup_result.shared) && pool->pf.discard_passdown) | 972 | if ((!lookup_result.shared) && pool->pf.discard_passdown) |
1344 | remap_and_issue(tc, bio, lookup_result.block); | 973 | remap_and_issue(tc, bio, lookup_result.block); |
1345 | else | 974 | else |
@@ -1351,20 +980,20 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
1351 | /* | 980 | /* |
1352 | * It isn't provisioned, just forget it. | 981 | * It isn't provisioned, just forget it. |
1353 | */ | 982 | */ |
1354 | cell_release_singleton(cell, bio); | 983 | dm_cell_release_singleton(cell, bio); |
1355 | bio_endio(bio, 0); | 984 | bio_endio(bio, 0); |
1356 | break; | 985 | break; |
1357 | 986 | ||
1358 | default: | 987 | default: |
1359 | DMERR("discard: find block unexpectedly returned %d", r); | 988 | DMERR("discard: find block unexpectedly returned %d", r); |
1360 | cell_release_singleton(cell, bio); | 989 | dm_cell_release_singleton(cell, bio); |
1361 | bio_io_error(bio); | 990 | bio_io_error(bio); |
1362 | break; | 991 | break; |
1363 | } | 992 | } |
1364 | } | 993 | } |
1365 | 994 | ||
1366 | static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, | 995 | static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, |
1367 | struct cell_key *key, | 996 | struct dm_cell_key *key, |
1368 | struct dm_thin_lookup_result *lookup_result, | 997 | struct dm_thin_lookup_result *lookup_result, |
1369 | struct dm_bio_prison_cell *cell) | 998 | struct dm_bio_prison_cell *cell) |
1370 | { | 999 | { |
@@ -1384,7 +1013,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, | |||
1384 | 1013 | ||
1385 | default: | 1014 | default: |
1386 | DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); | 1015 | DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); |
1387 | cell_error(cell); | 1016 | dm_cell_error(cell); |
1388 | break; | 1017 | break; |
1389 | } | 1018 | } |
1390 | } | 1019 | } |
@@ -1395,14 +1024,14 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, | |||
1395 | { | 1024 | { |
1396 | struct dm_bio_prison_cell *cell; | 1025 | struct dm_bio_prison_cell *cell; |
1397 | struct pool *pool = tc->pool; | 1026 | struct pool *pool = tc->pool; |
1398 | struct cell_key key; | 1027 | struct dm_cell_key key; |
1399 | 1028 | ||
1400 | /* | 1029 | /* |
1401 | * If cell is already occupied, then sharing is already in the process | 1030 | * If cell is already occupied, then sharing is already in the process |
1402 | * of being broken so we have nothing further to do here. | 1031 | * of being broken so we have nothing further to do here. |
1403 | */ | 1032 | */ |
1404 | build_data_key(tc->td, lookup_result->block, &key); | 1033 | build_data_key(tc->td, lookup_result->block, &key); |
1405 | if (bio_detain(pool->prison, &key, bio, &cell)) | 1034 | if (dm_bio_detain(pool->prison, &key, bio, &cell)) |
1406 | return; | 1035 | return; |
1407 | 1036 | ||
1408 | if (bio_data_dir(bio) == WRITE && bio->bi_size) | 1037 | if (bio_data_dir(bio) == WRITE && bio->bi_size) |
@@ -1410,9 +1039,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, | |||
1410 | else { | 1039 | else { |
1411 | struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; | 1040 | struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; |
1412 | 1041 | ||
1413 | h->shared_read_entry = ds_inc(&pool->shared_read_ds); | 1042 | h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); |
1414 | 1043 | ||
1415 | cell_release_singleton(cell, bio); | 1044 | dm_cell_release_singleton(cell, bio); |
1416 | remap_and_issue(tc, bio, lookup_result->block); | 1045 | remap_and_issue(tc, bio, lookup_result->block); |
1417 | } | 1046 | } |
1418 | } | 1047 | } |
@@ -1427,7 +1056,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block | |||
1427 | * Remap empty bios (flushes) immediately, without provisioning. | 1056 | * Remap empty bios (flushes) immediately, without provisioning. |
1428 | */ | 1057 | */ |
1429 | if (!bio->bi_size) { | 1058 | if (!bio->bi_size) { |
1430 | cell_release_singleton(cell, bio); | 1059 | dm_cell_release_singleton(cell, bio); |
1431 | remap_and_issue(tc, bio, 0); | 1060 | remap_and_issue(tc, bio, 0); |
1432 | return; | 1061 | return; |
1433 | } | 1062 | } |
@@ -1437,7 +1066,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block | |||
1437 | */ | 1066 | */ |
1438 | if (bio_data_dir(bio) == READ) { | 1067 | if (bio_data_dir(bio) == READ) { |
1439 | zero_fill_bio(bio); | 1068 | zero_fill_bio(bio); |
1440 | cell_release_singleton(cell, bio); | 1069 | dm_cell_release_singleton(cell, bio); |
1441 | bio_endio(bio, 0); | 1070 | bio_endio(bio, 0); |
1442 | return; | 1071 | return; |
1443 | } | 1072 | } |
@@ -1458,7 +1087,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block | |||
1458 | default: | 1087 | default: |
1459 | DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); | 1088 | DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); |
1460 | set_pool_mode(tc->pool, PM_READ_ONLY); | 1089 | set_pool_mode(tc->pool, PM_READ_ONLY); |
1461 | cell_error(cell); | 1090 | dm_cell_error(cell); |
1462 | break; | 1091 | break; |
1463 | } | 1092 | } |
1464 | } | 1093 | } |
@@ -1468,7 +1097,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
1468 | int r; | 1097 | int r; |
1469 | dm_block_t block = get_bio_block(tc, bio); | 1098 | dm_block_t block = get_bio_block(tc, bio); |
1470 | struct dm_bio_prison_cell *cell; | 1099 | struct dm_bio_prison_cell *cell; |
1471 | struct cell_key key; | 1100 | struct dm_cell_key key; |
1472 | struct dm_thin_lookup_result lookup_result; | 1101 | struct dm_thin_lookup_result lookup_result; |
1473 | 1102 | ||
1474 | /* | 1103 | /* |
@@ -1476,7 +1105,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
1476 | * being provisioned so we have nothing further to do here. | 1105 | * being provisioned so we have nothing further to do here. |
1477 | */ | 1106 | */ |
1478 | build_virtual_key(tc->td, block, &key); | 1107 | build_virtual_key(tc->td, block, &key); |
1479 | if (bio_detain(tc->pool->prison, &key, bio, &cell)) | 1108 | if (dm_bio_detain(tc->pool->prison, &key, bio, &cell)) |
1480 | return; | 1109 | return; |
1481 | 1110 | ||
1482 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); | 1111 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); |
@@ -1491,7 +1120,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
1491 | * TODO: this will probably have to change when discard goes | 1120 | * TODO: this will probably have to change when discard goes |
1492 | * back in. | 1121 | * back in. |
1493 | */ | 1122 | */ |
1494 | cell_release_singleton(cell, bio); | 1123 | dm_cell_release_singleton(cell, bio); |
1495 | 1124 | ||
1496 | if (lookup_result.shared) | 1125 | if (lookup_result.shared) |
1497 | process_shared_bio(tc, bio, block, &lookup_result); | 1126 | process_shared_bio(tc, bio, block, &lookup_result); |
@@ -1501,7 +1130,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
1501 | 1130 | ||
1502 | case -ENODATA: | 1131 | case -ENODATA: |
1503 | if (bio_data_dir(bio) == READ && tc->origin_dev) { | 1132 | if (bio_data_dir(bio) == READ && tc->origin_dev) { |
1504 | cell_release_singleton(cell, bio); | 1133 | dm_cell_release_singleton(cell, bio); |
1505 | remap_to_origin_and_issue(tc, bio); | 1134 | remap_to_origin_and_issue(tc, bio); |
1506 | } else | 1135 | } else |
1507 | provision_block(tc, bio, block, cell); | 1136 | provision_block(tc, bio, block, cell); |
@@ -1509,7 +1138,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
1509 | 1138 | ||
1510 | default: | 1139 | default: |
1511 | DMERR("dm_thin_find_block() failed, error = %d", r); | 1140 | DMERR("dm_thin_find_block() failed, error = %d", r); |
1512 | cell_release_singleton(cell, bio); | 1141 | dm_cell_release_singleton(cell, bio); |
1513 | bio_io_error(bio); | 1142 | bio_io_error(bio); |
1514 | break; | 1143 | break; |
1515 | } | 1144 | } |
@@ -1718,7 +1347,7 @@ static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *b | |||
1718 | 1347 | ||
1719 | h->tc = tc; | 1348 | h->tc = tc; |
1720 | h->shared_read_entry = NULL; | 1349 | h->shared_read_entry = NULL; |
1721 | h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds); | 1350 | h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds); |
1722 | h->overwrite_mapping = NULL; | 1351 | h->overwrite_mapping = NULL; |
1723 | 1352 | ||
1724 | return h; | 1353 | return h; |
@@ -1928,7 +1557,7 @@ static void __pool_destroy(struct pool *pool) | |||
1928 | if (dm_pool_metadata_close(pool->pmd) < 0) | 1557 | if (dm_pool_metadata_close(pool->pmd) < 0) |
1929 | DMWARN("%s: dm_pool_metadata_close() failed.", __func__); | 1558 | DMWARN("%s: dm_pool_metadata_close() failed.", __func__); |
1930 | 1559 | ||
1931 | prison_destroy(pool->prison); | 1560 | dm_bio_prison_destroy(pool->prison); |
1932 | dm_kcopyd_client_destroy(pool->copier); | 1561 | dm_kcopyd_client_destroy(pool->copier); |
1933 | 1562 | ||
1934 | if (pool->wq) | 1563 | if (pool->wq) |
@@ -1938,6 +1567,8 @@ static void __pool_destroy(struct pool *pool) | |||
1938 | mempool_free(pool->next_mapping, pool->mapping_pool); | 1567 | mempool_free(pool->next_mapping, pool->mapping_pool); |
1939 | mempool_destroy(pool->mapping_pool); | 1568 | mempool_destroy(pool->mapping_pool); |
1940 | mempool_destroy(pool->endio_hook_pool); | 1569 | mempool_destroy(pool->endio_hook_pool); |
1570 | dm_deferred_set_destroy(pool->shared_read_ds); | ||
1571 | dm_deferred_set_destroy(pool->all_io_ds); | ||
1941 | kfree(pool); | 1572 | kfree(pool); |
1942 | } | 1573 | } |
1943 | 1574 | ||
@@ -1976,7 +1607,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
1976 | pool->sectors_per_block_shift = __ffs(block_size); | 1607 | pool->sectors_per_block_shift = __ffs(block_size); |
1977 | pool->low_water_blocks = 0; | 1608 | pool->low_water_blocks = 0; |
1978 | pool_features_init(&pool->pf); | 1609 | pool_features_init(&pool->pf); |
1979 | pool->prison = prison_create(PRISON_CELLS); | 1610 | pool->prison = dm_bio_prison_create(PRISON_CELLS); |
1980 | if (!pool->prison) { | 1611 | if (!pool->prison) { |
1981 | *error = "Error creating pool's bio prison"; | 1612 | *error = "Error creating pool's bio prison"; |
1982 | err_p = ERR_PTR(-ENOMEM); | 1613 | err_p = ERR_PTR(-ENOMEM); |
@@ -2012,8 +1643,20 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
2012 | pool->low_water_triggered = 0; | 1643 | pool->low_water_triggered = 0; |
2013 | pool->no_free_space = 0; | 1644 | pool->no_free_space = 0; |
2014 | bio_list_init(&pool->retry_on_resume_list); | 1645 | bio_list_init(&pool->retry_on_resume_list); |
2015 | ds_init(&pool->shared_read_ds); | 1646 | |
2016 | ds_init(&pool->all_io_ds); | 1647 | pool->shared_read_ds = dm_deferred_set_create(); |
1648 | if (!pool->shared_read_ds) { | ||
1649 | *error = "Error creating pool's shared read deferred set"; | ||
1650 | err_p = ERR_PTR(-ENOMEM); | ||
1651 | goto bad_shared_read_ds; | ||
1652 | } | ||
1653 | |||
1654 | pool->all_io_ds = dm_deferred_set_create(); | ||
1655 | if (!pool->all_io_ds) { | ||
1656 | *error = "Error creating pool's all io deferred set"; | ||
1657 | err_p = ERR_PTR(-ENOMEM); | ||
1658 | goto bad_all_io_ds; | ||
1659 | } | ||
2017 | 1660 | ||
2018 | pool->next_mapping = NULL; | 1661 | pool->next_mapping = NULL; |
2019 | pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, | 1662 | pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, |
@@ -2042,11 +1685,15 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
2042 | bad_endio_hook_pool: | 1685 | bad_endio_hook_pool: |
2043 | mempool_destroy(pool->mapping_pool); | 1686 | mempool_destroy(pool->mapping_pool); |
2044 | bad_mapping_pool: | 1687 | bad_mapping_pool: |
1688 | dm_deferred_set_destroy(pool->all_io_ds); | ||
1689 | bad_all_io_ds: | ||
1690 | dm_deferred_set_destroy(pool->shared_read_ds); | ||
1691 | bad_shared_read_ds: | ||
2045 | destroy_workqueue(pool->wq); | 1692 | destroy_workqueue(pool->wq); |
2046 | bad_wq: | 1693 | bad_wq: |
2047 | dm_kcopyd_client_destroy(pool->copier); | 1694 | dm_kcopyd_client_destroy(pool->copier); |
2048 | bad_kcopyd_client: | 1695 | bad_kcopyd_client: |
2049 | prison_destroy(pool->prison); | 1696 | dm_bio_prison_destroy(pool->prison); |
2050 | bad_prison: | 1697 | bad_prison: |
2051 | kfree(pool); | 1698 | kfree(pool); |
2052 | bad_pool: | 1699 | bad_pool: |
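Aside, for readers following the unwind ladder above: the two deferred sets are now created dynamically after the bio prison and workqueue, and the new bad_all_io_ds / bad_shared_read_ds labels release them in reverse order of construction. A plain-C sketch of that goto-unwind idiom, using hypothetical malloc-backed resources rather than the real dm_deferred_set API:

    /* Illustrative only: stand-in resources, not the dm_deferred_set API. */
    #include <stdlib.h>

    struct pool_demo { void *prison, *wq, *shared_read_ds, *all_io_ds; };

    static struct pool_demo *pool_create_demo(void)
    {
            struct pool_demo *p = calloc(1, sizeof(*p));
            if (!p)
                    return NULL;

            if (!(p->prison = malloc(16)))
                    goto bad_prison;
            if (!(p->wq = malloc(16)))
                    goto bad_wq;
            if (!(p->shared_read_ds = malloc(16)))
                    goto bad_shared_read_ds;
            if (!(p->all_io_ds = malloc(16)))
                    goto bad_all_io_ds;

            return p;

    bad_all_io_ds:                  /* all_io_ds failed: undo shared_read_ds... */
            free(p->shared_read_ds);
    bad_shared_read_ds:             /* ...then everything created before it */
            free(p->wq);
    bad_wq:
            free(p->prison);
    bad_prison:
            free(p);
            return NULL;
    }

    static void pool_destroy_demo(struct pool_demo *p)
    {
            /* Normal teardown releases the new members too, as __pool_destroy()
             * now does with dm_deferred_set_destroy(). */
            free(p->all_io_ds);
            free(p->shared_read_ds);
            free(p->wq);
            free(p->prison);
            free(p);
    }

    int main(void)
    {
            struct pool_demo *p = pool_create_demo();
            if (p)
                    pool_destroy_demo(p);
            return 0;
    }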
@@ -2272,15 +1919,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
2272 | goto out_flags_changed; | 1919 | goto out_flags_changed; |
2273 | } | 1920 | } |
2274 | 1921 | ||
2275 | /* | ||
2276 | * The block layer requires discard_granularity to be a power of 2. | ||
2277 | */ | ||
2278 | if (pf.discard_enabled && !is_power_of_2(block_size)) { | ||
2279 | ti->error = "Discard support must be disabled when the block size is not a power of 2"; | ||
2280 | r = -EINVAL; | ||
2281 | goto out_flags_changed; | ||
2282 | } | ||
2283 | |||
2284 | pt->pool = pool; | 1922 | pt->pool = pool; |
2285 | pt->ti = ti; | 1923 | pt->ti = ti; |
2286 | pt->metadata_dev = metadata_dev; | 1924 | pt->metadata_dev = metadata_dev; |
@@ -2762,6 +2400,11 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | |||
2762 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | 2400 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); |
2763 | } | 2401 | } |
2764 | 2402 | ||
2403 | static bool block_size_is_power_of_two(struct pool *pool) | ||
2404 | { | ||
2405 | return pool->sectors_per_block_shift >= 0; | ||
2406 | } | ||
2407 | |||
2765 | static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) | 2408 | static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) |
2766 | { | 2409 | { |
2767 | struct pool *pool = pt->pool; | 2410 | struct pool *pool = pt->pool; |
@@ -2775,8 +2418,15 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) | |||
2775 | if (pt->adjusted_pf.discard_passdown) { | 2418 | if (pt->adjusted_pf.discard_passdown) { |
2776 | data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; | 2419 | data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; |
2777 | limits->discard_granularity = data_limits->discard_granularity; | 2420 | limits->discard_granularity = data_limits->discard_granularity; |
2778 | } else | 2421 | } else if (block_size_is_power_of_two(pool)) |
2779 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; | 2422 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; |
2423 | else | ||
2424 | /* | ||
2425 | * Use largest power of 2 that is a factor of sectors_per_block | ||
2426 | * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS. | ||
2427 | */ | ||
2428 | limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1), | ||
2429 | DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT; | ||
2780 | } | 2430 | } |
2781 | 2431 | ||
2782 | static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) | 2432 | static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) |
@@ -2804,7 +2454,7 @@ static struct target_type pool_target = { | |||
2804 | .name = "thin-pool", | 2454 | .name = "thin-pool", |
2805 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | | 2455 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | |
2806 | DM_TARGET_IMMUTABLE, | 2456 | DM_TARGET_IMMUTABLE, |
2807 | .version = {1, 4, 0}, | 2457 | .version = {1, 5, 0}, |
2808 | .module = THIS_MODULE, | 2458 | .module = THIS_MODULE, |
2809 | .ctr = pool_ctr, | 2459 | .ctr = pool_ctr, |
2810 | .dtr = pool_dtr, | 2460 | .dtr = pool_dtr, |
@@ -2979,7 +2629,7 @@ static int thin_endio(struct dm_target *ti, | |||
2979 | 2629 | ||
2980 | if (h->shared_read_entry) { | 2630 | if (h->shared_read_entry) { |
2981 | INIT_LIST_HEAD(&work); | 2631 | INIT_LIST_HEAD(&work); |
2982 | ds_dec(h->shared_read_entry, &work); | 2632 | dm_deferred_entry_dec(h->shared_read_entry, &work); |
2983 | 2633 | ||
2984 | spin_lock_irqsave(&pool->lock, flags); | 2634 | spin_lock_irqsave(&pool->lock, flags); |
2985 | list_for_each_entry_safe(m, tmp, &work, list) { | 2635 | list_for_each_entry_safe(m, tmp, &work, list) { |
@@ -2992,7 +2642,7 @@ static int thin_endio(struct dm_target *ti, | |||
2992 | 2642 | ||
2993 | if (h->all_io_entry) { | 2643 | if (h->all_io_entry) { |
2994 | INIT_LIST_HEAD(&work); | 2644 | INIT_LIST_HEAD(&work); |
2995 | ds_dec(h->all_io_entry, &work); | 2645 | dm_deferred_entry_dec(h->all_io_entry, &work); |
2996 | spin_lock_irqsave(&pool->lock, flags); | 2646 | spin_lock_irqsave(&pool->lock, flags); |
2997 | list_for_each_entry_safe(m, tmp, &work, list) | 2647 | list_for_each_entry_safe(m, tmp, &work, list) |
2998 | list_add(&m->list, &pool->prepared_discards); | 2648 | list_add(&m->list, &pool->prepared_discards); |
@@ -3095,7 +2745,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
3095 | 2745 | ||
3096 | static struct target_type thin_target = { | 2746 | static struct target_type thin_target = { |
3097 | .name = "thin", | 2747 | .name = "thin", |
3098 | .version = {1, 4, 0}, | 2748 | .version = {1, 5, 0}, |
3099 | .module = THIS_MODULE, | 2749 | .module = THIS_MODULE, |
3100 | .ctr = thin_ctr, | 2750 | .ctr = thin_ctr, |
3101 | .dtr = thin_dtr, | 2751 | .dtr = thin_dtr, |
@@ -3125,10 +2775,6 @@ static int __init dm_thin_init(void) | |||
3125 | 2775 | ||
3126 | r = -ENOMEM; | 2776 | r = -ENOMEM; |
3127 | 2777 | ||
3128 | _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0); | ||
3129 | if (!_cell_cache) | ||
3130 | goto bad_cell_cache; | ||
3131 | |||
3132 | _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); | 2778 | _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); |
3133 | if (!_new_mapping_cache) | 2779 | if (!_new_mapping_cache) |
3134 | goto bad_new_mapping_cache; | 2780 | goto bad_new_mapping_cache; |
@@ -3142,8 +2788,6 @@ static int __init dm_thin_init(void) | |||
3142 | bad_endio_hook_cache: | 2788 | bad_endio_hook_cache: |
3143 | kmem_cache_destroy(_new_mapping_cache); | 2789 | kmem_cache_destroy(_new_mapping_cache); |
3144 | bad_new_mapping_cache: | 2790 | bad_new_mapping_cache: |
3145 | kmem_cache_destroy(_cell_cache); | ||
3146 | bad_cell_cache: | ||
3147 | dm_unregister_target(&pool_target); | 2791 | dm_unregister_target(&pool_target); |
3148 | bad_pool_target: | 2792 | bad_pool_target: |
3149 | dm_unregister_target(&thin_target); | 2793 | dm_unregister_target(&thin_target); |
@@ -3156,7 +2800,6 @@ static void dm_thin_exit(void) | |||
3156 | dm_unregister_target(&thin_target); | 2800 | dm_unregister_target(&thin_target); |
3157 | dm_unregister_target(&pool_target); | 2801 | dm_unregister_target(&pool_target); |
3158 | 2802 | ||
3159 | kmem_cache_destroy(_cell_cache); | ||
3160 | kmem_cache_destroy(_new_mapping_cache); | 2803 | kmem_cache_destroy(_new_mapping_cache); |
3161 | kmem_cache_destroy(_endio_hook_cache); | 2804 | kmem_cache_destroy(_endio_hook_cache); |
3162 | } | 2805 | } |
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 892ae2766aa6..9e7328bb4030 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c | |||
@@ -438,7 +438,7 @@ static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io) | |||
438 | verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); | 438 | verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); |
439 | verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); | 439 | verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); |
440 | if (!i) { | 440 | if (!i) { |
441 | unsigned cluster = *(volatile unsigned *)&dm_verity_prefetch_cluster; | 441 | unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); |
442 | 442 | ||
443 | cluster >>= v->data_dev_block_bits; | 443 | cluster >>= v->data_dev_block_bits; |
444 | if (unlikely(!cluster)) | 444 | if (unlikely(!cluster)) |
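The dm-verity hunk swaps an open-coded volatile cast for ACCESS_ONCE(), which expands to essentially the same typed volatile dereference, so this is a readability cleanup rather than a behavioural change. A small sketch, with the macro reproduced approximately as the kernel defined it at the time and a hypothetical tunable standing in for dm_verity_prefetch_cluster:

    #include <stdio.h>

    /* Approximation of the kernel macro of this era. */
    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned prefetch_cluster = 16;    /* stand-in for the module parameter */

    int main(void)
    {
            /* One snapshot: the compiler must load the value exactly once here
             * instead of possibly re-reading it later in the function.        */
            unsigned cluster = ACCESS_ONCE(prefetch_cluster);

            printf("cluster snapshot = %u\n", cluster);
            return 0;
    }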
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 66ceaff6455c..02db9183ca01 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -71,6 +71,7 @@ struct dm_target_io { | |||
71 | struct dm_io *io; | 71 | struct dm_io *io; |
72 | struct dm_target *ti; | 72 | struct dm_target *ti; |
73 | union map_info info; | 73 | union map_info info; |
74 | struct bio clone; | ||
74 | }; | 75 | }; |
75 | 76 | ||
76 | /* | 77 | /* |
@@ -214,7 +215,6 @@ struct dm_md_mempools { | |||
214 | 215 | ||
215 | #define MIN_IOS 256 | 216 | #define MIN_IOS 256 |
216 | static struct kmem_cache *_io_cache; | 217 | static struct kmem_cache *_io_cache; |
217 | static struct kmem_cache *_tio_cache; | ||
218 | static struct kmem_cache *_rq_tio_cache; | 218 | static struct kmem_cache *_rq_tio_cache; |
219 | 219 | ||
220 | /* | 220 | /* |
@@ -232,14 +232,9 @@ static int __init local_init(void) | |||
232 | if (!_io_cache) | 232 | if (!_io_cache) |
233 | return r; | 233 | return r; |
234 | 234 | ||
235 | /* allocate a slab for the target ios */ | ||
236 | _tio_cache = KMEM_CACHE(dm_target_io, 0); | ||
237 | if (!_tio_cache) | ||
238 | goto out_free_io_cache; | ||
239 | |||
240 | _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); | 235 | _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); |
241 | if (!_rq_tio_cache) | 236 | if (!_rq_tio_cache) |
242 | goto out_free_tio_cache; | 237 | goto out_free_io_cache; |
243 | 238 | ||
244 | _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0); | 239 | _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0); |
245 | if (!_rq_bio_info_cache) | 240 | if (!_rq_bio_info_cache) |
@@ -265,8 +260,6 @@ out_free_rq_bio_info_cache: | |||
265 | kmem_cache_destroy(_rq_bio_info_cache); | 260 | kmem_cache_destroy(_rq_bio_info_cache); |
266 | out_free_rq_tio_cache: | 261 | out_free_rq_tio_cache: |
267 | kmem_cache_destroy(_rq_tio_cache); | 262 | kmem_cache_destroy(_rq_tio_cache); |
268 | out_free_tio_cache: | ||
269 | kmem_cache_destroy(_tio_cache); | ||
270 | out_free_io_cache: | 263 | out_free_io_cache: |
271 | kmem_cache_destroy(_io_cache); | 264 | kmem_cache_destroy(_io_cache); |
272 | 265 | ||
@@ -277,7 +270,6 @@ static void local_exit(void) | |||
277 | { | 270 | { |
278 | kmem_cache_destroy(_rq_bio_info_cache); | 271 | kmem_cache_destroy(_rq_bio_info_cache); |
279 | kmem_cache_destroy(_rq_tio_cache); | 272 | kmem_cache_destroy(_rq_tio_cache); |
280 | kmem_cache_destroy(_tio_cache); | ||
281 | kmem_cache_destroy(_io_cache); | 273 | kmem_cache_destroy(_io_cache); |
282 | unregister_blkdev(_major, _name); | 274 | unregister_blkdev(_major, _name); |
283 | dm_uevent_exit(); | 275 | dm_uevent_exit(); |
@@ -463,7 +455,7 @@ static void free_io(struct mapped_device *md, struct dm_io *io) | |||
463 | 455 | ||
464 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) | 456 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) |
465 | { | 457 | { |
466 | mempool_free(tio, md->tio_pool); | 458 | bio_put(&tio->clone); |
467 | } | 459 | } |
468 | 460 | ||
469 | static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, | 461 | static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, |
@@ -682,7 +674,6 @@ static void clone_endio(struct bio *bio, int error) | |||
682 | } | 674 | } |
683 | 675 | ||
684 | free_tio(md, tio); | 676 | free_tio(md, tio); |
685 | bio_put(bio); | ||
686 | dec_pending(io, error); | 677 | dec_pending(io, error); |
687 | } | 678 | } |
688 | 679 | ||
@@ -1002,12 +993,12 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) | |||
1002 | } | 993 | } |
1003 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); | 994 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); |
1004 | 995 | ||
1005 | static void __map_bio(struct dm_target *ti, struct bio *clone, | 996 | static void __map_bio(struct dm_target *ti, struct dm_target_io *tio) |
1006 | struct dm_target_io *tio) | ||
1007 | { | 997 | { |
1008 | int r; | 998 | int r; |
1009 | sector_t sector; | 999 | sector_t sector; |
1010 | struct mapped_device *md; | 1000 | struct mapped_device *md; |
1001 | struct bio *clone = &tio->clone; | ||
1011 | 1002 | ||
1012 | clone->bi_end_io = clone_endio; | 1003 | clone->bi_end_io = clone_endio; |
1013 | clone->bi_private = tio; | 1004 | clone->bi_private = tio; |
@@ -1031,7 +1022,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, | |||
1031 | /* error the io and bail out, or requeue it if needed */ | 1022 | /* error the io and bail out, or requeue it if needed */ |
1032 | md = tio->io->md; | 1023 | md = tio->io->md; |
1033 | dec_pending(tio->io, r); | 1024 | dec_pending(tio->io, r); |
1034 | bio_put(clone); | ||
1035 | free_tio(md, tio); | 1025 | free_tio(md, tio); |
1036 | } else if (r) { | 1026 | } else if (r) { |
1037 | DMWARN("unimplemented target map return value: %d", r); | 1027 | DMWARN("unimplemented target map return value: %d", r); |
@@ -1052,14 +1042,13 @@ struct clone_info { | |||
1052 | /* | 1042 | /* |
1053 | * Creates a little bio that just does part of a bvec. | 1043 | * Creates a little bio that just does part of a bvec. |
1054 | */ | 1044 | */ |
1055 | static struct bio *split_bvec(struct bio *bio, sector_t sector, | 1045 | static void split_bvec(struct dm_target_io *tio, struct bio *bio, |
1056 | unsigned short idx, unsigned int offset, | 1046 | sector_t sector, unsigned short idx, unsigned int offset, |
1057 | unsigned int len, struct bio_set *bs) | 1047 | unsigned int len, struct bio_set *bs) |
1058 | { | 1048 | { |
1059 | struct bio *clone; | 1049 | struct bio *clone = &tio->clone; |
1060 | struct bio_vec *bv = bio->bi_io_vec + idx; | 1050 | struct bio_vec *bv = bio->bi_io_vec + idx; |
1061 | 1051 | ||
1062 | clone = bio_alloc_bioset(GFP_NOIO, 1, bs); | ||
1063 | *clone->bi_io_vec = *bv; | 1052 | *clone->bi_io_vec = *bv; |
1064 | 1053 | ||
1065 | clone->bi_sector = sector; | 1054 | clone->bi_sector = sector; |
@@ -1076,20 +1065,18 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, | |||
1076 | bio_integrity_trim(clone, | 1065 | bio_integrity_trim(clone, |
1077 | bio_sector_offset(bio, idx, offset), len); | 1066 | bio_sector_offset(bio, idx, offset), len); |
1078 | } | 1067 | } |
1079 | |||
1080 | return clone; | ||
1081 | } | 1068 | } |
1082 | 1069 | ||
1083 | /* | 1070 | /* |
1084 | * Creates a bio that consists of range of complete bvecs. | 1071 | * Creates a bio that consists of range of complete bvecs. |
1085 | */ | 1072 | */ |
1086 | static struct bio *clone_bio(struct bio *bio, sector_t sector, | 1073 | static void clone_bio(struct dm_target_io *tio, struct bio *bio, |
1087 | unsigned short idx, unsigned short bv_count, | 1074 | sector_t sector, unsigned short idx, |
1088 | unsigned int len, struct bio_set *bs) | 1075 | unsigned short bv_count, unsigned int len, |
1076 | struct bio_set *bs) | ||
1089 | { | 1077 | { |
1090 | struct bio *clone; | 1078 | struct bio *clone = &tio->clone; |
1091 | 1079 | ||
1092 | clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); | ||
1093 | __bio_clone(clone, bio); | 1080 | __bio_clone(clone, bio); |
1094 | clone->bi_sector = sector; | 1081 | clone->bi_sector = sector; |
1095 | clone->bi_idx = idx; | 1082 | clone->bi_idx = idx; |
@@ -1104,14 +1091,16 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
1104 | bio_integrity_trim(clone, | 1091 | bio_integrity_trim(clone, |
1105 | bio_sector_offset(bio, idx, 0), len); | 1092 | bio_sector_offset(bio, idx, 0), len); |
1106 | } | 1093 | } |
1107 | |||
1108 | return clone; | ||
1109 | } | 1094 | } |
1110 | 1095 | ||
1111 | static struct dm_target_io *alloc_tio(struct clone_info *ci, | 1096 | static struct dm_target_io *alloc_tio(struct clone_info *ci, |
1112 | struct dm_target *ti) | 1097 | struct dm_target *ti, int nr_iovecs) |
1113 | { | 1098 | { |
1114 | struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO); | 1099 | struct dm_target_io *tio; |
1100 | struct bio *clone; | ||
1101 | |||
1102 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs); | ||
1103 | tio = container_of(clone, struct dm_target_io, clone); | ||
1115 | 1104 | ||
1116 | tio->io = ci->io; | 1105 | tio->io = ci->io; |
1117 | tio->ti = ti; | 1106 | tio->ti = ti; |
@@ -1123,8 +1112,8 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, | |||
1123 | static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, | 1112 | static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, |
1124 | unsigned request_nr, sector_t len) | 1113 | unsigned request_nr, sector_t len) |
1125 | { | 1114 | { |
1126 | struct dm_target_io *tio = alloc_tio(ci, ti); | 1115 | struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs); |
1127 | struct bio *clone; | 1116 | struct bio *clone = &tio->clone; |
1128 | 1117 | ||
1129 | tio->info.target_request_nr = request_nr; | 1118 | tio->info.target_request_nr = request_nr; |
1130 | 1119 | ||
@@ -1133,14 +1122,14 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, | |||
1133 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush | 1122 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush |
1134 | * and discard, so no need for concern about wasted bvec allocations. | 1123 | * and discard, so no need for concern about wasted bvec allocations. |
1135 | */ | 1124 | */ |
1136 | clone = bio_clone_bioset(ci->bio, GFP_NOIO, ci->md->bs); | ||
1137 | 1125 | ||
1126 | __bio_clone(clone, ci->bio); | ||
1138 | if (len) { | 1127 | if (len) { |
1139 | clone->bi_sector = ci->sector; | 1128 | clone->bi_sector = ci->sector; |
1140 | clone->bi_size = to_bytes(len); | 1129 | clone->bi_size = to_bytes(len); |
1141 | } | 1130 | } |
1142 | 1131 | ||
1143 | __map_bio(ti, clone, tio); | 1132 | __map_bio(ti, tio); |
1144 | } | 1133 | } |
1145 | 1134 | ||
1146 | static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, | 1135 | static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, |
@@ -1169,14 +1158,13 @@ static int __clone_and_map_empty_flush(struct clone_info *ci) | |||
1169 | */ | 1158 | */ |
1170 | static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) | 1159 | static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) |
1171 | { | 1160 | { |
1172 | struct bio *clone, *bio = ci->bio; | 1161 | struct bio *bio = ci->bio; |
1173 | struct dm_target_io *tio; | 1162 | struct dm_target_io *tio; |
1174 | 1163 | ||
1175 | tio = alloc_tio(ci, ti); | 1164 | tio = alloc_tio(ci, ti, bio->bi_max_vecs); |
1176 | clone = clone_bio(bio, ci->sector, ci->idx, | 1165 | clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx, |
1177 | bio->bi_vcnt - ci->idx, ci->sector_count, | 1166 | ci->sector_count, ci->md->bs); |
1178 | ci->md->bs); | 1167 | __map_bio(ti, tio); |
1179 | __map_bio(ti, clone, tio); | ||
1180 | ci->sector_count = 0; | 1168 | ci->sector_count = 0; |
1181 | } | 1169 | } |
1182 | 1170 | ||
@@ -1214,7 +1202,7 @@ static int __clone_and_map_discard(struct clone_info *ci) | |||
1214 | 1202 | ||
1215 | static int __clone_and_map(struct clone_info *ci) | 1203 | static int __clone_and_map(struct clone_info *ci) |
1216 | { | 1204 | { |
1217 | struct bio *clone, *bio = ci->bio; | 1205 | struct bio *bio = ci->bio; |
1218 | struct dm_target *ti; | 1206 | struct dm_target *ti; |
1219 | sector_t len = 0, max; | 1207 | sector_t len = 0, max; |
1220 | struct dm_target_io *tio; | 1208 | struct dm_target_io *tio; |
@@ -1254,10 +1242,10 @@ static int __clone_and_map(struct clone_info *ci) | |||
1254 | len += bv_len; | 1242 | len += bv_len; |
1255 | } | 1243 | } |
1256 | 1244 | ||
1257 | tio = alloc_tio(ci, ti); | 1245 | tio = alloc_tio(ci, ti, bio->bi_max_vecs); |
1258 | clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, | 1246 | clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len, |
1259 | ci->md->bs); | 1247 | ci->md->bs); |
1260 | __map_bio(ti, clone, tio); | 1248 | __map_bio(ti, tio); |
1261 | 1249 | ||
1262 | ci->sector += len; | 1250 | ci->sector += len; |
1263 | ci->sector_count -= len; | 1251 | ci->sector_count -= len; |
@@ -1282,12 +1270,11 @@ static int __clone_and_map(struct clone_info *ci) | |||
1282 | 1270 | ||
1283 | len = min(remaining, max); | 1271 | len = min(remaining, max); |
1284 | 1272 | ||
1285 | tio = alloc_tio(ci, ti); | 1273 | tio = alloc_tio(ci, ti, 1); |
1286 | clone = split_bvec(bio, ci->sector, ci->idx, | 1274 | split_bvec(tio, bio, ci->sector, ci->idx, |
1287 | bv->bv_offset + offset, len, | 1275 | bv->bv_offset + offset, len, ci->md->bs); |
1288 | ci->md->bs); | ||
1289 | 1276 | ||
1290 | __map_bio(ti, clone, tio); | 1277 | __map_bio(ti, tio); |
1291 | 1278 | ||
1292 | ci->sector += len; | 1279 | ci->sector += len; |
1293 | ci->sector_count -= len; | 1280 | ci->sector_count -= len; |
@@ -1955,7 +1942,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t) | |||
1955 | { | 1942 | { |
1956 | struct dm_md_mempools *p; | 1943 | struct dm_md_mempools *p; |
1957 | 1944 | ||
1958 | if (md->io_pool && md->tio_pool && md->bs) | 1945 | if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) |
1959 | /* the md already has necessary mempools */ | 1946 | /* the md already has necessary mempools */ |
1960 | goto out; | 1947 | goto out; |
1961 | 1948 | ||
@@ -2732,14 +2719,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity) | |||
2732 | if (!pools->io_pool) | 2719 | if (!pools->io_pool) |
2733 | goto free_pools_and_out; | 2720 | goto free_pools_and_out; |
2734 | 2721 | ||
2735 | pools->tio_pool = (type == DM_TYPE_BIO_BASED) ? | 2722 | pools->tio_pool = NULL; |
2736 | mempool_create_slab_pool(MIN_IOS, _tio_cache) : | 2723 | if (type == DM_TYPE_REQUEST_BASED) { |
2737 | mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); | 2724 | pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); |
2738 | if (!pools->tio_pool) | 2725 | if (!pools->tio_pool) |
2739 | goto free_io_pool_and_out; | 2726 | goto free_io_pool_and_out; |
2727 | } | ||
2740 | 2728 | ||
2741 | pools->bs = (type == DM_TYPE_BIO_BASED) ? | 2729 | pools->bs = (type == DM_TYPE_BIO_BASED) ? |
2742 | bioset_create(pool_size, 0) : | 2730 | bioset_create(pool_size, |
2731 | offsetof(struct dm_target_io, clone)) : | ||
2743 | bioset_create(pool_size, | 2732 | bioset_create(pool_size, |
2744 | offsetof(struct dm_rq_clone_bio_info, clone)); | 2733 | offsetof(struct dm_rq_clone_bio_info, clone)); |
2745 | if (!pools->bs) | 2734 | if (!pools->bs) |
@@ -2754,7 +2743,8 @@ free_bioset_and_out: | |||
2754 | bioset_free(pools->bs); | 2743 | bioset_free(pools->bs); |
2755 | 2744 | ||
2756 | free_tio_pool_and_out: | 2745 | free_tio_pool_and_out: |
2757 | mempool_destroy(pools->tio_pool); | 2746 | if (pools->tio_pool) |
2747 | mempool_destroy(pools->tio_pool); | ||
2758 | 2748 | ||
2759 | free_io_pool_and_out: | 2749 | free_io_pool_and_out: |
2760 | mempool_destroy(pools->io_pool); | 2750 | mempool_destroy(pools->io_pool); |
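The dm.c rework above moves the per-bio dm_target_io into the bio_set's front_pad: bioset_create() is now passed offsetof(struct dm_target_io, clone), so each clone bio and its dm_target_io come from a single allocation and the separate _tio_cache mempool disappears. A simplified, userspace-only sketch of the underlying container_of trick; the struct and macro below are stand-ins, not the kernel definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bio { unsigned long bi_sector; };   /* stand-in for struct bio */

    struct dm_target_io {                      /* simplified stand-in */
            void *io;
            void *ti;
            struct bio clone;                  /* embedded clone, last member */
    };

    int main(void)
    {
            struct dm_target_io tio = { .clone = { .bi_sector = 42 } };
            struct bio *clone = &tio.clone;    /* the pointer the bio layer sees */

            /* Recover the containing dm_target_io from the bio pointer alone,
             * mirroring what alloc_tio() does after bio_alloc_bioset() hands
             * back the embedded clone.                                        */
            struct dm_target_io *back =
                    container_of(clone, struct dm_target_io, clone);

            printf("%s, sector %lu\n",
                   back == &tio ? "recovered" : "mismatch", back->clone.bi_sector);
            return 0;
    }

On the real teardown side, free_tio() now just drops the bio reference with bio_put(), so the combined allocation is returned to the bio_set rather than to a dedicated mempool.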
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index d77602d63c83..f3a9af8cdec3 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c | |||
@@ -434,14 +434,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, | |||
434 | if (ref_count && !old) { | 434 | if (ref_count && !old) { |
435 | *ev = SM_ALLOC; | 435 | *ev = SM_ALLOC; |
436 | ll->nr_allocated++; | 436 | ll->nr_allocated++; |
437 | ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1); | 437 | le32_add_cpu(&ie_disk.nr_free, -1); |
438 | if (le32_to_cpu(ie_disk.none_free_before) == bit) | 438 | if (le32_to_cpu(ie_disk.none_free_before) == bit) |
439 | ie_disk.none_free_before = cpu_to_le32(bit + 1); | 439 | ie_disk.none_free_before = cpu_to_le32(bit + 1); |
440 | 440 | ||
441 | } else if (old && !ref_count) { | 441 | } else if (old && !ref_count) { |
442 | *ev = SM_FREE; | 442 | *ev = SM_FREE; |
443 | ll->nr_allocated--; | 443 | ll->nr_allocated--; |
444 | ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1); | 444 | le32_add_cpu(&ie_disk.nr_free, 1); |
445 | ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit)); | 445 | ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit)); |
446 | } | 446 | } |
447 | 447 | ||
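Finally, the dm-space-map-common.c hunks are a straight substitution: le32_add_cpu(&x, d) is shorthand for x = cpu_to_le32(le32_to_cpu(x) + d). A userspace sketch of the equivalence, using the <endian.h> helpers as stand-ins for the kernel's byte-order conversions:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same transformation the patch applies to ie_disk.nr_free. */
    static void le32_add_cpu(uint32_t *var, int32_t val)
    {
            *var = htole32((uint32_t)((int32_t)le32toh(*var) + val));
    }

    int main(void)
    {
            uint32_t nr_free = htole32(100);   /* little-endian on-disk field */

            le32_add_cpu(&nr_free, -1);        /* SM_ALLOC: one fewer free block */
            le32_add_cpu(&nr_free, 1);         /* SM_FREE: back where we started */

            printf("nr_free = %u\n", (unsigned)le32toh(nr_free));   /* 100 */
            return 0;
    }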