Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c  521
1 file changed, 82 insertions, 439 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c29410af1e22..058acf3a5ba7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include "dm-thin-metadata.h" 7#include "dm-thin-metadata.h"
8#include "dm-bio-prison.h"
8#include "dm.h" 9#include "dm.h"
9 10
10#include <linux/device-mapper.h> 11#include <linux/device-mapper.h>
@@ -21,7 +22,6 @@
21 * Tunable constants 22 * Tunable constants
22 */ 23 */
23#define ENDIO_HOOK_POOL_SIZE 1024 24#define ENDIO_HOOK_POOL_SIZE 1024
24#define DEFERRED_SET_SIZE 64
25#define MAPPING_POOL_SIZE 1024 25#define MAPPING_POOL_SIZE 1024
26#define PRISON_CELLS 1024 26#define PRISON_CELLS 1024
27#define COMMIT_PERIOD HZ 27#define COMMIT_PERIOD HZ
@@ -58,7 +58,7 @@
58 * i) plug io further to this physical block. (see bio_prison code). 58 * i) plug io further to this physical block. (see bio_prison code).
59 * 59 *
60 * ii) quiesce any read io to that shared data block. Obviously 60 * ii) quiesce any read io to that shared data block. Obviously
61 * including all devices that share this block. (see deferred_set code) 61 * including all devices that share this block. (see dm_deferred_set code)
62 * 62 *
63 * iii) copy the data block to a newly allocated block. This step can be 63 * iii) copy the data block to a newly allocated block. This step can be
64 * missed out if the io covers the block. (schedule_copy). 64 * missed out if the io covers the block. (schedule_copy).
@@ -99,381 +99,10 @@
99/*----------------------------------------------------------------*/ 99/*----------------------------------------------------------------*/
100 100
101/* 101/*
102 * Sometimes we can't deal with a bio straight away. We put it in prison
103 * where it can't cause any mischief. Bios are put in a cell identified
104 * by a key; multiple bios can be in the same cell. When the cell is
105 * subsequently unlocked the bios become available.
106 */
107struct bio_prison;
108
109struct cell_key {
110 int virtual;
111 dm_thin_id dev;
112 dm_block_t block;
113};
114
115struct dm_bio_prison_cell {
116 struct hlist_node list;
117 struct bio_prison *prison;
118 struct cell_key key;
119 struct bio *holder;
120 struct bio_list bios;
121};
122
123struct bio_prison {
124 spinlock_t lock;
125 mempool_t *cell_pool;
126
127 unsigned nr_buckets;
128 unsigned hash_mask;
129 struct hlist_head *cells;
130};
131
132static uint32_t calc_nr_buckets(unsigned nr_cells)
133{
134 uint32_t n = 128;
135
136 nr_cells /= 4;
137 nr_cells = min(nr_cells, 8192u);
138
139 while (n < nr_cells)
140 n <<= 1;
141
142 return n;
143}
144
145static struct kmem_cache *_cell_cache;
146
147/*
148 * @nr_cells should be the number of cells you want in use _concurrently_.
149 * Don't confuse it with the number of distinct keys.
150 */
151static struct bio_prison *prison_create(unsigned nr_cells)
152{
153 unsigned i;
154 uint32_t nr_buckets = calc_nr_buckets(nr_cells);
155 size_t len = sizeof(struct bio_prison) +
156 (sizeof(struct hlist_head) * nr_buckets);
157 struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
158
159 if (!prison)
160 return NULL;
161
162 spin_lock_init(&prison->lock);
163 prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
164 if (!prison->cell_pool) {
165 kfree(prison);
166 return NULL;
167 }
168
169 prison->nr_buckets = nr_buckets;
170 prison->hash_mask = nr_buckets - 1;
171 prison->cells = (struct hlist_head *) (prison + 1);
172 for (i = 0; i < nr_buckets; i++)
173 INIT_HLIST_HEAD(prison->cells + i);
174
175 return prison;
176}
177
178static void prison_destroy(struct bio_prison *prison)
179{
180 mempool_destroy(prison->cell_pool);
181 kfree(prison);
182}
183
184static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
185{
186 const unsigned long BIG_PRIME = 4294967291UL;
187 uint64_t hash = key->block * BIG_PRIME;
188
189 return (uint32_t) (hash & prison->hash_mask);
190}
191
192static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
193{
194 return (lhs->virtual == rhs->virtual) &&
195 (lhs->dev == rhs->dev) &&
196 (lhs->block == rhs->block);
197}
198
199static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
200 struct cell_key *key)
201{
202 struct dm_bio_prison_cell *cell;
203 struct hlist_node *tmp;
204
205 hlist_for_each_entry(cell, tmp, bucket, list)
206 if (keys_equal(&cell->key, key))
207 return cell;
208
209 return NULL;
210}
211
212/*
213 * This may block if a new cell needs allocating. You must ensure that
214 * cells will be unlocked even if the calling thread is blocked.
215 *
216 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
217 */
218static int bio_detain(struct bio_prison *prison, struct cell_key *key,
219 struct bio *inmate, struct dm_bio_prison_cell **ref)
220{
221 int r = 1;
222 unsigned long flags;
223 uint32_t hash = hash_key(prison, key);
224 struct dm_bio_prison_cell *cell, *cell2;
225
226 BUG_ON(hash > prison->nr_buckets);
227
228 spin_lock_irqsave(&prison->lock, flags);
229
230 cell = __search_bucket(prison->cells + hash, key);
231 if (cell) {
232 bio_list_add(&cell->bios, inmate);
233 goto out;
234 }
235
236 /*
237 * Allocate a new cell
238 */
239 spin_unlock_irqrestore(&prison->lock, flags);
240 cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
241 spin_lock_irqsave(&prison->lock, flags);
242
243 /*
244 * We've been unlocked, so we have to double check that
245 * nobody else has inserted this cell in the meantime.
246 */
247 cell = __search_bucket(prison->cells + hash, key);
248 if (cell) {
249 mempool_free(cell2, prison->cell_pool);
250 bio_list_add(&cell->bios, inmate);
251 goto out;
252 }
253
254 /*
255 * Use new cell.
256 */
257 cell = cell2;
258
259 cell->prison = prison;
260 memcpy(&cell->key, key, sizeof(cell->key));
261 cell->holder = inmate;
262 bio_list_init(&cell->bios);
263 hlist_add_head(&cell->list, prison->cells + hash);
264
265 r = 0;
266
267out:
268 spin_unlock_irqrestore(&prison->lock, flags);
269
270 *ref = cell;
271
272 return r;
273}
274
275/*
276 * @inmates must have been initialised prior to this call
277 */
278static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
279{
280 struct bio_prison *prison = cell->prison;
281
282 hlist_del(&cell->list);
283
284 if (inmates) {
285 bio_list_add(inmates, cell->holder);
286 bio_list_merge(inmates, &cell->bios);
287 }
288
289 mempool_free(cell, prison->cell_pool);
290}
291
292static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
293{
294 unsigned long flags;
295 struct bio_prison *prison = cell->prison;
296
297 spin_lock_irqsave(&prison->lock, flags);
298 __cell_release(cell, bios);
299 spin_unlock_irqrestore(&prison->lock, flags);
300}
301
302/*
303 * There are a couple of places where we put a bio into a cell briefly
304 * before taking it out again. In these situations we know that no other
305 * bio may be in the cell. This function releases the cell, and also does
306 * a sanity check.
307 */
308static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
309{
310 BUG_ON(cell->holder != bio);
311 BUG_ON(!bio_list_empty(&cell->bios));
312
313 __cell_release(cell, NULL);
314}
315
316static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
317{
318 unsigned long flags;
319 struct bio_prison *prison = cell->prison;
320
321 spin_lock_irqsave(&prison->lock, flags);
322 __cell_release_singleton(cell, bio);
323 spin_unlock_irqrestore(&prison->lock, flags);
324}
325
326/*
327 * Sometimes we don't want the holder, just the additional bios.
328 */
329static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
330 struct bio_list *inmates)
331{
332 struct bio_prison *prison = cell->prison;
333
334 hlist_del(&cell->list);
335 bio_list_merge(inmates, &cell->bios);
336
337 mempool_free(cell, prison->cell_pool);
338}
339
340static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
341 struct bio_list *inmates)
342{
343 unsigned long flags;
344 struct bio_prison *prison = cell->prison;
345
346 spin_lock_irqsave(&prison->lock, flags);
347 __cell_release_no_holder(cell, inmates);
348 spin_unlock_irqrestore(&prison->lock, flags);
349}
350
351static void cell_error(struct dm_bio_prison_cell *cell)
352{
353 struct bio_prison *prison = cell->prison;
354 struct bio_list bios;
355 struct bio *bio;
356 unsigned long flags;
357
358 bio_list_init(&bios);
359
360 spin_lock_irqsave(&prison->lock, flags);
361 __cell_release(cell, &bios);
362 spin_unlock_irqrestore(&prison->lock, flags);
363
364 while ((bio = bio_list_pop(&bios)))
365 bio_io_error(bio);
366}
367
368/*----------------------------------------------------------------*/
369
370/*
371 * We use the deferred set to keep track of pending reads to shared blocks.
372 * We do this to ensure the new mapping caused by a write isn't performed
373 * until these prior reads have completed. Otherwise the insertion of the
374 * new mapping could free the old block that the read bios are mapped to.
375 */
376
377struct deferred_set;
378struct deferred_entry {
379 struct deferred_set *ds;
380 unsigned count;
381 struct list_head work_items;
382};
383
384struct deferred_set {
385 spinlock_t lock;
386 unsigned current_entry;
387 unsigned sweeper;
388 struct deferred_entry entries[DEFERRED_SET_SIZE];
389};
390
391static void ds_init(struct deferred_set *ds)
392{
393 int i;
394
395 spin_lock_init(&ds->lock);
396 ds->current_entry = 0;
397 ds->sweeper = 0;
398 for (i = 0; i < DEFERRED_SET_SIZE; i++) {
399 ds->entries[i].ds = ds;
400 ds->entries[i].count = 0;
401 INIT_LIST_HEAD(&ds->entries[i].work_items);
402 }
403}
404
405static struct deferred_entry *ds_inc(struct deferred_set *ds)
406{
407 unsigned long flags;
408 struct deferred_entry *entry;
409
410 spin_lock_irqsave(&ds->lock, flags);
411 entry = ds->entries + ds->current_entry;
412 entry->count++;
413 spin_unlock_irqrestore(&ds->lock, flags);
414
415 return entry;
416}
417
418static unsigned ds_next(unsigned index)
419{
420 return (index + 1) % DEFERRED_SET_SIZE;
421}
422
423static void __sweep(struct deferred_set *ds, struct list_head *head)
424{
425 while ((ds->sweeper != ds->current_entry) &&
426 !ds->entries[ds->sweeper].count) {
427 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
428 ds->sweeper = ds_next(ds->sweeper);
429 }
430
431 if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
432 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
433}
434
435static void ds_dec(struct deferred_entry *entry, struct list_head *head)
436{
437 unsigned long flags;
438
439 spin_lock_irqsave(&entry->ds->lock, flags);
440 BUG_ON(!entry->count);
441 --entry->count;
442 __sweep(entry->ds, head);
443 spin_unlock_irqrestore(&entry->ds->lock, flags);
444}
445
446/*
447 * Returns 1 if the work was deferred, or 0 if there were no pending items to delay the job.
448 */
449static int ds_add_work(struct deferred_set *ds, struct list_head *work)
450{
451 int r = 1;
452 unsigned long flags;
453 unsigned next_entry;
454
455 spin_lock_irqsave(&ds->lock, flags);
456 if ((ds->sweeper == ds->current_entry) &&
457 !ds->entries[ds->current_entry].count)
458 r = 0;
459 else {
460 list_add(work, &ds->entries[ds->current_entry].work_items);
461 next_entry = ds_next(ds->current_entry);
462 if (!ds->entries[next_entry].count)
463 ds->current_entry = next_entry;
464 }
465 spin_unlock_irqrestore(&ds->lock, flags);
466
467 return r;
468}
469
470/*----------------------------------------------------------------*/
471
472/*
473 * Key building. 102 * Key building.
474 */ 103 */
475static void build_data_key(struct dm_thin_device *td, 104static void build_data_key(struct dm_thin_device *td,
476 dm_block_t b, struct cell_key *key) 105 dm_block_t b, struct dm_cell_key *key)
477{ 106{
478 key->virtual = 0; 107 key->virtual = 0;
479 key->dev = dm_thin_dev_id(td); 108 key->dev = dm_thin_dev_id(td);
@@ -481,7 +110,7 @@ static void build_data_key(struct dm_thin_device *td,
481} 110}
482 111
483static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, 112static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
484 struct cell_key *key) 113 struct dm_cell_key *key)
485{ 114{
486 key->virtual = 1; 115 key->virtual = 1;
487 key->dev = dm_thin_dev_id(td); 116 key->dev = dm_thin_dev_id(td);
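The hunk above removes the open-coded bio prison and deferred set in favour of the new dm-bio-prison library, and the rest of the patch is the mechanical rename at every call site: struct cell_key becomes struct dm_cell_key, bio_detain becomes dm_bio_detain, the cell_release*/cell_error helpers gain a dm_ prefix, and the ds_* calls become dm_deferred_*. For reference, a minimal sketch of the detain/release round trip as the converted callers below use it; only the dm_* calls, the key builder and the locking mirror this diff, while the helper name and control flow are illustrative:

	static void detain_and_defer_sketch(struct thin_c *tc, struct bio *bio,
					    dm_block_t block)
	{
		unsigned long flags;
		struct pool *pool = tc->pool;
		struct dm_bio_prison_cell *cell;
		struct dm_cell_key key;

		build_virtual_key(tc->td, block, &key);

		/*
		 * Non-zero return: another bio already holds this cell and
		 * ours has been queued inside it; it resurfaces when the
		 * holder releases the cell.
		 */
		if (dm_bio_detain(pool->prison, &key, bio, &cell))
			return;

		/* ... work that needs exclusive access to this virtual block ... */

		/*
		 * Hand the holder plus anything queued behind it to the
		 * worker thread, as cell_defer() does further down.
		 */
		spin_lock_irqsave(&pool->lock, flags);
		dm_cell_release(cell, &pool->deferred_bios);
		spin_unlock_irqrestore(&pool->lock, flags);

		wake_worker(pool);
	}

Callers that know the cell holds nothing but their own bio use dm_cell_release_singleton(cell, bio) instead, and error paths fail every detained bio with dm_cell_error(cell).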
@@ -534,7 +163,7 @@ struct pool {
534 unsigned low_water_triggered:1; /* A dm event has been sent */ 163 unsigned low_water_triggered:1; /* A dm event has been sent */
535 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */ 164 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
536 165
537 struct bio_prison *prison; 166 struct dm_bio_prison *prison;
538 struct dm_kcopyd_client *copier; 167 struct dm_kcopyd_client *copier;
539 168
540 struct workqueue_struct *wq; 169 struct workqueue_struct *wq;
@@ -552,8 +181,8 @@ struct pool {
552 181
553 struct bio_list retry_on_resume_list; 182 struct bio_list retry_on_resume_list;
554 183
555 struct deferred_set shared_read_ds; 184 struct dm_deferred_set *shared_read_ds;
556 struct deferred_set all_io_ds; 185 struct dm_deferred_set *all_io_ds;
557 186
558 struct dm_thin_new_mapping *next_mapping; 187 struct dm_thin_new_mapping *next_mapping;
559 mempool_t *mapping_pool; 188 mempool_t *mapping_pool;
@@ -660,8 +289,8 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
660 289
661struct dm_thin_endio_hook { 290struct dm_thin_endio_hook {
662 struct thin_c *tc; 291 struct thin_c *tc;
663 struct deferred_entry *shared_read_entry; 292 struct dm_deferred_entry *shared_read_entry;
664 struct deferred_entry *all_io_entry; 293 struct dm_deferred_entry *all_io_entry;
665 struct dm_thin_new_mapping *overwrite_mapping; 294 struct dm_thin_new_mapping *overwrite_mapping;
666}; 295};
667 296
@@ -877,7 +506,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
877 unsigned long flags; 506 unsigned long flags;
878 507
879 spin_lock_irqsave(&pool->lock, flags); 508 spin_lock_irqsave(&pool->lock, flags);
880 cell_release(cell, &pool->deferred_bios); 509 dm_cell_release(cell, &pool->deferred_bios);
881 spin_unlock_irqrestore(&tc->pool->lock, flags); 510 spin_unlock_irqrestore(&tc->pool->lock, flags);
882 511
883 wake_worker(pool); 512 wake_worker(pool);
@@ -896,7 +525,7 @@ static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell
896 bio_list_init(&bios); 525 bio_list_init(&bios);
897 526
898 spin_lock_irqsave(&pool->lock, flags); 527 spin_lock_irqsave(&pool->lock, flags);
899 cell_release_no_holder(cell, &pool->deferred_bios); 528 dm_cell_release_no_holder(cell, &pool->deferred_bios);
900 spin_unlock_irqrestore(&pool->lock, flags); 529 spin_unlock_irqrestore(&pool->lock, flags);
901 530
902 wake_worker(pool); 531 wake_worker(pool);
@@ -906,7 +535,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
906{ 535{
907 if (m->bio) 536 if (m->bio)
908 m->bio->bi_end_io = m->saved_bi_end_io; 537 m->bio->bi_end_io = m->saved_bi_end_io;
909 cell_error(m->cell); 538 dm_cell_error(m->cell);
910 list_del(&m->list); 539 list_del(&m->list);
911 mempool_free(m, m->tc->pool->mapping_pool); 540 mempool_free(m, m->tc->pool->mapping_pool);
912} 541}
@@ -921,7 +550,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
921 bio->bi_end_io = m->saved_bi_end_io; 550 bio->bi_end_io = m->saved_bi_end_io;
922 551
923 if (m->err) { 552 if (m->err) {
924 cell_error(m->cell); 553 dm_cell_error(m->cell);
925 goto out; 554 goto out;
926 } 555 }
927 556
@@ -933,7 +562,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
933 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); 562 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
934 if (r) { 563 if (r) {
935 DMERR("dm_thin_insert_block() failed"); 564 DMERR("dm_thin_insert_block() failed");
936 cell_error(m->cell); 565 dm_cell_error(m->cell);
937 goto out; 566 goto out;
938 } 567 }
939 568
@@ -1067,7 +696,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1067 m->err = 0; 696 m->err = 0;
1068 m->bio = NULL; 697 m->bio = NULL;
1069 698
1070 if (!ds_add_work(&pool->shared_read_ds, &m->list)) 699 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1071 m->quiesced = 1; 700 m->quiesced = 1;
1072 701
1073 /* 702 /*
@@ -1099,7 +728,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1099 if (r < 0) { 728 if (r < 0) {
1100 mempool_free(m, pool->mapping_pool); 729 mempool_free(m, pool->mapping_pool);
1101 DMERR("dm_kcopyd_copy() failed"); 730 DMERR("dm_kcopyd_copy() failed");
1102 cell_error(cell); 731 dm_cell_error(cell);
1103 } 732 }
1104 } 733 }
1105} 734}
@@ -1164,7 +793,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1164 if (r < 0) { 793 if (r < 0) {
1165 mempool_free(m, pool->mapping_pool); 794 mempool_free(m, pool->mapping_pool);
1166 DMERR("dm_kcopyd_zero() failed"); 795 DMERR("dm_kcopyd_zero() failed");
1167 cell_error(cell); 796 dm_cell_error(cell);
1168 } 797 }
1169 } 798 }
1170} 799}
@@ -1276,7 +905,7 @@ static void no_space(struct dm_bio_prison_cell *cell)
1276 struct bio_list bios; 905 struct bio_list bios;
1277 906
1278 bio_list_init(&bios); 907 bio_list_init(&bios);
1279 cell_release(cell, &bios); 908 dm_cell_release(cell, &bios);
1280 909
1281 while ((bio = bio_list_pop(&bios))) 910 while ((bio = bio_list_pop(&bios)))
1282 retry_on_resume(bio); 911 retry_on_resume(bio);
@@ -1288,13 +917,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1288 unsigned long flags; 917 unsigned long flags;
1289 struct pool *pool = tc->pool; 918 struct pool *pool = tc->pool;
1290 struct dm_bio_prison_cell *cell, *cell2; 919 struct dm_bio_prison_cell *cell, *cell2;
1291 struct cell_key key, key2; 920 struct dm_cell_key key, key2;
1292 dm_block_t block = get_bio_block(tc, bio); 921 dm_block_t block = get_bio_block(tc, bio);
1293 struct dm_thin_lookup_result lookup_result; 922 struct dm_thin_lookup_result lookup_result;
1294 struct dm_thin_new_mapping *m; 923 struct dm_thin_new_mapping *m;
1295 924
1296 build_virtual_key(tc->td, block, &key); 925 build_virtual_key(tc->td, block, &key);
1297 if (bio_detain(tc->pool->prison, &key, bio, &cell)) 926 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1298 return; 927 return;
1299 928
1300 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 929 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1306,8 +935,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1306 * on this block. 935 * on this block.
1307 */ 936 */
1308 build_data_key(tc->td, lookup_result.block, &key2); 937 build_data_key(tc->td, lookup_result.block, &key2);
1309 if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) { 938 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
1310 cell_release_singleton(cell, bio); 939 dm_cell_release_singleton(cell, bio);
1311 break; 940 break;
1312 } 941 }
1313 942
@@ -1326,7 +955,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1326 m->err = 0; 955 m->err = 0;
1327 m->bio = bio; 956 m->bio = bio;
1328 957
1329 if (!ds_add_work(&pool->all_io_ds, &m->list)) { 958 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1330 spin_lock_irqsave(&pool->lock, flags); 959 spin_lock_irqsave(&pool->lock, flags);
1331 list_add(&m->list, &pool->prepared_discards); 960 list_add(&m->list, &pool->prepared_discards);
1332 spin_unlock_irqrestore(&pool->lock, flags); 961 spin_unlock_irqrestore(&pool->lock, flags);
@@ -1338,8 +967,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1338 * a block boundary. So we submit the discard of a 967 * a block boundary. So we submit the discard of a
1339 * partial block appropriately. 968 * partial block appropriately.
1340 */ 969 */
1341 cell_release_singleton(cell, bio); 970 dm_cell_release_singleton(cell, bio);
1342 cell_release_singleton(cell2, bio); 971 dm_cell_release_singleton(cell2, bio);
1343 if ((!lookup_result.shared) && pool->pf.discard_passdown) 972 if ((!lookup_result.shared) && pool->pf.discard_passdown)
1344 remap_and_issue(tc, bio, lookup_result.block); 973 remap_and_issue(tc, bio, lookup_result.block);
1345 else 974 else
@@ -1351,20 +980,20 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1351 /* 980 /*
1352 * It isn't provisioned, just forget it. 981 * It isn't provisioned, just forget it.
1353 */ 982 */
1354 cell_release_singleton(cell, bio); 983 dm_cell_release_singleton(cell, bio);
1355 bio_endio(bio, 0); 984 bio_endio(bio, 0);
1356 break; 985 break;
1357 986
1358 default: 987 default:
1359 DMERR("discard: find block unexpectedly returned %d", r); 988 DMERR("discard: find block unexpectedly returned %d", r);
1360 cell_release_singleton(cell, bio); 989 dm_cell_release_singleton(cell, bio);
1361 bio_io_error(bio); 990 bio_io_error(bio);
1362 break; 991 break;
1363 } 992 }
1364} 993}
1365 994
1366static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, 995static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1367 struct cell_key *key, 996 struct dm_cell_key *key,
1368 struct dm_thin_lookup_result *lookup_result, 997 struct dm_thin_lookup_result *lookup_result,
1369 struct dm_bio_prison_cell *cell) 998 struct dm_bio_prison_cell *cell)
1370{ 999{
@@ -1384,7 +1013,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1384 1013
1385 default: 1014 default:
1386 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); 1015 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1387 cell_error(cell); 1016 dm_cell_error(cell);
1388 break; 1017 break;
1389 } 1018 }
1390} 1019}
@@ -1395,14 +1024,14 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1395{ 1024{
1396 struct dm_bio_prison_cell *cell; 1025 struct dm_bio_prison_cell *cell;
1397 struct pool *pool = tc->pool; 1026 struct pool *pool = tc->pool;
1398 struct cell_key key; 1027 struct dm_cell_key key;
1399 1028
1400 /* 1029 /*
1401 * If cell is already occupied, then sharing is already in the process 1030 * If cell is already occupied, then sharing is already in the process
1402 * of being broken so we have nothing further to do here. 1031 * of being broken so we have nothing further to do here.
1403 */ 1032 */
1404 build_data_key(tc->td, lookup_result->block, &key); 1033 build_data_key(tc->td, lookup_result->block, &key);
1405 if (bio_detain(pool->prison, &key, bio, &cell)) 1034 if (dm_bio_detain(pool->prison, &key, bio, &cell))
1406 return; 1035 return;
1407 1036
1408 if (bio_data_dir(bio) == WRITE && bio->bi_size) 1037 if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1410,9 +1039,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1410 else { 1039 else {
1411 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 1040 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1412 1041
1413 h->shared_read_entry = ds_inc(&pool->shared_read_ds); 1042 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1414 1043
1415 cell_release_singleton(cell, bio); 1044 dm_cell_release_singleton(cell, bio);
1416 remap_and_issue(tc, bio, lookup_result->block); 1045 remap_and_issue(tc, bio, lookup_result->block);
1417 } 1046 }
1418} 1047}
@@ -1427,7 +1056,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1427 * Remap empty bios (flushes) immediately, without provisioning. 1056 * Remap empty bios (flushes) immediately, without provisioning.
1428 */ 1057 */
1429 if (!bio->bi_size) { 1058 if (!bio->bi_size) {
1430 cell_release_singleton(cell, bio); 1059 dm_cell_release_singleton(cell, bio);
1431 remap_and_issue(tc, bio, 0); 1060 remap_and_issue(tc, bio, 0);
1432 return; 1061 return;
1433 } 1062 }
@@ -1437,7 +1066,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1437 */ 1066 */
1438 if (bio_data_dir(bio) == READ) { 1067 if (bio_data_dir(bio) == READ) {
1439 zero_fill_bio(bio); 1068 zero_fill_bio(bio);
1440 cell_release_singleton(cell, bio); 1069 dm_cell_release_singleton(cell, bio);
1441 bio_endio(bio, 0); 1070 bio_endio(bio, 0);
1442 return; 1071 return;
1443 } 1072 }
@@ -1458,7 +1087,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1458 default: 1087 default:
1459 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); 1088 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1460 set_pool_mode(tc->pool, PM_READ_ONLY); 1089 set_pool_mode(tc->pool, PM_READ_ONLY);
1461 cell_error(cell); 1090 dm_cell_error(cell);
1462 break; 1091 break;
1463 } 1092 }
1464} 1093}
@@ -1468,7 +1097,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1468 int r; 1097 int r;
1469 dm_block_t block = get_bio_block(tc, bio); 1098 dm_block_t block = get_bio_block(tc, bio);
1470 struct dm_bio_prison_cell *cell; 1099 struct dm_bio_prison_cell *cell;
1471 struct cell_key key; 1100 struct dm_cell_key key;
1472 struct dm_thin_lookup_result lookup_result; 1101 struct dm_thin_lookup_result lookup_result;
1473 1102
1474 /* 1103 /*
@@ -1476,7 +1105,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1476 * being provisioned so we have nothing further to do here. 1105 * being provisioned so we have nothing further to do here.
1477 */ 1106 */
1478 build_virtual_key(tc->td, block, &key); 1107 build_virtual_key(tc->td, block, &key);
1479 if (bio_detain(tc->pool->prison, &key, bio, &cell)) 1108 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1480 return; 1109 return;
1481 1110
1482 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1111 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1491,7 +1120,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1491 * TODO: this will probably have to change when discard goes 1120 * TODO: this will probably have to change when discard goes
1492 * back in. 1121 * back in.
1493 */ 1122 */
1494 cell_release_singleton(cell, bio); 1123 dm_cell_release_singleton(cell, bio);
1495 1124
1496 if (lookup_result.shared) 1125 if (lookup_result.shared)
1497 process_shared_bio(tc, bio, block, &lookup_result); 1126 process_shared_bio(tc, bio, block, &lookup_result);
@@ -1501,7 +1130,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1501 1130
1502 case -ENODATA: 1131 case -ENODATA:
1503 if (bio_data_dir(bio) == READ && tc->origin_dev) { 1132 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1504 cell_release_singleton(cell, bio); 1133 dm_cell_release_singleton(cell, bio);
1505 remap_to_origin_and_issue(tc, bio); 1134 remap_to_origin_and_issue(tc, bio);
1506 } else 1135 } else
1507 provision_block(tc, bio, block, cell); 1136 provision_block(tc, bio, block, cell);
@@ -1509,7 +1138,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1509 1138
1510 default: 1139 default:
1511 DMERR("dm_thin_find_block() failed, error = %d", r); 1140 DMERR("dm_thin_find_block() failed, error = %d", r);
1512 cell_release_singleton(cell, bio); 1141 dm_cell_release_singleton(cell, bio);
1513 bio_io_error(bio); 1142 bio_io_error(bio);
1514 break; 1143 break;
1515 } 1144 }
@@ -1718,7 +1347,7 @@ static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *b
1718 1347
1719 h->tc = tc; 1348 h->tc = tc;
1720 h->shared_read_entry = NULL; 1349 h->shared_read_entry = NULL;
1721 h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds); 1350 h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
1722 h->overwrite_mapping = NULL; 1351 h->overwrite_mapping = NULL;
1723 1352
1724 return h; 1353 return h;
@@ -1928,7 +1557,7 @@ static void __pool_destroy(struct pool *pool)
1928 if (dm_pool_metadata_close(pool->pmd) < 0) 1557 if (dm_pool_metadata_close(pool->pmd) < 0)
1929 DMWARN("%s: dm_pool_metadata_close() failed.", __func__); 1558 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1930 1559
1931 prison_destroy(pool->prison); 1560 dm_bio_prison_destroy(pool->prison);
1932 dm_kcopyd_client_destroy(pool->copier); 1561 dm_kcopyd_client_destroy(pool->copier);
1933 1562
1934 if (pool->wq) 1563 if (pool->wq)
@@ -1938,6 +1567,8 @@ static void __pool_destroy(struct pool *pool)
1938 mempool_free(pool->next_mapping, pool->mapping_pool); 1567 mempool_free(pool->next_mapping, pool->mapping_pool);
1939 mempool_destroy(pool->mapping_pool); 1568 mempool_destroy(pool->mapping_pool);
1940 mempool_destroy(pool->endio_hook_pool); 1569 mempool_destroy(pool->endio_hook_pool);
1570 dm_deferred_set_destroy(pool->shared_read_ds);
1571 dm_deferred_set_destroy(pool->all_io_ds);
1941 kfree(pool); 1572 kfree(pool);
1942} 1573}
1943 1574
@@ -1976,7 +1607,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
1976 pool->sectors_per_block_shift = __ffs(block_size); 1607 pool->sectors_per_block_shift = __ffs(block_size);
1977 pool->low_water_blocks = 0; 1608 pool->low_water_blocks = 0;
1978 pool_features_init(&pool->pf); 1609 pool_features_init(&pool->pf);
1979 pool->prison = prison_create(PRISON_CELLS); 1610 pool->prison = dm_bio_prison_create(PRISON_CELLS);
1980 if (!pool->prison) { 1611 if (!pool->prison) {
1981 *error = "Error creating pool's bio prison"; 1612 *error = "Error creating pool's bio prison";
1982 err_p = ERR_PTR(-ENOMEM); 1613 err_p = ERR_PTR(-ENOMEM);
@@ -2012,8 +1643,20 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2012 pool->low_water_triggered = 0; 1643 pool->low_water_triggered = 0;
2013 pool->no_free_space = 0; 1644 pool->no_free_space = 0;
2014 bio_list_init(&pool->retry_on_resume_list); 1645 bio_list_init(&pool->retry_on_resume_list);
2015 ds_init(&pool->shared_read_ds); 1646
2016 ds_init(&pool->all_io_ds); 1647 pool->shared_read_ds = dm_deferred_set_create();
1648 if (!pool->shared_read_ds) {
1649 *error = "Error creating pool's shared read deferred set";
1650 err_p = ERR_PTR(-ENOMEM);
1651 goto bad_shared_read_ds;
1652 }
1653
1654 pool->all_io_ds = dm_deferred_set_create();
1655 if (!pool->all_io_ds) {
1656 *error = "Error creating pool's all io deferred set";
1657 err_p = ERR_PTR(-ENOMEM);
1658 goto bad_all_io_ds;
1659 }
2017 1660
2018 pool->next_mapping = NULL; 1661 pool->next_mapping = NULL;
2019 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, 1662 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
@@ -2042,11 +1685,15 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2042bad_endio_hook_pool: 1685bad_endio_hook_pool:
2043 mempool_destroy(pool->mapping_pool); 1686 mempool_destroy(pool->mapping_pool);
2044bad_mapping_pool: 1687bad_mapping_pool:
1688 dm_deferred_set_destroy(pool->all_io_ds);
1689bad_all_io_ds:
1690 dm_deferred_set_destroy(pool->shared_read_ds);
1691bad_shared_read_ds:
2045 destroy_workqueue(pool->wq); 1692 destroy_workqueue(pool->wq);
2046bad_wq: 1693bad_wq:
2047 dm_kcopyd_client_destroy(pool->copier); 1694 dm_kcopyd_client_destroy(pool->copier);
2048bad_kcopyd_client: 1695bad_kcopyd_client:
2049 prison_destroy(pool->prison); 1696 dm_bio_prison_destroy(pool->prison);
2050bad_prison: 1697bad_prison:
2051 kfree(pool); 1698 kfree(pool);
2052bad_pool: 1699bad_pool:
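The extra error labels exist because the old ds_init() initialised sets embedded in struct pool and could not fail, whereas dm_deferred_set_create() allocates and therefore can. The resulting construct/teardown pairing, trimmed to the calls this patch touches (see pool_create() above and __pool_destroy() earlier in the diff):

	/* pool_create(): each allocation gets a matching bad_* unwind label */
	pool->prison = dm_bio_prison_create(PRISON_CELLS);
	pool->shared_read_ds = dm_deferred_set_create();
	pool->all_io_ds = dm_deferred_set_create();

	/* __pool_destroy(): every created object is destroyed exactly once */
	dm_bio_prison_destroy(pool->prison);
	dm_deferred_set_destroy(pool->shared_read_ds);
	dm_deferred_set_destroy(pool->all_io_ds);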
@@ -2272,15 +1919,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2272 goto out_flags_changed; 1919 goto out_flags_changed;
2273 } 1920 }
2274 1921
2275 /*
2276 * The block layer requires discard_granularity to be a power of 2.
2277 */
2278 if (pf.discard_enabled && !is_power_of_2(block_size)) {
2279 ti->error = "Discard support must be disabled when the block size is not a power of 2";
2280 r = -EINVAL;
2281 goto out_flags_changed;
2282 }
2283
2284 pt->pool = pool; 1922 pt->pool = pool;
2285 pt->ti = ti; 1923 pt->ti = ti;
2286 pt->metadata_dev = metadata_dev; 1924 pt->metadata_dev = metadata_dev;
@@ -2762,6 +2400,11 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2762 return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); 2400 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2763} 2401}
2764 2402
2403static bool block_size_is_power_of_two(struct pool *pool)
2404{
2405 return pool->sectors_per_block_shift >= 0;
2406}
2407
2765static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) 2408static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2766{ 2409{
2767 struct pool *pool = pt->pool; 2410 struct pool *pool = pt->pool;
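The new block_size_is_power_of_two() helper relies on a convention already used by this file: pool_create() stores __ffs(block_size) in sectors_per_block_shift only when the block size is a power of two, and a negative value otherwise. That assignment sits just outside the context of the pool_create() hunk above, so the paraphrase below is an assumption about the surrounding code rather than part of this patch:

	/* pool_create(), paraphrased (assumption, not shown in this diff) */
	pool->sectors_per_block = block_size;
	if (block_size & (block_size - 1))
		pool->sectors_per_block_shift = -1;	/* not a power of two */
	else
		pool->sectors_per_block_shift = __ffs(block_size);

A non-negative shift therefore means the fast shift/mask remapping path is usable, and set_discard_limits() below can use the same test to decide how to pick a discard granularity.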
@@ -2775,8 +2418,15 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2775 if (pt->adjusted_pf.discard_passdown) { 2418 if (pt->adjusted_pf.discard_passdown) {
2776 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; 2419 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2777 limits->discard_granularity = data_limits->discard_granularity; 2420 limits->discard_granularity = data_limits->discard_granularity;
2778 } else 2421 } else if (block_size_is_power_of_two(pool))
2779 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 2422 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2423 else
2424 /*
2425 * Use largest power of 2 that is a factor of sectors_per_block
2426 * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
2427 */
2428 limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
2429 DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
2780} 2430}
2781 2431
2782static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) 2432static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
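This fallback is what makes the power-of-two block-size restriction removed from pool_ctr() earlier in the patch unnecessary: ffs() returns the 1-based index of the lowest set bit, so 1 << (ffs(x) - 1) is the largest power of two that divides x, and the block layer still sees a power-of-two discard_granularity even for odd block sizes. A worked example, assuming DATA_DEV_BLOCK_SIZE_MIN_SECTORS is 128 sectors (64 KiB; its definition is not visible in this diff):

	/*
	 * sectors_per_block   = 384        (192 KiB block, not a power of two)
	 * ffs(384)            = 8          (lowest set bit is 128 == 1 << 7)
	 * 1 << (8 - 1)        = 128        (largest power-of-2 factor of 384)
	 * max(128, 128)       = 128 sectors
	 * discard_granularity = 128 << SECTOR_SHIFT = 64 KiB
	 *
	 * Discards therefore stay aligned to a power-of-two boundary that
	 * still evenly divides the pool's block size.
	 */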
@@ -2804,7 +2454,7 @@ static struct target_type pool_target = {
2804 .name = "thin-pool", 2454 .name = "thin-pool",
2805 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2455 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2806 DM_TARGET_IMMUTABLE, 2456 DM_TARGET_IMMUTABLE,
2807 .version = {1, 4, 0}, 2457 .version = {1, 5, 0},
2808 .module = THIS_MODULE, 2458 .module = THIS_MODULE,
2809 .ctr = pool_ctr, 2459 .ctr = pool_ctr,
2810 .dtr = pool_dtr, 2460 .dtr = pool_dtr,
@@ -2979,7 +2629,7 @@ static int thin_endio(struct dm_target *ti,
2979 2629
2980 if (h->shared_read_entry) { 2630 if (h->shared_read_entry) {
2981 INIT_LIST_HEAD(&work); 2631 INIT_LIST_HEAD(&work);
2982 ds_dec(h->shared_read_entry, &work); 2632 dm_deferred_entry_dec(h->shared_read_entry, &work);
2983 2633
2984 spin_lock_irqsave(&pool->lock, flags); 2634 spin_lock_irqsave(&pool->lock, flags);
2985 list_for_each_entry_safe(m, tmp, &work, list) { 2635 list_for_each_entry_safe(m, tmp, &work, list) {
@@ -2992,7 +2642,7 @@ static int thin_endio(struct dm_target *ti,
2992 2642
2993 if (h->all_io_entry) { 2643 if (h->all_io_entry) {
2994 INIT_LIST_HEAD(&work); 2644 INIT_LIST_HEAD(&work);
2995 ds_dec(h->all_io_entry, &work); 2645 dm_deferred_entry_dec(h->all_io_entry, &work);
2996 spin_lock_irqsave(&pool->lock, flags); 2646 spin_lock_irqsave(&pool->lock, flags);
2997 list_for_each_entry_safe(m, tmp, &work, list) 2647 list_for_each_entry_safe(m, tmp, &work, list)
2998 list_add(&m->list, &pool->prepared_discards); 2648 list_add(&m->list, &pool->prepared_discards);
@@ -3095,7 +2745,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
3095 2745
3096static struct target_type thin_target = { 2746static struct target_type thin_target = {
3097 .name = "thin", 2747 .name = "thin",
3098 .version = {1, 4, 0}, 2748 .version = {1, 5, 0},
3099 .module = THIS_MODULE, 2749 .module = THIS_MODULE,
3100 .ctr = thin_ctr, 2750 .ctr = thin_ctr,
3101 .dtr = thin_dtr, 2751 .dtr = thin_dtr,
@@ -3125,10 +2775,6 @@ static int __init dm_thin_init(void)
3125 2775
3126 r = -ENOMEM; 2776 r = -ENOMEM;
3127 2777
3128 _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
3129 if (!_cell_cache)
3130 goto bad_cell_cache;
3131
3132 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); 2778 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3133 if (!_new_mapping_cache) 2779 if (!_new_mapping_cache)
3134 goto bad_new_mapping_cache; 2780 goto bad_new_mapping_cache;
@@ -3142,8 +2788,6 @@ static int __init dm_thin_init(void)
3142bad_endio_hook_cache: 2788bad_endio_hook_cache:
3143 kmem_cache_destroy(_new_mapping_cache); 2789 kmem_cache_destroy(_new_mapping_cache);
3144bad_new_mapping_cache: 2790bad_new_mapping_cache:
3145 kmem_cache_destroy(_cell_cache);
3146bad_cell_cache:
3147 dm_unregister_target(&pool_target); 2791 dm_unregister_target(&pool_target);
3148bad_pool_target: 2792bad_pool_target:
3149 dm_unregister_target(&thin_target); 2793 dm_unregister_target(&thin_target);
@@ -3156,7 +2800,6 @@ static void dm_thin_exit(void)
3156 dm_unregister_target(&thin_target); 2800 dm_unregister_target(&thin_target);
3157 dm_unregister_target(&pool_target); 2801 dm_unregister_target(&pool_target);
3158 2802
3159 kmem_cache_destroy(_cell_cache);
3160 kmem_cache_destroy(_new_mapping_cache); 2803 kmem_cache_destroy(_new_mapping_cache);
3161 kmem_cache_destroy(_endio_hook_cache); 2804 kmem_cache_destroy(_endio_hook_cache);
3162} 2805}
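With the prison code gone, dm-thin no longer owns the slab cache for prison cells, which is why _cell_cache and its error labels disappear from dm_thin_init() and dm_thin_exit(). Presumably the new dm-bio-prison.c sets up the equivalent cache in its own module init; a hedged sketch of what that would look like (not part of this diff):

	/* dm-bio-prison.c, sketch only */
	static struct kmem_cache *_cell_cache;

	static int __init dm_bio_prison_init(void)
	{
		_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
		if (!_cell_cache)
			return -ENOMEM;

		return 0;
	}

	static void __exit dm_bio_prison_exit(void)
	{
		kmem_cache_destroy(_cell_cache);
		_cell_cache = NULL;
	}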