Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--  drivers/md/dm-cache-target.c | 128
1 file changed, 80 insertions(+), 48 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 2c63326638b6..1af40ee209e2 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -718,6 +718,22 @@ static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
 }
 
+/*
+ * You must increment the deferred set whilst the prison cell is held. To
+ * encourage this, we ask for 'cell' to be passed in.
+ */
+static void inc_ds(struct cache *cache, struct bio *bio,
+		   struct dm_bio_prison_cell *cell)
+{
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+	BUG_ON(!cell);
+	BUG_ON(pb->all_io_entry);
+
+	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+}
+
 static void issue(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
@@ -737,6 +753,12 @@ static void issue(struct cache *cache, struct bio *bio)
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
+static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
+{
+	inc_ds(cache, bio, cell);
+	issue(cache, bio);
+}
+
 static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
@@ -1015,6 +1037,11 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
 
 	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
 	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
+
+	/*
+	 * No need to inc_ds() here, since the cell will be held for the
+	 * duration of the io.
+	 */
 	generic_make_request(bio);
 }
 
@@ -1115,8 +1142,7 @@ static void check_for_quiesced_migrations(struct cache *cache,
 		return;
 
 	INIT_LIST_HEAD(&work);
-	if (pb->all_io_entry)
-		dm_deferred_entry_dec(pb->all_io_entry, &work);
+	dm_deferred_entry_dec(pb->all_io_entry, &work);
 
 	if (!list_empty(&work))
 		queue_quiesced_migrations(cache, &work);
@@ -1252,6 +1278,11 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
 	else
 		remap_to_cache(cache, bio, 0);
 
+	/*
+	 * REQ_FLUSH is not directed at any particular block so we don't
+	 * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH
+	 * by dm-core.
+	 */
 	issue(cache, bio);
 }
 
@@ -1301,15 +1332,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
 		   &cache->stats.read_miss : &cache->stats.write_miss);
 }
 
-static void issue_cache_bio(struct cache *cache, struct bio *bio,
-			     struct per_bio_data *pb,
-			     dm_oblock_t oblock, dm_cblock_t cblock)
-{
-	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-	remap_to_cache_dirty(cache, bio, oblock, cblock);
-	issue(cache, bio);
-}
-
 static void process_bio(struct cache *cache, struct prealloc *structs,
 			struct bio *bio)
 {
@@ -1318,8 +1340,6 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 	dm_oblock_t block = get_bio_block(cache, bio);
 	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
 	struct policy_result lookup_result;
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	bool discarded_block = is_discarded_oblock(cache, block);
 	bool passthrough = passthrough_mode(&cache->features);
 	bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
@@ -1359,9 +1379,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 
 			} else {
 				/* FIXME: factor out issue_origin() */
-				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 				remap_to_origin_clear_discard(cache, bio, block);
-				issue(cache, bio);
+				inc_and_issue(cache, bio, new_ocell);
 			}
 		} else {
 			inc_hit_counter(cache, bio);
@@ -1369,20 +1388,21 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 			if (bio_data_dir(bio) == WRITE &&
 			    writethrough_mode(&cache->features) &&
 			    !is_dirty(cache, lookup_result.cblock)) {
-				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
-				issue(cache, bio);
-			} else
-				issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
+				inc_and_issue(cache, bio, new_ocell);
+
+			} else {
+				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+				inc_and_issue(cache, bio, new_ocell);
+			}
 		}
 
 		break;
 
 	case POLICY_MISS:
 		inc_miss_counter(cache, bio);
-		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 		remap_to_origin_clear_discard(cache, bio, block);
-		issue(cache, bio);
+		inc_and_issue(cache, bio, new_ocell);
 		break;
 
 	case POLICY_NEW:
@@ -1501,6 +1521,9 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
 	bio_list_init(&cache->deferred_flush_bios);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
+	/*
+	 * These bios have already been through inc_ds()
+	 */
 	while ((bio = bio_list_pop(&bios)))
 		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
 }
@@ -1518,6 +1541,9 @@ static void process_deferred_writethrough_bios(struct cache *cache)
 	bio_list_init(&cache->deferred_writethrough_bios);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
+	/*
+	 * These bios have already been through inc_ds()
+	 */
 	while ((bio = bio_list_pop(&bios)))
 		generic_make_request(bio);
 }
@@ -1694,6 +1720,7 @@ static void do_worker(struct work_struct *ws)
 
 		if (commit_if_needed(cache)) {
 			process_deferred_flush_bios(cache, false);
+			process_migrations(cache, &cache->need_commit_migrations, migration_failure);
 
 			/*
 			 * FIXME: rollback metadata or just go into a
@@ -2406,16 +2433,13 @@ out:
 	return r;
 }
 
-static int cache_map(struct dm_target *ti, struct bio *bio)
+static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell)
 {
-	struct cache *cache = ti->private;
-
 	int r;
 	dm_oblock_t block = get_bio_block(cache, bio);
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	bool can_migrate = false;
 	bool discarded_block;
-	struct dm_bio_prison_cell *cell;
 	struct policy_result lookup_result;
 	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 
@@ -2437,15 +2461,15 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	/*
 	 * Check to see if that block is currently migrating.
 	 */
-	cell = alloc_prison_cell(cache);
-	if (!cell) {
+	*cell = alloc_prison_cell(cache);
+	if (!*cell) {
 		defer_bio(cache, bio);
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	r = bio_detain(cache, block, bio, cell,
+	r = bio_detain(cache, block, bio, *cell,
 		       (cell_free_fn) free_prison_cell,
-		       cache, &cell);
+		       cache, cell);
 	if (r) {
 		if (r < 0)
 			defer_bio(cache, bio);
@@ -2458,11 +2482,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
 		       bio, &lookup_result);
 	if (r == -EWOULDBLOCK) {
-		cell_defer(cache, cell, true);
+		cell_defer(cache, *cell, true);
 		return DM_MAPIO_SUBMITTED;
 
 	} else if (r) {
 		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
+		cell_defer(cache, *cell, false);
 		bio_io_error(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
@@ -2476,52 +2501,44 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 				 * We need to invalidate this block, so
 				 * defer for the worker thread.
 				 */
-				cell_defer(cache, cell, true);
+				cell_defer(cache, *cell, true);
 				r = DM_MAPIO_SUBMITTED;
 
 			} else {
-				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 				inc_miss_counter(cache, bio);
 				remap_to_origin_clear_discard(cache, bio, block);
-
-				cell_defer(cache, cell, false);
 			}
 
 		} else {
 			inc_hit_counter(cache, bio);
-			pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
 			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
 			    !is_dirty(cache, lookup_result.cblock))
 				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
 			else
 				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
-
-			cell_defer(cache, cell, false);
 		}
 		break;
 
 	case POLICY_MISS:
 		inc_miss_counter(cache, bio);
-		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
 		if (pb->req_nr != 0) {
 			/*
 			 * This is a duplicate writethrough io that is no
 			 * longer needed because the block has been demoted.
 			 */
 			bio_endio(bio, 0);
-			cell_defer(cache, cell, false);
-			return DM_MAPIO_SUBMITTED;
-		} else {
+			cell_defer(cache, *cell, false);
+			r = DM_MAPIO_SUBMITTED;
+
+		} else
 			remap_to_origin_clear_discard(cache, bio, block);
-			cell_defer(cache, cell, false);
-		}
+
 		break;
 
 	default:
 		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
 			    (unsigned) lookup_result.op);
+		cell_defer(cache, *cell, false);
 		bio_io_error(bio);
 		r = DM_MAPIO_SUBMITTED;
 	}
@@ -2529,6 +2546,21 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }
 
+static int cache_map(struct dm_target *ti, struct bio *bio)
+{
+	int r;
+	struct dm_bio_prison_cell *cell;
+	struct cache *cache = ti->private;
+
+	r = __cache_map(cache, bio, &cell);
+	if (r == DM_MAPIO_REMAPPED) {
+		inc_ds(cache, bio, cell);
+		cell_defer(cache, cell, false);
+	}
+
+	return r;
+}
+
 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
 	struct cache *cache = ti->private;
@@ -2808,7 +2840,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 	residency = policy_residency(cache->policy);
 
 	DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
-	       (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
+	       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
 	       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
 	       (unsigned long long)nr_blocks_metadata,
 	       cache->sectors_per_block,
@@ -3062,7 +3094,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	 */
 	if (io_opt_sectors < cache->sectors_per_block ||
 	    do_div(io_opt_sectors, cache->sectors_per_block)) {
-		blk_limits_io_min(limits, 0);
+		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
 		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
 	}
 	set_discard_limits(cache, limits);
@@ -3072,7 +3104,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {1, 4, 0},
+	.version = {1, 5, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,
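
The ordering rule this patch establishes, as stated in the inc_ds() comment above, is: take the deferred-set entry while the bio's prison cell is still held, and only then release the cell (the new cache_map() wrapper calls inc_ds() before cell_defer()). The sketch below is a minimal userspace model of that rule for readers unfamiliar with dm-bio-prison; every type and function name in it (fake_cell, fake_entry, take_deferred_entry, release_cell) is invented for illustration and is not part of dm-cache or the kernel.

	/*
	 * Minimal userspace model of the ordering enforced by the new
	 * cache_map() wrapper: a deferred-set entry may only be taken while
	 * the cell protecting the block is still held.  All types here are
	 * stand-ins invented for this sketch, not the dm-cache structures.
	 */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_cell  { bool held; };
	struct fake_entry { int refs; };

	/* Model of dm_deferred_entry_inc(): only legal while the cell is held. */
	static struct fake_entry *take_deferred_entry(struct fake_entry *ds,
						      struct fake_cell *cell)
	{
		assert(cell && cell->held);	/* mirrors BUG_ON(!cell) in inc_ds() */
		ds->refs++;
		return ds;
	}

	/* Model of cell_defer(): the cell is no longer held afterwards. */
	static void release_cell(struct fake_cell *cell)
	{
		cell->held = false;
	}

	int main(void)
	{
		struct fake_entry all_io_ds = { .refs = 0 };
		struct fake_cell cell = { .held = true };

		/* Correct order, as in the new cache_map(): inc_ds() then cell_defer(). */
		take_deferred_entry(&all_io_ds, &cell);
		release_cell(&cell);

		printf("deferred refs after remap: %d\n", all_io_ds.refs);
		return 0;
	}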