author    Joe Thornber <ejt@redhat.com>    2014-05-13 11:18:38 -0400
committer Mike Snitzer <snitzer@redhat.com>    2014-08-01 12:30:32 -0400
commit    8c081b52c6833a30a69ea3bdcef316eccc740c87 (patch)
tree      a48e9f698921808409304af54aa9c3b6268c0702 /drivers/md/dm-cache-target.c
parent    e5aea7b49f2b1fd01f35ca7abeb76f5c56128a55 (diff)
dm cache: simplify deferred set reference count increments
Factor out inc_and_issue and inc_ds helpers to simplify deferred set
reference count increments.  Also clean up cache_map to consistently
call cell_defer and inc_ds when the bio is DM_MAPIO_REMAPPED.

No functional change.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
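For readers outside the kernel tree, a minimal sketch of the calling pattern this patch standardizes is shown below. The types, the deferred_set_count variable and the function bodies are invented stand-ins for this example, not the dm-cache API; only the rule they demonstrate comes from the patch: increment the deferred set (inc_ds) while the prison cell is still held, and release the cell (cell_defer) only after the bio has been remapped (DM_MAPIO_REMAPPED).

/*
 * Illustrative sketch only: simplified stand-ins for the dm-cache types.
 * deferred_set_count, struct cell and the function bodies are invented for
 * this example; only the calling pattern mirrors the patch.
 */
#include <stdio.h>

#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED  1

struct bio  { int remapped; };
struct cell { int held; };              /* stands in for dm_bio_prison_cell */

static int deferred_set_count;          /* stands in for cache->all_io_ds */

/* Must be called while the prison cell is still held. */
static void inc_ds(struct cell *cell)
{
	if (!cell || !cell->held)
		return;                 /* the real target uses BUG_ON() here */
	deferred_set_count++;
}

/* Release the prison cell once the bio no longer needs it. */
static void cell_defer(struct cell *cell)
{
	cell->held = 0;
}

/* Pretend mapping step: detain the cell and remap the bio. */
static int __cache_map(struct bio *bio, struct cell *cell)
{
	cell->held = 1;
	bio->remapped = 1;
	return DM_MAPIO_REMAPPED;
}

int main(void)
{
	struct bio bio = { 0 };
	struct cell cell = { 0 };

	int r = __cache_map(&bio, &cell);
	if (r == DM_MAPIO_REMAPPED) {
		inc_ds(&cell);          /* increment while the cell is held */
		cell_defer(&cell);      /* then release the cell */
	}
	printf("deferred set count = %d\n", deferred_set_count);
	return 0;
}

Compiled with any C compiler this prints a deferred set count of 1; in the real target the increment is the per-bio all_io_entry taken via dm_deferred_entry_inc().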
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--    drivers/md/dm-cache-target.c    123
1 file changed, 77 insertions(+), 46 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 2c63326638b6..2a156af246b2 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -718,6 +718,22 @@ static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
 }
 
+/*
+ * You must increment the deferred set whilst the prison cell is held.  To
+ * encourage this, we ask for 'cell' to be passed in.
+ */
+static void inc_ds(struct cache *cache, struct bio *bio,
+		   struct dm_bio_prison_cell *cell)
+{
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+	BUG_ON(!cell);
+	BUG_ON(pb->all_io_entry);
+
+	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+}
+
 static void issue(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
@@ -737,6 +753,12 @@ static void issue(struct cache *cache, struct bio *bio)
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
+static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
+{
+	inc_ds(cache, bio, cell);
+	issue(cache, bio);
+}
+
 static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
@@ -1015,6 +1037,11 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
 
 	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
 	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
+
+	/*
+	 * No need to inc_ds() here, since the cell will be held for the
+	 * duration of the io.
+	 */
 	generic_make_request(bio);
 }
 
@@ -1115,8 +1142,7 @@ static void check_for_quiesced_migrations(struct cache *cache,
 		return;
 
 	INIT_LIST_HEAD(&work);
-	if (pb->all_io_entry)
-		dm_deferred_entry_dec(pb->all_io_entry, &work);
+	dm_deferred_entry_dec(pb->all_io_entry, &work);
 
 	if (!list_empty(&work))
 		queue_quiesced_migrations(cache, &work);
@@ -1252,6 +1278,11 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
 	else
 		remap_to_cache(cache, bio, 0);
 
+	/*
+	 * REQ_FLUSH is not directed at any particular block so we don't
+	 * need to inc_ds().  REQ_FUA's are split into a write + REQ_FLUSH
+	 * by dm-core.
+	 */
 	issue(cache, bio);
 }
 
@@ -1301,15 +1332,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
 		&cache->stats.read_miss : &cache->stats.write_miss);
 }
 
-static void issue_cache_bio(struct cache *cache, struct bio *bio,
-			    struct per_bio_data *pb,
-			    dm_oblock_t oblock, dm_cblock_t cblock)
-{
-	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-	remap_to_cache_dirty(cache, bio, oblock, cblock);
-	issue(cache, bio);
-}
-
 static void process_bio(struct cache *cache, struct prealloc *structs,
 			struct bio *bio)
 {
@@ -1318,8 +1340,6 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 	dm_oblock_t block = get_bio_block(cache, bio);
 	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
 	struct policy_result lookup_result;
-	size_t pb_data_size = get_per_bio_data_size(cache);
-	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	bool discarded_block = is_discarded_oblock(cache, block);
 	bool passthrough = passthrough_mode(&cache->features);
 	bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
@@ -1359,9 +1379,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 
 			} else {
 				/* FIXME: factor out issue_origin() */
-				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 				remap_to_origin_clear_discard(cache, bio, block);
-				issue(cache, bio);
+				inc_and_issue(cache, bio, new_ocell);
 			}
 		} else {
 			inc_hit_counter(cache, bio);
@@ -1369,20 +1388,21 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 			if (bio_data_dir(bio) == WRITE &&
 			    writethrough_mode(&cache->features) &&
 			    !is_dirty(cache, lookup_result.cblock)) {
-				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
-				issue(cache, bio);
-			} else
-				issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
+				inc_and_issue(cache, bio, new_ocell);
+
+			} else {
+				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+				inc_and_issue(cache, bio, new_ocell);
+			}
 		}
 
 		break;
 
 	case POLICY_MISS:
 		inc_miss_counter(cache, bio);
-		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 		remap_to_origin_clear_discard(cache, bio, block);
-		issue(cache, bio);
+		inc_and_issue(cache, bio, new_ocell);
 		break;
 
 	case POLICY_NEW:
@@ -1501,6 +1521,9 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
 	bio_list_init(&cache->deferred_flush_bios);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
+	/*
+	 * These bios have already been through inc_ds()
+	 */
 	while ((bio = bio_list_pop(&bios)))
 		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
 }
@@ -1518,6 +1541,9 @@ static void process_deferred_writethrough_bios(struct cache *cache)
 	bio_list_init(&cache->deferred_writethrough_bios);
 	spin_unlock_irqrestore(&cache->lock, flags);
 
+	/*
+	 * These bios have already been through inc_ds()
+	 */
 	while ((bio = bio_list_pop(&bios)))
 		generic_make_request(bio);
 }
@@ -2406,16 +2432,13 @@ out:
 	return r;
 }
 
-static int cache_map(struct dm_target *ti, struct bio *bio)
+static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell)
 {
-	struct cache *cache = ti->private;
-
 	int r;
 	dm_oblock_t block = get_bio_block(cache, bio);
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	bool can_migrate = false;
 	bool discarded_block;
-	struct dm_bio_prison_cell *cell;
 	struct policy_result lookup_result;
 	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 
@@ -2437,15 +2460,15 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	/*
 	 * Check to see if that block is currently migrating.
 	 */
-	cell = alloc_prison_cell(cache);
-	if (!cell) {
+	*cell = alloc_prison_cell(cache);
+	if (!*cell) {
 		defer_bio(cache, bio);
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	r = bio_detain(cache, block, bio, cell,
+	r = bio_detain(cache, block, bio, *cell,
 		       (cell_free_fn) free_prison_cell,
-		       cache, &cell);
+		       cache, cell);
 	if (r) {
 		if (r < 0)
 			defer_bio(cache, bio);
@@ -2458,11 +2481,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
 		       bio, &lookup_result);
 	if (r == -EWOULDBLOCK) {
-		cell_defer(cache, cell, true);
+		cell_defer(cache, *cell, true);
 		return DM_MAPIO_SUBMITTED;
 
 	} else if (r) {
 		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
+		cell_defer(cache, *cell, false);
 		bio_io_error(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
@@ -2476,52 +2500,44 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 				 * We need to invalidate this block, so
 				 * defer for the worker thread.
 				 */
-				cell_defer(cache, cell, true);
+				cell_defer(cache, *cell, true);
 				r = DM_MAPIO_SUBMITTED;
 
 			} else {
-				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 				inc_miss_counter(cache, bio);
 				remap_to_origin_clear_discard(cache, bio, block);
-
-				cell_defer(cache, cell, false);
 			}
 
 		} else {
 			inc_hit_counter(cache, bio);
-			pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
 			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
 			    !is_dirty(cache, lookup_result.cblock))
 				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
 			else
 				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
-
-			cell_defer(cache, cell, false);
 		}
 		break;
 
 	case POLICY_MISS:
 		inc_miss_counter(cache, bio);
-		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
 		if (pb->req_nr != 0) {
 			/*
 			 * This is a duplicate writethrough io that is no
 			 * longer needed because the block has been demoted.
 			 */
 			bio_endio(bio, 0);
-			cell_defer(cache, cell, false);
-			return DM_MAPIO_SUBMITTED;
-		} else {
+			cell_defer(cache, *cell, false);
+			r = DM_MAPIO_SUBMITTED;
+
+		} else
 			remap_to_origin_clear_discard(cache, bio, block);
-			cell_defer(cache, cell, false);
-		}
+
 		break;
 
 	default:
 		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
 			    (unsigned) lookup_result.op);
+		cell_defer(cache, *cell, false);
 		bio_io_error(bio);
 		r = DM_MAPIO_SUBMITTED;
 	}
@@ -2529,6 +2545,21 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }
 
+static int cache_map(struct dm_target *ti, struct bio *bio)
+{
+	int r;
+	struct dm_bio_prison_cell *cell;
+	struct cache *cache = ti->private;
+
+	r = __cache_map(cache, bio, &cell);
+	if (r == DM_MAPIO_REMAPPED) {
+		inc_ds(cache, bio, cell);
+		cell_defer(cache, cell, false);
+	}
+
+	return r;
+}
+
 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
 	struct cache *cache = ti->private;
@@ -3072,7 +3103,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {1, 4, 0},
+	.version = {1, 5, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,