author    Mike Snitzer <snitzer@redhat.com>    2019-02-25 11:07:10 -0500
committer Mike Snitzer <snitzer@redhat.com>    2019-03-05 14:53:52 -0500
commit    de7180ff908b2bc0342e832dbdaa9a5f1ecaa33a (patch)
tree      db7c3b6e894a7d5f96ab155500ff5a8275cfad66 /drivers/md
parent    f87e033b3b923d91194348c11221e1bbc92e51b2 (diff)
dm cache: add support for discard passdown to the origin device
DM cache now defaults to passing discards down to the origin device.
Users may disable this using the "no_discard_passdown" feature when
creating the cache device.

If the cache's underlying origin device doesn't support discards, then
passdown is disabled (with a warning). Similarly, if the underlying
origin device's max_discard_sectors is smaller than a cache block,
discard passdown is disabled (this is required because the sizing of
the cache's internal discard bitset depends on it).
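
For illustration, a minimal sketch of a table line that opts out of
passdown (the device names, sizes and policy below are placeholders,
not part of this change):

    dmsetup create cached --table \
      "0 409600 cache /dev/mapper/meta /dev/mapper/fast /dev/mapper/slow \
       512 1 no_discard_passdown smq 0"

Omitting the feature argument (i.e. "... 512 0 smq 0") leaves discard
passdown at its new default of enabled.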
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-cache-target.c | 126
1 file changed, 100 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index adc529f12b6b..d249cf8ac277 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -353,6 +353,7 @@ struct cache_features {
 	enum cache_metadata_mode mode;
 	enum cache_io_mode io_mode;
 	unsigned metadata_version;
+	bool discard_passdown:1;
 };
 
 struct cache_stats {
@@ -1899,7 +1900,11 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
 		b = to_dblock(from_dblock(b) + 1);
 	}
 
-	bio_endio(bio);
+	if (cache->features.discard_passdown) {
+		remap_to_origin(cache, bio);
+		generic_make_request(bio);
+	} else
+		bio_endio(bio);
 
 	return false;
 }
@@ -2233,13 +2238,14 @@ static void init_features(struct cache_features *cf)
 	cf->mode = CM_WRITE;
 	cf->io_mode = CM_IO_WRITEBACK;
 	cf->metadata_version = 1;
+	cf->discard_passdown = true;
 }
 
 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
 			  char **error)
 {
 	static const struct dm_arg _args[] = {
-		{0, 2, "Invalid number of cache feature arguments"},
+		{0, 3, "Invalid number of cache feature arguments"},
 	};
 
 	int r, mode_ctr = 0;
@@ -2274,6 +2280,9 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
 		else if (!strcasecmp(arg, "metadata2"))
 			cf->metadata_version = 2;
 
+		else if (!strcasecmp(arg, "no_discard_passdown"))
+			cf->discard_passdown = false;
+
 		else {
 			*error = "Unrecognised cache feature requested";
 			return -EINVAL;
@@ -3119,6 +3128,39 @@ static void cache_resume(struct dm_target *ti)
 	do_waker(&cache->waker.work);
 }
 
+static void emit_flags(struct cache *cache, char *result,
+		       unsigned maxlen, ssize_t *sz_ptr)
+{
+	ssize_t sz = *sz_ptr;
+	struct cache_features *cf = &cache->features;
+	unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
+
+	DMEMIT("%u ", count);
+
+	if (cf->metadata_version == 2)
+		DMEMIT("metadata2 ");
+
+	if (writethrough_mode(cache))
+		DMEMIT("writethrough ");
+
+	else if (passthrough_mode(cache))
+		DMEMIT("passthrough ");
+
+	else if (writeback_mode(cache))
+		DMEMIT("writeback ");
+
+	else {
+		DMEMIT("unknown ");
+		DMERR("%s: internal error: unknown io mode: %d",
+		      cache_device_name(cache), (int) cf->io_mode);
+	}
+
+	if (!cf->discard_passdown)
+		DMEMIT("no_discard_passdown ");
+
+	*sz_ptr = sz;
+}
+
 /*
  * Status format:
  *
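To make the new status output concrete: emit_flags() prints the number
of feature words followed by the words themselves, so for a writeback
cache (as an example) the feature portion of the status line reads:

    1 writeback                                  (all defaults)
    3 metadata2 writeback no_discard_passdown    (both optional features set)
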
@@ -3185,25 +3227,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 		       (unsigned) atomic_read(&cache->stats.promotion),
 		       (unsigned long) atomic_read(&cache->nr_dirty));
 
-		if (cache->features.metadata_version == 2)
-			DMEMIT("2 metadata2 ");
-		else
-			DMEMIT("1 ");
-
-		if (writethrough_mode(cache))
-			DMEMIT("writethrough ");
-
-		else if (passthrough_mode(cache))
-			DMEMIT("passthrough ");
-
-		else if (writeback_mode(cache))
-			DMEMIT("writeback ");
-
-		else {
-			DMERR("%s: internal error: unknown io mode: %d",
-			      cache_device_name(cache), (int) cache->features.io_mode);
-			goto err;
-		}
+		emit_flags(cache, result, maxlen, &sz);
 
 		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
 
@@ -3432,14 +3456,62 @@ static int cache_iterate_devices(struct dm_target *ti,
 	return r;
 }
 
+static bool origin_dev_supports_discard(struct block_device *origin_bdev)
+{
+	struct request_queue *q = bdev_get_queue(origin_bdev);
+
+	return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled verify that the origin device
+ * supports discards.  Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct cache *cache)
+{
+	struct block_device *origin_bdev = cache->origin_dev->bdev;
+	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+	const char *reason = NULL;
+	char buf[BDEVNAME_SIZE];
+
+	if (!cache->features.discard_passdown)
+		return;
+
+	if (!origin_dev_supports_discard(origin_bdev))
+		reason = "discard unsupported";
+
+	else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
+		reason = "max discard sectors smaller than a block";
+
+	if (reason) {
+		DMWARN("Origin device (%s) %s: Disabling discard passdown.",
+		       bdevname(origin_bdev, buf), reason);
+		cache->features.discard_passdown = false;
+	}
+}
+
 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 {
+	struct block_device *origin_bdev = cache->origin_dev->bdev;
+	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+
+	if (!cache->features.discard_passdown) {
+		/* No passdown is done so setting own virtual limits */
+		limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
+						    cache->origin_sectors);
+		limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+		return;
+	}
+
 	/*
-	 * FIXME: these limits may be incompatible with the cache device
+	 * cache_iterate_devices() is stacking both origin and fast device limits
+	 * but discards aren't passed to fast device, so inherit origin's limits.
 	 */
-	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
-					    cache->origin_sectors);
-	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+	limits->max_discard_sectors = origin_limits->max_discard_sectors;
+	limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
+	limits->discard_granularity = origin_limits->discard_granularity;
+	limits->discard_alignment = origin_limits->discard_alignment;
+	limits->discard_misaligned = origin_limits->discard_misaligned;
 }
 
 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
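As a worked example of the fallback (numbers hypothetical): with a
256 KiB cache block, cache->sectors_per_block is 512; if the origin's
queue advertises max_discard_sectors of only 256,
disable_passdown_if_not_supported() logs the "max discard sectors
smaller than a block" warning and clears the flag, so
set_discard_limits() takes the !discard_passdown branch and publishes
the cache's own virtual limits instead of inheriting the origin's.
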
@@ -3456,6 +3528,8 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
 		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
 	}
+
+	disable_passdown_if_not_supported(cache);
 	set_discard_limits(cache, limits);
 }
 
@@ -3463,7 +3537,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {2, 0, 0},
+	.version = {2, 1, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,