diff options
| author | Ilya Dryomov <idryomov@gmail.com> | 2019-03-15 09:50:04 -0400 |
|---|---|---|
| committer | Ilya Dryomov <idryomov@gmail.com> | 2019-03-18 15:30:01 -0400 |
| commit | 16d80c54ad42c573a897ae7bcf5a9816be54e6fe (patch) | |
| tree | c2ff54b1c6e1f6867fa25a6bc201dffe77c82a8e | |
| parent | 9e98c678c2d6ae3a17cb2de55d17f69dddaa231b (diff) | |
rbd: set io_min, io_opt and discard_granularity to alloc_size
Now that we have alloc_size that controls our discard behavior, it
doesn't make sense to have these set to object (set) size. alloc_size
defaults to 64k, but because discard_granularity is likely 4M, only
ranges that are equal to or bigger than 4M can be considered during
fstrim. A smaller io_min is also more likely to be met, resulting in
fewer deferred writes on bluestore OSDs.
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Jason Dillaman <dillaman@redhat.com>
| -rw-r--r-- | drivers/block/rbd.c | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4ba967d65cf9..3b2c9289dccb 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
| @@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private) | |||
| 833 | pctx->opts->queue_depth = intval; | 833 | pctx->opts->queue_depth = intval; |
| 834 | break; | 834 | break; |
| 835 | case Opt_alloc_size: | 835 | case Opt_alloc_size: |
| 836 | if (intval < 1) { | 836 | if (intval < SECTOR_SIZE) { |
| 837 | pr_err("alloc_size out of range\n"); | 837 | pr_err("alloc_size out of range\n"); |
| 838 | return -EINVAL; | 838 | return -EINVAL; |
| 839 | } | 839 | } |
| @@ -4203,12 +4203,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) | |||
| 4203 | q->limits.max_sectors = queue_max_hw_sectors(q); | 4203 | q->limits.max_sectors = queue_max_hw_sectors(q); |
| 4204 | blk_queue_max_segments(q, USHRT_MAX); | 4204 | blk_queue_max_segments(q, USHRT_MAX); |
| 4205 | blk_queue_max_segment_size(q, UINT_MAX); | 4205 | blk_queue_max_segment_size(q, UINT_MAX); |
| 4206 | blk_queue_io_min(q, objset_bytes); | 4206 | blk_queue_io_min(q, rbd_dev->opts->alloc_size); |
| 4207 | blk_queue_io_opt(q, objset_bytes); | 4207 | blk_queue_io_opt(q, rbd_dev->opts->alloc_size); |
| 4208 | 4208 | ||
| 4209 | if (rbd_dev->opts->trim) { | 4209 | if (rbd_dev->opts->trim) { |
| 4210 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); | 4210 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); |
| 4211 | q->limits.discard_granularity = objset_bytes; | 4211 | q->limits.discard_granularity = rbd_dev->opts->alloc_size; |
| 4212 | blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); | 4212 | blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); |
| 4213 | blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); | 4213 | blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); |
| 4214 | } | 4214 | } |
