author		Bob Liu <bob.liu@oracle.com>	2016-07-01 17:43:39 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2016-08-19 12:31:59 -0400
commit		172335ada40ce26806e514c83a504b45c14a4139 (patch)
tree		09172b74d288b3908c6cd3d41b65cc74aa35ae2f
parent		6c647b0eb01cd7326dca093590f5e123e3c68b9c (diff)
xen-blkfront: introduce blkif_set_queue_limits()
blk_mq_update_nr_hw_queues() resets all queue limits to their defaults,
which is not what xen-blkfront expects. Introduce blkif_set_queue_limits()
to restore the limits to their correct initial values.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
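
For illustration only, here is a standalone C sketch of the ordering the
patch enforces: queue limits are derived from state cached in the per-device
info, so they can be re-applied after an update step has reset them to
defaults. All names below (dev_info, update_nr_hw_queues, set_queue_limits)
are hypothetical stand-ins, not the kernel API.

	#include <assert.h>
	#include <stdio.h>

	struct queue_limits {
		unsigned int max_segments;
	};

	struct dev_info {
		/* Cached at init time, like the new blkfront_info fields. */
		unsigned int cached_max_segments;
		struct queue_limits lim;
	};

	/* Stand-in for blk_mq_update_nr_hw_queues(): clobbers the limits. */
	static void update_nr_hw_queues(struct dev_info *info)
	{
		info->lim.max_segments = 128;	/* arbitrary default */
	}

	/* Stand-in for blkif_set_queue_limits(): restores from cached state. */
	static void set_queue_limits(struct dev_info *info)
	{
		info->lim.max_segments = info->cached_max_segments;
	}

	int main(void)
	{
		struct dev_info info = { .cached_max_segments = 32 };

		set_queue_limits(&info);	/* initial setup */
		update_nr_hw_queues(&info);	/* resume: limits reset to defaults */
		set_queue_limits(&info);	/* the fix: re-apply cached limits */

		assert(info.lim.max_segments == 32);
		printf("max_segments restored to %u\n", info.lim.max_segments);
		return 0;
	}

This is also why the patch caches sector_size and physical_sector_size in
struct blkfront_info: the recovery path has no access to the values that
were originally passed to xlvbd_init_blk_queue().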
 drivers/block/xen-blkfront.c | 86 +++++++++++++++++++++++++---------------
 1 file changed, 48 insertions(+), 38 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6a1756d72dcb..f84e220a26e6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -189,6 +189,8 @@ struct blkfront_info
 	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
+	u16 sector_size;
+	unsigned int physical_sector_size;
 	int vdevice;
 	blkif_vdev_t handle;
 	enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
 	.map_queue = blk_mq_map_queue,
 };
 
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+	struct request_queue *rq = info->rq;
+	struct gendisk *gd = info->gd;
+	unsigned int segments = info->max_indirect_segments ? :
+				BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+	if (info->feature_discard) {
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		rq->limits.discard_granularity = info->discard_granularity;
+		rq->limits.discard_alignment = info->discard_alignment;
+		if (info->feature_secdiscard)
+			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+	}
+
+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
+	blk_queue_logical_block_size(rq, info->sector_size);
+	blk_queue_physical_block_size(rq, info->physical_sector_size);
+	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+	/* Each segment in a request is up to an aligned page in size. */
+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+	/* Ensure a merged request will fit in a single I/O ring slot. */
+	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+	/* Make sure buffer addresses are sector-aligned. */
+	blk_queue_dma_alignment(rq, 511);
+
+	/* Make sure we don't use bounce buffers. */
+	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-				unsigned int physical_sector_size,
-				unsigned int segments)
+				unsigned int physical_sector_size)
 {
 	struct request_queue *rq;
 	struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	}
 
 	rq->queuedata = info;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
-	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
-		blk_queue_max_discard_sectors(rq, get_capacity(gd));
-		rq->limits.discard_granularity = info->discard_granularity;
-		rq->limits.discard_alignment = info->discard_alignment;
-		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
-	}
-
-	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_logical_block_size(rq, sector_size);
-	blk_queue_physical_block_size(rq, physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
-	/* Each segment in a request is up to an aligned page in size. */
-	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-	blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
-	/* Make sure buffer addresses are sector-aligned. */
-	blk_queue_dma_alignment(rq, 511);
-
-	/* Make sure we don't use bounce buffers. */
-	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
-	gd->queue = rq;
+	info->rq = gd->queue = rq;
+	info->gd = gd;
+	info->sector_size = sector_size;
+	info->physical_sector_size = physical_sector_size;
+	blkif_set_queue_limits(info);
 
 	return 0;
 }
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	gd->private_data = info;
 	set_capacity(gd, capacity);
 
-	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
-				 info->max_indirect_segments ? :
-				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
 		del_gendisk(gd);
 		goto release;
 	}
 
-	info->rq = gd->queue;
-	info->gd = gd;
-
 	xlvbd_flush(info);
 
 	if (vdisk_info & VDISK_READONLY)
@@ -2007,6 +2015,8 @@ static int blkif_recover(struct blkfront_info *info)
 	struct split_bio *split_bio;
 
 	blkfront_gather_backend_features(info);
+	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+	blkif_set_queue_limits(info);
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 