-rw-r--r--  block/blk-core.c                |   1
-rw-r--r--  block/blk-mq-debugfs.c          |   1
-rw-r--r--  block/blk-zoned.c               | 164
-rw-r--r--  drivers/block/null_blk.h        |  11
-rw-r--r--  drivers/block/null_blk_main.c   |  23
-rw-r--r--  drivers/block/null_blk_zoned.c  |  57
-rw-r--r--  drivers/md/dm-flakey.c          |  30
-rw-r--r--  drivers/md/dm-linear.c          |  35
-rw-r--r--  drivers/md/dm.c                 | 169
-rw-r--r--  drivers/scsi/sd.c               |  13
-rw-r--r--  drivers/scsi/sd.h               |  11
-rw-r--r--  drivers/scsi/sd_zbc.c           | 153
-rw-r--r--  include/linux/blk_types.h       |   2
-rw-r--r--  include/linux/blkdev.h          |   8
-rw-r--r--  include/linux/device-mapper.h   |  12
-rw-r--r--  include/trace/events/f2fs.h     |   1
16 files changed, 266 insertions(+), 425 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3ed60723e242..bc6ea87d10e0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2300,7 +2300,6 @@ generic_make_request_checks(struct bio *bio)
 		if (!q->limits.max_write_same_sectors)
 			goto not_supported;
 		break;
-	case REQ_OP_ZONE_REPORT:
 	case REQ_OP_ZONE_RESET:
 		if (!blk_queue_is_zoned(q))
 			goto not_supported;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 41b86f50d126..10b284a1f18d 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -283,7 +283,6 @@ static const char *const op_name[] = {
 	REQ_OP_NAME(WRITE),
 	REQ_OP_NAME(FLUSH),
 	REQ_OP_NAME(DISCARD),
-	REQ_OP_NAME(ZONE_REPORT),
 	REQ_OP_NAME(SECURE_ERASE),
 	REQ_OP_NAME(ZONE_RESET),
 	REQ_OP_NAME(WRITE_SAME),
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 5d967fd39fbd..90cf503091d5 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -93,13 +93,10 @@ unsigned int blkdev_nr_zones(struct block_device *bdev)
 EXPORT_SYMBOL_GPL(blkdev_nr_zones);
 
 /*
- * Check that a zone report belongs to the partition.
- * If yes, fix its start sector and write pointer, copy it in the
- * zone information array and return true. Return false otherwise.
+ * Check that a zone report belongs to this partition, and if yes, fix its start
+ * sector and write pointer and return true. Return false otherwise.
  */
-static bool blkdev_report_zone(struct block_device *bdev,
-			       struct blk_zone *rep,
-			       struct blk_zone *zone)
+static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
 {
 	sector_t offset = get_start_sect(bdev);
 
@@ -114,11 +111,36 @@ static bool blkdev_report_zone(struct block_device *bdev,
 		rep->wp = rep->start + rep->len;
 	else
 		rep->wp -= offset;
-	memcpy(zone, rep, sizeof(struct blk_zone));
-
 	return true;
 }
 
+static int blk_report_zones(struct gendisk *disk, sector_t sector,
+			    struct blk_zone *zones, unsigned int *nr_zones,
+			    gfp_t gfp_mask)
+{
+	struct request_queue *q = disk->queue;
+	unsigned int z = 0, n, nrz = *nr_zones;
+	sector_t capacity = get_capacity(disk);
+	int ret;
+
+	while (z < nrz && sector < capacity) {
+		n = nrz - z;
+		ret = disk->fops->report_zones(disk, sector, &zones[z], &n,
+					       gfp_mask);
+		if (ret)
+			return ret;
+		if (!n)
+			break;
+		sector += blk_queue_zone_sectors(q) * n;
+		z += n;
+	}
+
+	WARN_ON(z > *nr_zones);
+	*nr_zones = z;
+
+	return 0;
+}
+
 /**
  * blkdev_report_zones - Get zones information
  * @bdev: Target block device
@@ -133,130 +155,46 @@ static bool blkdev_report_zone(struct block_device *bdev,
  * requested by @nr_zones. The number of zones actually reported is
  * returned in @nr_zones.
  */
-int blkdev_report_zones(struct block_device *bdev,
-			sector_t sector,
-			struct blk_zone *zones,
-			unsigned int *nr_zones,
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+			struct blk_zone *zones, unsigned int *nr_zones,
 			gfp_t gfp_mask)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	struct blk_zone_report_hdr *hdr;
-	unsigned int nrz = *nr_zones;
-	struct page *page;
-	unsigned int nr_rep;
-	size_t rep_bytes;
-	unsigned int nr_pages;
-	struct bio *bio;
-	struct bio_vec *bv;
-	unsigned int i, n, nz;
-	unsigned int ofst;
-	void *addr;
+	unsigned int i, nrz;
 	int ret;
 
-	if (!q)
-		return -ENXIO;
-
 	if (!blk_queue_is_zoned(q))
 		return -EOPNOTSUPP;
 
-	if (!nrz)
-		return 0;
-
-	if (sector > bdev->bd_part->nr_sects) {
-		*nr_zones = 0;
-		return 0;
-	}
-
 	/*
-	 * The zone report has a header. So make room for it in the
-	 * payload. Also make sure that the report fits in a single BIO
-	 * that will not be split down the stack.
+	 * A block device that advertized itself as zoned must have a
+	 * report_zones method. If it does not have one defined, the device
+	 * driver has a bug. So warn about that.
 	 */
-	rep_bytes = sizeof(struct blk_zone_report_hdr) +
-		sizeof(struct blk_zone) * nrz;
-	rep_bytes = (rep_bytes + PAGE_SIZE - 1) & PAGE_MASK;
-	if (rep_bytes > (queue_max_sectors(q) << 9))
-		rep_bytes = queue_max_sectors(q) << 9;
-
-	nr_pages = min_t(unsigned int, BIO_MAX_PAGES,
-			 rep_bytes >> PAGE_SHIFT);
-	nr_pages = min_t(unsigned int, nr_pages,
-			 queue_max_segments(q));
-
-	bio = bio_alloc(gfp_mask, nr_pages);
-	if (!bio)
-		return -ENOMEM;
+	if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
+		return -EOPNOTSUPP;
 
-	bio_set_dev(bio, bdev);
-	bio->bi_iter.bi_sector = blk_zone_start(q, sector);
-	bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
-
-	for (i = 0; i < nr_pages; i++) {
-		page = alloc_page(gfp_mask);
-		if (!page) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
-			__free_page(page);
-			break;
-		}
-	}
+	if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
+		*nr_zones = 0;
+		return 0;
+	}
 
-	if (i == 0)
-		ret = -ENOMEM;
-	else
-		ret = submit_bio_wait(bio);
+	nrz = min(*nr_zones,
+		  __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
+	ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
+			       zones, &nrz, gfp_mask);
 	if (ret)
-		goto out;
+		return ret;
 
-	/*
-	 * Process the report result: skip the header and go through the
-	 * reported zones to fixup and fixup the zone information for
-	 * partitions. At the same time, return the zone information into
-	 * the zone array.
-	 */
-	n = 0;
-	nz = 0;
-	nr_rep = 0;
-	bio_for_each_segment_all(bv, bio, i) {
-
-		if (!bv->bv_page)
-			break;
-
-		addr = kmap_atomic(bv->bv_page);
-
-		/* Get header in the first page */
-		ofst = 0;
-		if (!nr_rep) {
-			hdr = addr;
-			nr_rep = hdr->nr_zones;
-			ofst = sizeof(struct blk_zone_report_hdr);
-		}
-
-		/* Fixup and report zones */
-		while (ofst < bv->bv_len &&
-		       n < nr_rep && nz < nrz) {
-			if (blkdev_report_zone(bdev, addr + ofst, &zones[nz]))
-				nz++;
-			ofst += sizeof(struct blk_zone);
-			n++;
-		}
-
-		kunmap_atomic(addr);
-
-		if (n >= nr_rep || nz >= nrz)
-			break;
-
-	}
+	for (i = 0; i < nrz; i++) {
+		if (!blkdev_report_zone(bdev, zones))
+			break;
+		zones++;
+	}
 
-	*nr_zones = nz;
-out:
-	bio_for_each_segment_all(bv, bio, i)
-		__free_page(bv->bv_page);
-	bio_put(bio);
+	*nr_zones = i;
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(blkdev_report_zones);
 
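[Illustrative note, not part of the patch: with the rework above, blkdev_report_zones() fills a caller-supplied array of struct blk_zone and hands the count of filled descriptors back through *nr_zones; there is no report header and no REQ_OP_ZONE_REPORT bio any more. A minimal in-kernel caller sketch, assuming this new signature; example_dump_zones() is a made-up name used only for illustration:]

#include <linux/blkdev.h>
#include <linux/slab.h>

static int example_dump_zones(struct block_device *bdev, sector_t sector)
{
	unsigned int i, nr_zones = 16;	/* arbitrary batch size */
	struct blk_zone *zones;
	int ret;

	zones = kcalloc(nr_zones, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* On success, nr_zones is updated to the number of filled entries. */
	ret = blkdev_report_zones(bdev, sector, zones, &nr_zones, GFP_KERNEL);
	if (!ret)
		for (i = 0; i < nr_zones; i++)
			pr_info("zone %u: start %llu len %llu wp %llu\n", i,
				(unsigned long long)zones[i].start,
				(unsigned long long)zones[i].len,
				(unsigned long long)zones[i].wp);

	kfree(zones);
	return ret;
}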
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index 34e0030f0592..7685df43f1ef 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -87,7 +87,9 @@ struct nullb {
 #ifdef CONFIG_BLK_DEV_ZONED
 int null_zone_init(struct nullb_device *dev);
 void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+int null_zone_report(struct gendisk *disk, sector_t sector,
+		     struct blk_zone *zones, unsigned int *nr_zones,
+		     gfp_t gfp_mask);
 void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		     unsigned int nr_sectors);
 void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
@@ -97,10 +99,11 @@ static inline int null_zone_init(struct nullb_device *dev)
 	return -EINVAL;
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline blk_status_t null_zone_report(struct nullb *nullb,
-					    struct bio *bio)
+static inline int null_zone_report(struct gendisk *disk, sector_t sector,
+				   struct blk_zone *zones,
+				   unsigned int *nr_zones, gfp_t gfp_mask)
 {
-	return BLK_STS_NOTSUPP;
+	return -EOPNOTSUPP;
 }
 static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 				   unsigned int nr_sectors)
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index e94591021682..5ba426dbf377 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1129,34 +1129,12 @@ static void null_restart_queue_async(struct nullb *nullb)
 	blk_mq_start_stopped_hw_queues(q, true);
 }
 
-static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
-{
-	struct nullb_device *dev = cmd->nq->dev;
-
-	if (dev->queue_mode == NULL_Q_BIO) {
-		if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
-			cmd->error = null_zone_report(nullb, cmd->bio);
-			return true;
-		}
-	} else {
-		if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
-			cmd->error = null_zone_report(nullb, cmd->rq->bio);
-			return true;
-		}
-	}
-
-	return false;
-}
-
 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	struct nullb *nullb = dev->nullb;
 	int err = 0;
 
-	if (cmd_report_zone(nullb, cmd))
-		goto out;
-
 	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
 		struct request *rq = cmd->rq;
 
@@ -1443,6 +1421,7 @@ static const struct block_device_operations null_fops = {
 	.owner		= THIS_MODULE,
 	.open		= null_open,
 	.release	= null_release,
+	.report_zones	= null_zone_report,
 };
 
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 7c6b86d98700..c0b0e4a3fa8f 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -48,54 +48,27 @@ void null_zone_exit(struct nullb_device *dev)
 	kvfree(dev->zones);
 }
 
-static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
-			       unsigned int zno, unsigned int nr_zones)
+int null_zone_report(struct gendisk *disk, sector_t sector,
+		     struct blk_zone *zones, unsigned int *nr_zones,
+		     gfp_t gfp_mask)
 {
-	struct blk_zone_report_hdr *hdr = NULL;
-	struct bio_vec bvec;
-	struct bvec_iter iter;
-	void *addr;
-	unsigned int zones_to_cpy;
+	struct nullb *nullb = disk->private_data;
+	struct nullb_device *dev = nullb->dev;
+	unsigned int zno, nrz = 0;
 
-	bio_for_each_segment(bvec, bio, iter) {
-		addr = kmap_atomic(bvec.bv_page);
-
-		zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
-
-		if (!hdr) {
-			hdr = (struct blk_zone_report_hdr *)addr;
-			hdr->nr_zones = nr_zones;
-			zones_to_cpy--;
-			addr += sizeof(struct blk_zone_report_hdr);
-		}
-
-		zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);
-
-		memcpy(addr, &dev->zones[zno],
-		       zones_to_cpy * sizeof(struct blk_zone));
-
-		kunmap_atomic(addr);
+	if (!dev->zoned)
+		/* Not a zoned null device */
+		return -EOPNOTSUPP;
 
-		nr_zones -= zones_to_cpy;
-		zno += zones_to_cpy;
-
-		if (!nr_zones)
-			break;
-	}
-}
+	zno = null_zone_no(dev, sector);
+	if (zno < dev->nr_zones) {
+		nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
+		memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
+	}
 
-blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
-{
-	struct nullb_device *dev = nullb->dev;
-	unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
-	unsigned int nr_zones = dev->nr_zones - zno;
-	unsigned int max_zones;
+	*nr_zones = nrz;
 
-	max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
-	nr_zones = min_t(unsigned int, nr_zones, max_zones);
-	null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
-
-	return BLK_STS_OK;
+	return 0;
 }
 
 void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
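[Illustrative note, not part of the patch: with null_blk now wiring null_zone_report() into block_device_operations, the existing user-space path is unchanged — the BLKREPORTZONE ioctl ends up in blkdev_report_zones() and from there in the driver's ->report_zones method. A small user-space sketch, assuming a zoned null_blk instance at /dev/nullb0; device path and zone count are arbitrary:]

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	unsigned int i, nrz = 8;
	struct blk_zone_report *rep;
	int fd;

	/* Report buffer: fixed header followed by nrz zone descriptors. */
	rep = calloc(1, sizeof(*rep) + nrz * sizeof(struct blk_zone));
	fd = open("/dev/nullb0", O_RDONLY);
	if (!rep || fd < 0)
		return 1;

	rep->sector = 0;
	rep->nr_zones = nrz;
	if (ioctl(fd, BLKREPORTZONE, rep) < 0)
		return 1;

	/* nr_zones is updated to the number of descriptors returned. */
	for (i = 0; i < rep->nr_zones; i++)
		printf("zone %u: start %llu wp %llu\n", i,
		       (unsigned long long)rep->zones[i].start,
		       (unsigned long long)rep->zones[i].wp);
	return 0;
}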
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 32aabe27b37c..3cb97fa4c11d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -315,10 +315,6 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 	if (bio_op(bio) == REQ_OP_ZONE_RESET)
 		goto map_bio;
 
-	/* We need to remap reported zones, so remember the BIO iter */
-	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
-		goto map_bio;
-
 	/* Are we alive ? */
 	elapsed = (jiffies - fc->start_time) / HZ;
 	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
@@ -380,11 +376,6 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
 	if (bio_op(bio) == REQ_OP_ZONE_RESET)
 		return DM_ENDIO_DONE;
 
-	if (bio_op(bio) == REQ_OP_ZONE_REPORT) {
-		dm_remap_zone_report(ti, bio, fc->start);
-		return DM_ENDIO_DONE;
-	}
-
 	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
 		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
 		    all_corrupt_bio_flags_match(bio, fc)) {
@@ -457,6 +448,26 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
 	return 0;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static int flakey_report_zones(struct dm_target *ti, sector_t sector,
+			       struct blk_zone *zones, unsigned int *nr_zones,
+			       gfp_t gfp_mask)
+{
+	struct flakey_c *fc = ti->private;
+	int ret;
+
+	/* Do report and remap it */
+	ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
+				  zones, nr_zones, gfp_mask);
+	if (ret != 0)
+		return ret;
+
+	if (*nr_zones)
+		dm_remap_zone_report(ti, fc->start, zones, nr_zones);
+	return 0;
+}
+#endif
+
 static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
 {
 	struct flakey_c *fc = ti->private;
@@ -469,6 +480,7 @@ static struct target_type flakey_target = {
 	.version = {1, 5, 0},
 #ifdef CONFIG_BLK_DEV_ZONED
 	.features = DM_TARGET_ZONED_HM,
+	.report_zones = flakey_report_zones,
 #endif
 	.module = THIS_MODULE,
 	.ctr = flakey_ctr,
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 2f7c44a006c4..8d7ddee6ac4d 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -102,19 +102,6 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_REMAPPED;
 }
 
-#ifdef CONFIG_BLK_DEV_ZONED
-static int linear_end_io(struct dm_target *ti, struct bio *bio,
-			 blk_status_t *error)
-{
-	struct linear_c *lc = ti->private;
-
-	if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT)
-		dm_remap_zone_report(ti, bio, lc->start);
-
-	return DM_ENDIO_DONE;
-}
-#endif
-
 static void linear_status(struct dm_target *ti, status_type_t type,
 			  unsigned status_flags, char *result, unsigned maxlen)
 {
@@ -148,6 +135,26 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
 	return 0;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static int linear_report_zones(struct dm_target *ti, sector_t sector,
+			       struct blk_zone *zones, unsigned int *nr_zones,
+			       gfp_t gfp_mask)
+{
+	struct linear_c *lc = (struct linear_c *) ti->private;
+	int ret;
+
+	/* Do report and remap it */
+	ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
+				  zones, nr_zones, gfp_mask);
+	if (ret != 0)
+		return ret;
+
+	if (*nr_zones)
+		dm_remap_zone_report(ti, lc->start, zones, nr_zones);
+	return 0;
+}
+#endif
+
 static int linear_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
@@ -211,8 +218,8 @@ static struct target_type linear_target = {
 	.name   = "linear",
 	.version = {1, 4, 0},
 #ifdef CONFIG_BLK_DEV_ZONED
-	.end_io = linear_end_io,
 	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+	.report_zones = linear_report_zones,
 #else
 	.features = DM_TARGET_PASSES_INTEGRITY,
 #endif
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 45abb54037fc..6be21dc210a1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,6 +458,57 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return dm_get_geometry(md, geo);
 }
 
+static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
+			       struct blk_zone *zones, unsigned int *nr_zones,
+			       gfp_t gfp_mask)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+	struct mapped_device *md = disk->private_data;
+	struct dm_target *tgt;
+	struct dm_table *map;
+	int srcu_idx, ret;
+
+	if (dm_suspended_md(md))
+		return -EAGAIN;
+
+	map = dm_get_live_table(md, &srcu_idx);
+	if (!map)
+		return -EIO;
+
+	tgt = dm_table_find_target(map, sector);
+	if (!dm_target_is_valid(tgt)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/*
+	 * If we are executing this, we already know that the block device
+	 * is a zoned device and so each target should have support for that
+	 * type of drive. A missing report_zones method means that the target
+	 * driver has a problem.
+	 */
+	if (WARN_ON(!tgt->type->report_zones)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/*
+	 * blkdev_report_zones() will loop and call this again to cover all the
+	 * zones of the target, eventually moving on to the next target.
+	 * So there is no need to loop here trying to fill the entire array
+	 * of zones.
+	 */
+	ret = tgt->type->report_zones(tgt, sector, zones,
+				      nr_zones, gfp_mask);
+
+out:
+	dm_put_live_table(md, srcu_idx);
+	return ret;
+#else
+	return -ENOTSUPP;
+#endif
+}
+
 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
 			    struct block_device **bdev)
 	__acquires(md->io_barrier)
@@ -1155,93 +1206,49 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
 /*
- * The zone descriptors obtained with a zone report indicate zone positions
- * within the target backing device, regardless of that device is a partition
- * and regardless of the target mapping start sector on the device or partition.
- * The zone descriptors start sector and write pointer position must be adjusted
- * to match their relative position within the dm device.
- * A target may call dm_remap_zone_report() after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
- * backing device.
+ * The zone descriptors obtained with a zone report indicate
+ * zone positions within the underlying device of the target. The zone
+ * descriptors must be remapped to match their position within the dm device.
+ * The caller target should obtain the zones information using
+ * blkdev_report_zones() to ensure that remapping for partition offset is
+ * already handled.
  */
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+			  struct blk_zone *zones, unsigned int *nr_zones)
 {
 #ifdef CONFIG_BLK_DEV_ZONED
-	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
-	struct bio *report_bio = tio->io->orig_bio;
-	struct blk_zone_report_hdr *hdr = NULL;
 	struct blk_zone *zone;
-	unsigned int nr_rep = 0;
-	unsigned int ofst;
-	sector_t part_offset;
-	struct bio_vec bvec;
-	struct bvec_iter iter;
-	void *addr;
-
-	if (bio->bi_status)
-		return;
-
-	/*
-	 * bio sector was incremented by the request size on completion. Taking
-	 * into account the original request sector, the target start offset on
-	 * the backing device and the target mapping offset (ti->begin), the
-	 * start sector of the backing device. The partition offset is always 0
-	 * if the target uses a whole device.
-	 */
-	part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+	unsigned int nrz = *nr_zones;
+	int i;
 
 	/*
-	 * Remap the start sector of the reported zones. For sequential zones,
-	 * also remap the write pointer position.
+	 * Remap the start sector and write pointer position of the zones in
+	 * the array. Since we may have obtained from the target underlying
+	 * device more zones that the target size, also adjust the number
+	 * of zones.
 	 */
-	bio_for_each_segment(bvec, report_bio, iter) {
-		addr = kmap_atomic(bvec.bv_page);
-
-		/* Remember the report header in the first page */
-		if (!hdr) {
-			hdr = addr;
-			ofst = sizeof(struct blk_zone_report_hdr);
-		} else
-			ofst = 0;
-
-		/* Set zones start sector */
-		while (hdr->nr_zones && ofst < bvec.bv_len) {
-			zone = addr + ofst;
-			zone->start -= part_offset;
-			if (zone->start >= start + ti->len) {
-				hdr->nr_zones = 0;
-				break;
-			}
-			zone->start = zone->start + ti->begin - start;
-			if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
-				if (zone->cond == BLK_ZONE_COND_FULL)
-					zone->wp = zone->start + zone->len;
-				else if (zone->cond == BLK_ZONE_COND_EMPTY)
-					zone->wp = zone->start;
-				else
-					zone->wp = zone->wp + ti->begin - start - part_offset;
-			}
-			ofst += sizeof(struct blk_zone);
-			hdr->nr_zones--;
-			nr_rep++;
+	for (i = 0; i < nrz; i++) {
+		zone = zones + i;
+		if (zone->start >= start + ti->len) {
+			memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
+			break;
 		}
 
-		if (addr != hdr)
-			kunmap_atomic(addr);
+		zone->start = zone->start + ti->begin - start;
+		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+			continue;
 
-		if (!hdr->nr_zones)
-			break;
-	}
-
-	if (hdr) {
-		hdr->nr_zones = nr_rep;
-		kunmap_atomic(hdr);
+		if (zone->cond == BLK_ZONE_COND_FULL)
+			zone->wp = zone->start + zone->len;
+		else if (zone->cond == BLK_ZONE_COND_EMPTY)
+			zone->wp = zone->start;
+		else
+			zone->wp = zone->wp + ti->begin - start;
 	}
 
-	bio_advance(report_bio, report_bio->bi_iter.bi_size);
-
+	*nr_zones = i;
 #else /* !CONFIG_BLK_DEV_ZONED */
-	bio->bi_status = BLK_STS_NOTSUPP;
+	*nr_zones = 0;
 #endif
 }
 EXPORT_SYMBOL_GPL(dm_remap_zone_report);
@@ -1327,8 +1334,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 		return r;
 	}
 
-	if (bio_op(bio) != REQ_OP_ZONE_REPORT)
-		bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
 	clone->bi_iter.bi_size = to_bytes(len);
 
 	if (unlikely(bio_integrity(bio) != NULL))
@@ -1541,7 +1547,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
  */
 static int __split_and_process_non_flush(struct clone_info *ci)
 {
-	struct bio *bio = ci->bio;
 	struct dm_target *ti;
 	unsigned len;
 	int r;
@@ -1553,11 +1558,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 	if (unlikely(__process_abnormal_io(ci, ti, &r)))
 		return r;
 
-	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
-		len = ci->sector_count;
-	else
-		len = min_t(sector_t, max_io_len(ci->sector, ti),
-			    ci->sector_count);
+	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
 	if (r < 0)
@@ -1616,9 +1617,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 		 * We take a clone of the original to store in
 		 * ci.io->orig_bio to be used by end_io_acct() and
 		 * for dec_pending to use for completion handling.
-		 * As this path is not used for REQ_OP_ZONE_REPORT,
-		 * the usage of io->orig_bio in dm_remap_zone_report()
-		 * won't be affected by this reassignment.
 		 */
 		struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
 					  GFP_NOIO, &md->queue->bio_split);
@@ -3167,6 +3165,7 @@ static const struct block_device_operations dm_blk_dops = {
 	.release = dm_blk_close,
 	.ioctl = dm_blk_ioctl,
 	.getgeo = dm_blk_getgeo,
+	.report_zones = dm_blk_report_zones,
 	.pr_ops = &dm_pr_ops,
 	.owner = THIS_MODULE
 };
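[Worked example, with invented numbers, of the remapping done by the new dm_remap_zone_report() above: suppose a target begins at sector ti->begin = 524288 of the dm device and maps onto the backing zoned device at start = 1048576. A zone reported by blkdev_report_zones() with start 1572864 is moved to 1572864 + 524288 - 1048576 = 1048576 in the dm device's sector space. Any zone whose reported start reaches start + ti->len is zeroed and terminates the report, and the write pointer of a partially written sequential zone is shifted by the same ti->begin - start delta, while full and empty zones get their write pointer recomputed from the remapped start.]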
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b762d0fd773c..42c0f299021d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1272,8 +1272,6 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
 		return sd_setup_read_write_cmnd(cmd);
-	case REQ_OP_ZONE_REPORT:
-		return sd_zbc_setup_report_cmnd(cmd);
 	case REQ_OP_ZONE_RESET:
 		return sd_zbc_setup_reset_cmnd(cmd);
 	default:
@@ -1802,6 +1800,7 @@ static const struct block_device_operations sd_fops = {
 	.check_events		= sd_check_events,
 	.revalidate_disk	= sd_revalidate_disk,
 	.unlock_native_capacity	= sd_unlock_native_capacity,
+	.report_zones		= sd_zbc_report_zones,
 	.pr_ops			= &sd_pr_ops,
 };
 
@@ -1953,16 +1952,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 			scsi_set_resid(SCpnt, blk_rq_bytes(req));
 		}
 		break;
-	case REQ_OP_ZONE_REPORT:
-		if (!result) {
-			good_bytes = scsi_bufflen(SCpnt)
-				- scsi_get_resid(SCpnt);
-			scsi_set_resid(SCpnt, 0);
-		} else {
-			good_bytes = 0;
-			scsi_set_resid(SCpnt, blk_rq_bytes(req));
-		}
-		break;
 	default:
 		/*
 		 * In case of bogus fw or device, we could end up having
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index a7d4f50b67d4..f72f20fd0d8b 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -273,10 +273,12 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
 extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
 extern void sd_zbc_remove(struct scsi_disk *sdkp);
 extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
 extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
 extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
 			    struct scsi_sense_hdr *sshdr);
+extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+			       struct blk_zone *zones, unsigned int *nr_zones,
+			       gfp_t gfp_mask);
 
 #else /* CONFIG_BLK_DEV_ZONED */
 
@@ -290,11 +292,6 @@ static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
 
 static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
 
-static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
-{
-	return BLKPREP_INVALID;
-}
-
 static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
 {
 	return BLKPREP_INVALID;
@@ -304,6 +301,8 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
 					   unsigned int good_bytes,
 					   struct scsi_sense_hdr *sshdr) {}
 
+#define sd_zbc_report_zones	NULL
+
 #endif /* CONFIG_BLK_DEV_ZONED */
 
 #endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 0678e1e108b0..0f2cfc81fce3 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -62,7 +62,7 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
 }
 
 /**
- * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
+ * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
  * @sdkp: The target disk
  * @buf: Buffer to use for the reply
  * @buflen: the buffer size
@@ -75,9 +75,9 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
  * zones and will only report the count of zones fitting in the command reply
  * buffer.
  */
-static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
-			       unsigned int buflen, sector_t lba,
-			       bool partial)
+static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+				  unsigned int buflen, sector_t lba,
+				  bool partial)
 {
 	struct scsi_device *sdp = sdkp->device;
 	const int timeout = sdp->request_queue->rq_timeout;
@@ -118,108 +118,56 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
 }
 
 /**
- * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
- * @cmd: The command to setup
+ * sd_zbc_report_zones - Disk report zones operation.
+ * @disk: The target disk
+ * @sector: Start 512B sector of the report
+ * @zones: Array of zone descriptors
+ * @nr_zones: Number of descriptors in the array
+ * @gfp_mask: Memory allocation mask
  *
- * Call in sd_init_command() for a REQ_OP_ZONE_REPORT request.
+ * Execute a report zones command on the target disk.
  */
-int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+			struct blk_zone *zones, unsigned int *nr_zones,
+			gfp_t gfp_mask)
 {
-	struct request *rq = cmd->request;
-	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-	sector_t lba, sector = blk_rq_pos(rq);
-	unsigned int nr_bytes = blk_rq_bytes(rq);
-	int ret;
-
-	WARN_ON(nr_bytes == 0);
+	struct scsi_disk *sdkp = scsi_disk(disk);
+	unsigned int i, buflen, nrz = *nr_zones;
+	unsigned char *buf;
+	size_t offset = 0;
+	int ret = 0;
 
 	if (!sd_is_zoned(sdkp))
 		/* Not a zoned device */
-		return BLKPREP_KILL;
-
-	ret = scsi_init_io(cmd);
-	if (ret != BLKPREP_OK)
-		return ret;
-
-	cmd->cmd_len = 16;
-	memset(cmd->cmnd, 0, cmd->cmd_len);
-	cmd->cmnd[0] = ZBC_IN;
-	cmd->cmnd[1] = ZI_REPORT_ZONES;
-	lba = sectors_to_logical(sdkp->device, sector);
-	put_unaligned_be64(lba, &cmd->cmnd[2]);
-	put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
-	/* Do partial report for speeding things up */
-	cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
-
-	cmd->sc_data_direction = DMA_FROM_DEVICE;
-	cmd->sdb.length = nr_bytes;
-	cmd->transfersize = sdkp->device->sector_size;
-	cmd->allowed = 0;
+		return -EOPNOTSUPP;
 
-	return BLKPREP_OK;
-}
-
-/**
- * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
- * @scmd: The completed report zones command
- * @good_bytes: reply size in bytes
- *
- * Convert all reported zone descriptors to struct blk_zone. The conversion
- * is done in-place, directly in the request specified sg buffer.
- */
-static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
-					 unsigned int good_bytes)
-{
-	struct request *rq = scmd->request;
-	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-	struct sg_mapping_iter miter;
-	struct blk_zone_report_hdr hdr;
-	struct blk_zone zone;
-	unsigned int offset, bytes = 0;
-	unsigned long flags;
-	u8 *buf;
-
-	if (good_bytes < 64)
-		return;
-
-	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
-
-	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
-		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+	/*
+	 * Get a reply buffer for the number of requested zones plus a header.
+	 * For ATA, buffers must be aligned to 512B.
+	 */
+	buflen = roundup((nrz + 1) * 64, 512);
+	buf = kmalloc(buflen, gfp_mask);
+	if (!buf)
+		return -ENOMEM;
 
-	local_irq_save(flags);
-	while (sg_miter_next(&miter) && bytes < good_bytes) {
+	ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+			sectors_to_logical(sdkp->device, sector), true);
+	if (ret)
+		goto out_free_buf;
 
-		buf = miter.addr;
-		offset = 0;
+	nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
+	for (i = 0; i < nrz; i++) {
+		offset += 64;
+		sd_zbc_parse_report(sdkp, buf + offset, zones);
+		zones++;
+	}
 
-		if (bytes == 0) {
-			/* Set the report header */
-			hdr.nr_zones = min_t(unsigned int,
-					     (good_bytes - 64) / 64,
-					     get_unaligned_be32(&buf[0]) / 64);
-			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
-			offset += 64;
-			bytes += 64;
-		}
+	*nr_zones = nrz;
 
-		/* Parse zone descriptors */
-		while (offset < miter.length && hdr.nr_zones) {
-			WARN_ON(offset > miter.length);
-			buf = miter.addr + offset;
-			sd_zbc_parse_report(sdkp, buf, &zone);
-			memcpy(buf, &zone, sizeof(struct blk_zone));
-			offset += 64;
-			bytes += 64;
-			hdr.nr_zones--;
-		}
-
-		if (!hdr.nr_zones)
-			break;
+out_free_buf:
+	kfree(buf);
 
-	}
-	sg_miter_stop(&miter);
-	local_irq_restore(flags);
+	return ret;
 }
 
 /**
@@ -302,13 +250,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
 	case REQ_OP_WRITE_ZEROES:
 	case REQ_OP_WRITE_SAME:
 		break;
-
-	case REQ_OP_ZONE_REPORT:
-
-		if (!result)
-			sd_zbc_report_zones_complete(cmd, good_bytes);
-		break;
-
 	}
 }
 
@@ -390,7 +331,7 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
 		return -ENOMEM;
 
 	/* Do a report zone to get max_lba and the same field */
-	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0, false);
+	ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0, false);
 	if (ret)
 		goto out_free;
 
@@ -447,8 +388,8 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
 	}
 
 	if (block < sdkp->capacity) {
-		ret = sd_zbc_report_zones(sdkp, buf,
-					  SD_ZBC_BUF_SIZE, block, true);
+		ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
+					     block, true);
 		if (ret)
 			goto out_free;
 	}
@@ -565,8 +506,8 @@ sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
 		goto out;
 
 	while (lba < sdkp->capacity) {
-		ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
-					  lba, true);
+		ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba,
+					     true);
 		if (ret)
 			goto out;
 		lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
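[Illustrative note, not part of the patch: the buffer sizing in the new sd_zbc_report_zones() follows the REPORT ZONES reply layout, where the reply starts with a 64-byte header followed by 64-byte zone descriptors. For a request of, say, nrz = 32 zones, buflen = roundup((32 + 1) * 64, 512) = roundup(2112, 512) = 2560 bytes, which also satisfies the 512-byte alignment needed for ATA-backed devices. After the command completes, get_unaligned_be32(&buf[0]) / 64 (the reply's zone-list-length field divided by the 64-byte descriptor size) caps nrz to the count of descriptors actually reported, and the parse loop starts at offset 64 so that the header is skipped.]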
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 9578c7ab1eb6..093a818c5b68 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -283,8 +283,6 @@ enum req_opf {
 	REQ_OP_FLUSH		= 2,
 	/* discard sectors */
 	REQ_OP_DISCARD		= 3,
-	/* get zone information */
-	REQ_OP_ZONE_REPORT	= 4,
 	/* securely erase sectors */
 	REQ_OP_SECURE_ERASE	= 5,
 	/* seset a zone write pointer */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6bb845f9601a..51fe6472ce02 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -396,11 +396,6 @@ struct queue_limits {
 
 #ifdef CONFIG_BLK_DEV_ZONED
 
-struct blk_zone_report_hdr {
-	unsigned int	nr_zones;
-	u8		padding[60];
-};
-
 extern unsigned int blkdev_nr_zones(struct block_device *bdev);
 extern int blkdev_report_zones(struct block_device *bdev,
 			       sector_t sector, struct blk_zone *zones,
@@ -1867,6 +1862,9 @@ struct block_device_operations {
 	int (*getgeo)(struct block_device *, struct hd_geometry *);
 	/* this callback is with swap_lock and sometimes page table lock held */
 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+	int (*report_zones)(struct gendisk *, sector_t sector,
+			    struct blk_zone *zones, unsigned int *nr_zones,
+			    gfp_t gfp_mask);
 	struct module *owner;
 	const struct pr_ops *pr_ops;
 };
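[Illustrative sketch, not part of the patch: the shape of a driver-side implementation of the new ->report_zones hook declared above. my_dev and my_fill_zone are hypothetical placeholders; a real driver fills the array from its own zone metadata or a device query, writes the produced count back through *nr_zones, and returns 0 on success:]

static int my_report_zones(struct gendisk *disk, sector_t sector,
			   struct blk_zone *zones, unsigned int *nr_zones,
			   gfp_t gfp_mask)
{
	struct my_dev *dev = disk->private_data;	/* hypothetical driver data */
	unsigned int i;

	for (i = 0; i < *nr_zones && sector < get_capacity(disk); i++) {
		my_fill_zone(dev, sector, &zones[i]);	/* hypothetical helper */
		sector += zones[i].len;
	}

	/* Report back how many descriptors were filled. */
	*nr_zones = i;
	return 0;
}

static const struct block_device_operations my_fops = {
	.owner		= THIS_MODULE,
	.report_zones	= my_report_zones,
};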
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 6fb0808e87c8..a23b396a8edc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -92,6 +92,11 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
 
 typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
 
+typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
+				   struct blk_zone *zones,
+				   unsigned int *nr_zones,
+				   gfp_t gfp_mask);
+
 /*
  * These iteration functions are typically used to check (and combine)
  * properties of underlying devices.
@@ -180,6 +185,9 @@ struct target_type {
 	dm_status_fn status;
 	dm_message_fn message;
 	dm_prepare_ioctl_fn prepare_ioctl;
+#ifdef CONFIG_BLK_DEV_ZONED
+	dm_report_zones_fn report_zones;
+#endif
 	dm_busy_fn busy;
 	dm_iterate_devices_fn iterate_devices;
 	dm_io_hints_fn io_hints;
@@ -420,8 +428,8 @@ struct gendisk *dm_disk(struct mapped_device *md);
 int dm_suspended(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
-			  sector_t start);
+void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+			  struct blk_zone *zones, unsigned int *nr_zones);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 795698925d20..3ec73f17ee2a 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -82,7 +82,6 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
 		{ REQ_OP_WRITE,		"WRITE" },		\
 		{ REQ_OP_FLUSH,		"FLUSH" },		\
 		{ REQ_OP_DISCARD,	"DISCARD" },		\
-		{ REQ_OP_ZONE_REPORT,	"ZONE_REPORT" },	\
 		{ REQ_OP_SECURE_ERASE,	"SECURE_ERASE" },	\
 		{ REQ_OP_ZONE_RESET,	"ZONE_RESET" },		\
 		{ REQ_OP_WRITE_SAME,	"WRITE_SAME" },		\