author    Christoph Hellwig <hch@lst.de>  2017-04-05 13:21:16 -0400
committer Jens Axboe <axboe@fb.com>       2017-04-08 13:25:38 -0400
commit    f09a06a193d942a12c1a33c153388b3962222006
tree      e72cfe46da5b2cbe20a91e84c311347f081d087c
parent    19372e2769179ddd154a0d6fbbdb719eb5d0af12
brd: remove discard support
It's just an in-driver reimplementation of writing zeroes to the pages,
which fails if the discards aren't page aligned.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  drivers/block/brd.c  54
1 file changed, 0 insertions(+), 54 deletions(-)
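
For context (an editor's illustration, not part of the commit): once the
queue stops advertising discard, a discard request from user space should
fail with EOPNOTSUPP instead of being silently zero-filled. A minimal
sketch of issuing such a request, assuming a brd device exists at
/dev/ram0 (path and length are assumptions for the example):

/*
 * Issue a BLKDISCARD ioctl against a brd device. After this patch the
 * ioctl is expected to fail with "Operation not supported".
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(void)
{
	int fd = open("/dev/ram0", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* range[0] = byte offset, range[1] = byte length */
	uint64_t range[2] = { 0, 4096 };

	if (ioctl(fd, BLKDISCARD, &range) < 0)
		fprintf(stderr, "BLKDISCARD: %s\n", strerror(errno));
	else
		printf("discarded %llu bytes\n",
		       (unsigned long long)range[1]);

	close(fd);
	return 0;
}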
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3adc32a3153b..4ec84d504780 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -134,28 +134,6 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 	return page;
 }
 
-static void brd_free_page(struct brd_device *brd, sector_t sector)
-{
-	struct page *page;
-	pgoff_t idx;
-
-	spin_lock(&brd->brd_lock);
-	idx = sector >> PAGE_SECTORS_SHIFT;
-	page = radix_tree_delete(&brd->brd_pages, idx);
-	spin_unlock(&brd->brd_lock);
-	if (page)
-		__free_page(page);
-}
-
-static void brd_zero_page(struct brd_device *brd, sector_t sector)
-{
-	struct page *page;
-
-	page = brd_lookup_page(brd, sector);
-	if (page)
-		clear_highpage(page);
-}
-
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -212,24 +190,6 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
 	return 0;
 }
 
-static void discard_from_brd(struct brd_device *brd,
-			sector_t sector, size_t n)
-{
-	while (n >= PAGE_SIZE) {
-		/*
-		 * Don't want to actually discard pages here because
-		 * re-allocating the pages can result in writeback
-		 * deadlocks under heavy load.
-		 */
-		if (0)
-			brd_free_page(brd, sector);
-		else
-			brd_zero_page(brd, sector);
-		sector += PAGE_SIZE >> SECTOR_SHIFT;
-		n -= PAGE_SIZE;
-	}
-}
-
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
@@ -338,14 +298,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
 		goto io_error;
 
-	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
-		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
-		    bio->bi_iter.bi_size & ~PAGE_MASK)
-			goto io_error;
-		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
-		goto out;
-	}
-
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
 		int err;
@@ -357,7 +309,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 		sector += len >> SECTOR_SHIFT;
 	}
 
-out:
 	bio_endio(bio);
 	return BLK_QC_T_NONE;
 io_error:
@@ -464,11 +415,6 @@ static struct brd_device *brd_alloc(int i)
 	 * is harmless)
 	 */
 	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
-
-	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
-	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
-	brd->brd_queue->limits.discard_zeroes_data = 1;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
 #ifdef CONFIG_BLK_DEV_RAM_DAX
 	queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
 #endif
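
The removed discard_from_brd() makes the commit message concrete: the
driver only ever zeroed whole pages, never reclaimed them. If this
zero-fill semantic were wanted again, it maps naturally onto a
REQ_OP_WRITE_ZEROES handler. The following is an editor's sketch along
those lines, reusing the helpers visible in the diff (brd_lookup_page(),
clear_highpage()); it is an illustration, not code from this patch or
from the brd driver:

/*
 * Sketch only: zero n bytes of page-aligned backing store starting at
 * sector. Pages that were never inserted already read back as zeroes,
 * so they can simply be skipped.
 */
static void brd_write_zeroes(struct brd_device *brd, sector_t sector,
			     size_t n)
{
	while (n >= PAGE_SIZE) {
		struct page *page = brd_lookup_page(brd, sector);

		if (page)
			clear_highpage(page);

		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}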