Diffstat (limited to 'drivers/block/brd.c')

 drivers/block/brd.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 52 insertions(+), 1 deletion(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 6081e81d5738..f1bf79d9bc0a 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -133,6 +133,28 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 	return page;
 }
 
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+	struct page *page;
+	pgoff_t idx;
+
+	spin_lock(&brd->brd_lock);
+	idx = sector >> PAGE_SECTORS_SHIFT;
+	page = radix_tree_delete(&brd->brd_pages, idx);
+	spin_unlock(&brd->brd_lock);
+	if (page)
+		__free_page(page);
+}
+
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+	struct page *page;
+
+	page = brd_lookup_page(brd, sector);
+	if (page)
+		clear_highpage(page);
+}
+
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -189,6 +211,24 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
 	return 0;
 }
 
+static void discard_from_brd(struct brd_device *brd,
+			sector_t sector, size_t n)
+{
+	while (n >= PAGE_SIZE) {
+		/*
+		 * Don't want to actually discard pages here because
+		 * re-allocating the pages can result in writeback
+		 * deadlocks under heavy load.
+		 */
+		if (0)
+			brd_free_page(brd, sector);
+		else
+			brd_zero_page(brd, sector);
+		sector += PAGE_SIZE >> SECTOR_SHIFT;
+		n -= PAGE_SIZE;
+	}
+}
+
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
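
The loop above only acts on whole pages: with 4 KiB pages and 512-byte sectors (SECTOR_SHIFT == 9), each pass zeroes one page and advances by PAGE_SIZE >> SECTOR_SHIFT = 8 sectors, so any sub-page tail of the range is left as-is. A standalone sketch of that stepping, with the page and sector sizes assumed rather than taken from kernel headers:

/* Sketch of discard_from_brd()'s page-stepping arithmetic.
 * Assumes 4 KiB pages and 512-byte sectors; values are illustrative. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SECTOR_SHIFT	9

int main(void)
{
	unsigned long sector = 0, n = 3 * PAGE_SIZE + 512;

	while (n >= PAGE_SIZE) {
		printf("zero page at sector %lu\n", sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;	/* 8 sectors per page */
		n -= PAGE_SIZE;
	}
	printf("%lu byte tail left untouched\n", n);	/* prints 512 */
	return 0;
}
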
@@ -300,6 +340,12 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 						get_capacity(bdev->bd_disk))
 		goto out;
 
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+		err = 0;
+		discard_from_brd(brd, sector, bio->bi_size);
+		goto out;
+	}
+
 	rw = bio_rw(bio);
 	if (rw == READA)
 		rw = READ;
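
With this branch in place, a discard bio is completed with success once its pages have been zeroed. From userspace the path can be exercised with the BLKDISCARD ioctl; a minimal sketch, where /dev/ram0 and the 1 MiB range are illustrative assumptions:

/* Minimal sketch: issue a discard to a brd device with BLKDISCARD.
 * The device path and range are assumptions for illustration. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	uint64_t range[2] = { 0, 1 << 20 };	/* byte offset, byte length */
	int fd = open("/dev/ram0", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/ram0");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}
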
@@ -320,7 +366,7 @@ out:
 }
 
 #ifdef CONFIG_BLK_DEV_XIP
-static int brd_direct_access (struct block_device *bdev, sector_t sector,
+static int brd_direct_access(struct block_device *bdev, sector_t sector,
 			void **kaddr, unsigned long *pfn)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
@@ -437,6 +483,11 @@ static struct brd_device *brd_alloc(int i)
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
+	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
+	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+	brd->brd_queue->limits.discard_zeroes_data = 1;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
+
 	disk = brd->brd_disk = alloc_disk(1 << part_shift);
 	if (!disk)
 		goto out_free_queue;
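
These limits are what the block layer advertises back to userspace, e.g. via /sys/block/ram0/queue/discard_granularity and discard_max_bytes. Because discard_zeroes_data is set to 1, the BLKDISCARDZEROES ioctl (on kernels that expose it) should report the same; a small sketch, with the device path assumed:

/* Sketch: read back the discard capability brd now advertises.
 * Assumes /dev/ram0 and a kernel providing BLKDISCARDZEROES. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	unsigned int zeroes = 0;
	int fd = open("/dev/ram0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/ram0");
		return 1;
	}
	if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0)
		printf("discard zeroes data: %u\n", zeroes);	/* expect 1 */
	else
		perror("BLKDISCARDZEROES");
	close(fd);
	return 0;
}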