about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--fs/block_dev.c63
-rw-r--r--fs/mpage.c12
-rw-r--r--include/linux/blkdev.h4
3 files changed, 79 insertions, 0 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 552a8d13bc32..83fba15cc394 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -363,6 +363,69 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
363} 363}
364EXPORT_SYMBOL(blkdev_fsync); 364EXPORT_SYMBOL(blkdev_fsync);
365 365
366/**
367 * bdev_read_page() - Start reading a page from a block device
368 * @bdev: The device to read the page from
369 * @sector: The offset on the device to read the page to (need not be aligned)
370 * @page: The page to read
371 *
372 * On entry, the page should be locked. It will be unlocked when the page
373 * has been read. If the block driver implements rw_page synchronously,
374 * that will be true on exit from this function, but it need not be.
375 *
376 * Errors returned by this function are usually "soft", eg out of memory, or
377 * queue full; callers should try a different route to read this page rather
378 * than propagate an error back up the stack.
379 *
380 * Return: negative errno if an error occurs, 0 if submission was successful.
381 */
382int bdev_read_page(struct block_device *bdev, sector_t sector,
383 struct page *page)
384{
385 const struct block_device_operations *ops = bdev->bd_disk->fops;
386 if (!ops->rw_page)
387 return -EOPNOTSUPP;
388 return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
389}
390EXPORT_SYMBOL_GPL(bdev_read_page);
391
392/**
393 * bdev_write_page() - Start writing a page to a block device
394 * @bdev: The device to write the page to
395 * @sector: The offset on the device to write the page to (need not be aligned)
396 * @page: The page to write
397 * @wbc: The writeback_control for the write
398 *
399 * On entry, the page should be locked and not currently under writeback.
400 * On exit, if the write started successfully, the page will be unlocked and
401 * under writeback. If the write failed already (eg the driver failed to
402 * queue the page to the device), the page will still be locked. If the
403 * caller is a ->writepage implementation, it will need to unlock the page.
404 *
405 * Errors returned by this function are usually "soft", eg out of memory, or
406 * queue full; callers should try a different route to write this page rather
407 * than propagate an error back up the stack.
408 *
409 * Return: negative errno if an error occurs, 0 if submission was successful.
410 */
411int bdev_write_page(struct block_device *bdev, sector_t sector,
412 struct page *page, struct writeback_control *wbc)
413{
414 int result;
415 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
416 const struct block_device_operations *ops = bdev->bd_disk->fops;
417 if (!ops->rw_page)
418 return -EOPNOTSUPP;
419 set_page_writeback(page);
420 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
421 if (result)
422 end_page_writeback(page);
423 else
424 unlock_page(page);
425 return result;
426}
427EXPORT_SYMBOL_GPL(bdev_write_page);
428
366/* 429/*
367 * pseudo-fs 430 * pseudo-fs
368 */ 431 */
diff --git a/fs/mpage.c b/fs/mpage.c
index 10da0da73017..5f9ed622274f 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -269,6 +269,11 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
269 269
270alloc_new: 270alloc_new:
271 if (bio == NULL) { 271 if (bio == NULL) {
272 if (first_hole == blocks_per_page) {
273 if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
274 page))
275 goto out;
276 }
272 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), 277 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
273 min_t(int, nr_pages, bio_get_nr_vecs(bdev)), 278 min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
274 GFP_KERNEL); 279 GFP_KERNEL);
@@ -587,6 +592,13 @@ page_is_mapped:
587 592
588alloc_new: 593alloc_new:
589 if (bio == NULL) { 594 if (bio == NULL) {
595 if (first_unmapped == blocks_per_page) {
596 if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
597 page, wbc)) {
598 clean_buffers(page, first_unmapped);
599 goto out;
600 }
601 }
590 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), 602 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
591 bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH); 603 bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
592 if (bio == NULL) 604 if (bio == NULL)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 45cf6e537c83..2f3886e6cc78 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1588,6 +1588,7 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
1588struct block_device_operations { 1588struct block_device_operations {
1589 int (*open) (struct block_device *, fmode_t); 1589 int (*open) (struct block_device *, fmode_t);
1590 void (*release) (struct gendisk *, fmode_t); 1590 void (*release) (struct gendisk *, fmode_t);
1591 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
1591 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1592 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1592 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1593 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1593 int (*direct_access) (struct block_device *, sector_t, 1594 int (*direct_access) (struct block_device *, sector_t,
@@ -1606,6 +1607,9 @@ struct block_device_operations {
1606 1607
1607extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, 1608extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1608 unsigned long); 1609 unsigned long);
1610extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1611extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1612 struct writeback_control *);
1609#else /* CONFIG_BLOCK */ 1613#else /* CONFIG_BLOCK */
1610 1614
1611struct block_device; 1615struct block_device;