aboutsummaryrefslogtreecommitdiffstats
path: root/fs/block_dev.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/block_dev.c')
-rw-r--r--fs/block_dev.c63
1 file changed, 63 insertions, 0 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e68e150b1b16..6d7274619bf9 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -364,6 +364,69 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
364} 364}
365EXPORT_SYMBOL(blkdev_fsync); 365EXPORT_SYMBOL(blkdev_fsync);
366 366
367/**
368 * bdev_read_page() - Start reading a page from a block device
369 * @bdev: The device to read the page from
370 * @sector: The offset on the device to read the page to (need not be aligned)
371 * @page: The page to read
372 *
373 * On entry, the page should be locked. It will be unlocked when the page
374 * has been read. If the block driver implements rw_page synchronously,
375 * that will be true on exit from this function, but it need not be.
376 *
377 * Errors returned by this function are usually "soft", eg out of memory, or
378 * queue full; callers should try a different route to read this page rather
379 * than propagate an error back up the stack.
380 *
381 * Return: negative errno if an error occurs, 0 if submission was successful.
382 */
383int bdev_read_page(struct block_device *bdev, sector_t sector,
384 struct page *page)
385{
386 const struct block_device_operations *ops = bdev->bd_disk->fops;
387 if (!ops->rw_page)
388 return -EOPNOTSUPP;
389 return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
390}
391EXPORT_SYMBOL_GPL(bdev_read_page);
392
393/**
394 * bdev_write_page() - Start writing a page to a block device
395 * @bdev: The device to write the page to
396 * @sector: The offset on the device to write the page to (need not be aligned)
397 * @page: The page to write
398 * @wbc: The writeback_control for the write
399 *
400 * On entry, the page should be locked and not currently under writeback.
401 * On exit, if the write started successfully, the page will be unlocked and
402 * under writeback. If the write failed already (eg the driver failed to
403 * queue the page to the device), the page will still be locked. If the
404 * caller is a ->writepage implementation, it will need to unlock the page.
405 *
406 * Errors returned by this function are usually "soft", eg out of memory, or
407 * queue full; callers should try a different route to write this page rather
408 * than propagate an error back up the stack.
409 *
410 * Return: negative errno if an error occurs, 0 if submission was successful.
411 */
412int bdev_write_page(struct block_device *bdev, sector_t sector,
413 struct page *page, struct writeback_control *wbc)
414{
415 int result;
416 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
417 const struct block_device_operations *ops = bdev->bd_disk->fops;
418 if (!ops->rw_page)
419 return -EOPNOTSUPP;
420 set_page_writeback(page);
421 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
422 if (result)
423 end_page_writeback(page);
424 else
425 unlock_page(page);
426 return result;
427}
428EXPORT_SYMBOL_GPL(bdev_write_page);
429
367/* 430/*
368 * pseudo-fs 431 * pseudo-fs
369 */ 432 */