author     Toshi Kani <toshi.kani@hpe.com>       2016-06-22 19:54:53 -0400
committer  Mike Snitzer <snitzer@redhat.com>     2016-07-20 23:49:49 -0400
commit     545ed20e6df68a4d2584a29a2a28ee8b2f7e9547 (patch)
tree       2d826caf0e6cfdf27d6a21401dea468cfb03e0d1 /drivers/md/dm.c
parent     e9ccb945c4d937189668f9e3cf00949da4727762 (diff)
dm: add infrastructure for DAX support
Change the mapped device to implement a direct_access function, dm_blk_direct_access(), which calls a target's direct_access function. 'struct target_type' is extended with a target direct_access interface, and dm_blk_direct_access() limits the directly accessible size to the dm_target's limit using max_io_len().

Add dm_table_supports_dax() to iterate over all targets and their associated block devices to check for DAX support. To add DAX support to a DM target, the target need only implement the direct_access function.

Add a new dm type, DM_TYPE_DAX_BIO_BASED, which indicates that the mapped device supports DAX and is bio-based. This new type is used to ensure that all target devices have DAX support and remain that way after QUEUE_FLAG_DAX is set on the mapped device.

At initial table load, QUEUE_FLAG_DAX is set on the mapped device when its type is set to DM_TYPE_DAX_BIO_BASED. Any subsequent table load to the mapped device must have the same type, or else it fails per the check in table_load().

Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
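For orientation, here is a minimal sketch of what a target-side implementation of the new hook could look like, using the signature implied by the call through ti->type->direct_access() in the diff below. The example_* names, the per-target context, and the use of bdev_direct_access() with struct blk_dax_ctl are illustrative assumptions for a simple pass-through target of this kernel era, not code from this patch.

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Hypothetical per-target context; a real target would fill this in
 * from its .ctr arguments.
 */
struct example_ctx {
        struct dm_dev *dev;     /* underlying DAX-capable device */
        sector_t start;         /* offset into that device */
};

/*
 * Sketch of a pass-through .direct_access implementation. The signature
 * matches the call site in dm_blk_direct_access() below; everything
 * else here is an assumption.
 */
static long example_direct_access(struct dm_target *ti, sector_t sector,
                                  void __pmem **kaddr, pfn_t *pfn, long size)
{
        struct example_ctx *ec = ti->private;
        struct blk_dax_ctl dax = {
                .sector = ec->start + dm_target_offset(ti, sector),
                .size = size,
        };
        long ret;

        /* Ask the underlying block device for a direct pmem mapping. */
        ret = bdev_direct_access(ec->dev->bdev, &dax);
        *kaddr = dax.addr;
        *pfn = dax.pfn;

        return ret;
}

static struct target_type example_target = {
        .name          = "example",
        .version       = {1, 0, 0},
        .module        = THIS_MODULE,
        /* .ctr/.dtr/.map elided */
        .direct_access = example_direct_access,
};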
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c | 38
1 file changed, 36 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7538b8972820..4dca5a792e4b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -905,6 +905,33 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 }
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
+static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
+                                 void __pmem **kaddr, pfn_t *pfn, long size)
+{
+        struct mapped_device *md = bdev->bd_disk->private_data;
+        struct dm_table *map;
+        struct dm_target *ti;
+        int srcu_idx;
+        long len, ret = -EIO;
+
+        map = dm_get_live_table(md, &srcu_idx);
+        if (!map)
+                goto out;
+
+        ti = dm_table_find_target(map, sector);
+        if (!dm_target_is_valid(ti))
+                goto out;
+
+        len = max_io_len(sector, ti) << SECTOR_SHIFT;
+        size = min(len, size);
+
+        if (ti->type->direct_access)
+                ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
+out:
+        dm_put_live_table(md, srcu_idx);
+        return min(ret, size);
+}
+
 /*
  * A target may call dm_accept_partial_bio only from the map routine. It is
  * allowed for all bio types except REQ_PREFLUSH.
@@ -1548,7 +1575,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 
         if (md->bs) {
                 /* The md already has necessary mempools. */
-                if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
+                if (dm_table_bio_based(t)) {
                         /*
                          * Reload bioset because front_pad may have changed
                          * because a different table was loaded.
@@ -1744,8 +1771,9 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
         int r;
+        unsigned type = dm_get_md_type(md);
 
-        switch (dm_get_md_type(md)) {
+        switch (type) {
         case DM_TYPE_REQUEST_BASED:
                 r = dm_old_init_request_queue(md);
                 if (r) {
@@ -1761,6 +1789,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
                 }
                 break;
         case DM_TYPE_BIO_BASED:
+        case DM_TYPE_DAX_BIO_BASED:
                 dm_init_normal_md_queue(md);
                 blk_queue_make_request(md->queue, dm_make_request);
                 /*
@@ -1769,6 +1798,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
                  */
                 bioset_free(md->queue->bio_split);
                 md->queue->bio_split = NULL;
+
+                if (type == DM_TYPE_DAX_BIO_BASED)
+                        queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
                 break;
         }
 
@@ -2465,6 +2497,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 
         switch (type) {
         case DM_TYPE_BIO_BASED:
+        case DM_TYPE_DAX_BIO_BASED:
                 cachep = _io_cache;
                 pool_size = dm_get_reserved_bio_based_ios();
                 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
@@ -2691,6 +2724,7 @@ static const struct block_device_operations dm_blk_dops = {
         .open = dm_blk_open,
         .release = dm_blk_close,
         .ioctl = dm_blk_ioctl,
+        .direct_access = dm_blk_direct_access,
         .getgeo = dm_blk_getgeo,
         .pr_ops = &dm_pr_ops,
         .owner = THIS_MODULE
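The dm_table_supports_dax() helper mentioned in the commit message lives in drivers/md/dm-table.c, which this diffstat excludes. As an approximation of the check it describes, assuming an iterate_devices()-based walk and the QUEUE_FLAG_DAX queue-flag test introduced alongside this series; treat the body as a sketch, not the committed code:

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/* Does one underlying device advertise DAX? (assumed helper shape) */
static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
                               sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_dax(q);
}

/*
 * Approximation of dm_table_supports_dax(): every target must provide
 * .direct_access, and every device under every target must have
 * QUEUE_FLAG_DAX set on its queue.
 */
static bool dm_table_supports_dax(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->type->direct_access)
                        return false;

                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
                        return false;
        }

        return true;
}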