| author | Dan Williams <dan.j.williams@intel.com> | 2019-05-16 16:26:29 -0400 |
|---|---|---|
| committer | Dan Williams <dan.j.williams@intel.com> | 2019-05-20 18:02:08 -0400 |
| commit | 7bf7eac8d648057519adb6fce1e31458c902212c | |
| tree | 1d4489b03dd6ef2526c41de62d525fdc5a1842ea | |
| parent | c01dafad77fea8d64c4fdca0a6031c980842ad65 | |
dax: Arrange for dax_supported check to span multiple devices
Pankaj reports that starting with commit ad428cdb525a "dax: Check the
end of the block-device capacity with dax_direct_access()" device-mapper
no longer allows dax operation. This results from the stricter checks in
__bdev_dax_supported() that validate that the start and end of a
block-device map to the same 'pagemap' instance.
Teach the dax-core and device-mapper to validate the 'pagemap' on a
per-target basis. This is accomplished by refactoring the
bdev_dax_supported() internals into generic_fsdax_supported() which
takes a sector range to validate. Consequently generic_fsdax_supported()
is suitable to be used in a device-mapper ->iterate_devices() callback.
A new ->dax_supported() operation is added to allow composite devices to
split and route upper-level bdev_dax_supported() requests.
Fixes: ad428cdb525a ("dax: Check the end of the block-device...")
Cc: <stable@vger.kernel.org>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reported-by: Pankaj Gupta <pagupta@redhat.com>
Reviewed-by: Pankaj Gupta <pagupta@redhat.com>
Tested-by: Pankaj Gupta <pagupta@redhat.com>
Tested-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
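Before the diffstat, a note on how the pieces compose. The sketch below is a self-contained userspace model, not kernel code: every identifier in it (toy_dax_dev, toy_dax_ops, toy_target, and so on) is invented for illustration. It only mirrors the call pattern the patch introduces: bdev_dax_supported() resolves a dax_device and calls dax_supported(), which dispatches through the device's ops table; a stacked driver such as device-mapper implements the op by validating each component (start, len) span, ultimately landing in generic_fsdax_supported() for each leaf device.

```c
/*
 * Userspace sketch -- not kernel code -- of the split-and-route pattern
 * this patch adds.  All names here are invented for illustration; only
 * the call pattern mirrors dax_supported() -> ->dax_supported() ->
 * per-target generic_fsdax_supported().
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct toy_dax_dev;

struct toy_dax_ops {
    /* stands in for ->dax_supported(dax_dev, bdev, blocksize, start, len) */
    bool (*dax_supported)(const struct toy_dax_dev *dev, int blocksize,
                          sector_t start, sector_t len);
};

struct toy_target {
    const struct toy_dax_dev *dev;  /* component device */
    sector_t start, len;            /* span mapped from that device */
};

struct toy_dax_dev {
    const char *name;
    const struct toy_dax_ops *ops;
    bool page_backed;               /* leaf capability */
    const struct toy_target *targets;
    int nr_targets;                 /* > 0 only on stacked devices */
};

/* leaf check: plays the role of generic_fsdax_supported() on one pmem span */
static bool leaf_dax_supported(const struct toy_dax_dev *dev, int blocksize,
                               sector_t start, sector_t len)
{
    (void)start; (void)len;         /* a real check would probe the span */
    return dev->page_backed && blocksize == 4096;
}

/* stacked check: plays the role of dm_dax_supported() iterating its targets */
static bool stacked_dax_supported(const struct toy_dax_dev *dev, int blocksize,
                                  sector_t start, sector_t len)
{
    (void)start; (void)len;         /* like dm, validate every mapped span */
    for (int i = 0; i < dev->nr_targets; i++) {
        const struct toy_target *t = &dev->targets[i];

        if (!t->dev->ops->dax_supported(t->dev, blocksize, t->start, t->len))
            return false;
    }
    return true;
}

static const struct toy_dax_ops leaf_ops = { .dax_supported = leaf_dax_supported };
static const struct toy_dax_ops stacked_ops = { .dax_supported = stacked_dax_supported };

int main(void)
{
    const struct toy_dax_dev pmem0 = { .name = "pmem0", .ops = &leaf_ops,
                                       .page_backed = true };
    const struct toy_dax_dev pmem1 = { .name = "pmem1", .ops = &leaf_ops,
                                       .page_backed = true };
    const struct toy_target spans[] = {
        { .dev = &pmem0, .start = 0, .len = 1 << 20 },
        { .dev = &pmem1, .start = 0, .len = 1 << 20 },
    };
    const struct toy_dax_dev dm0 = { .name = "dm-0", .ops = &stacked_ops,
                                     .targets = spans, .nr_targets = 2 };

    /* mirrors bdev_dax_supported(bdev, PAGE_SIZE) on the stacked device */
    printf("%s dax supported: %s\n", dm0.name,
           dm0.ops->dax_supported(&dm0, 4096, 0, 2 << 20) ? "yes" : "no");
    return 0;
}
```

The point of the indirection is that bdev_dax_supported() no longer assumes the whole block device maps to a single pagemap; the device that owns the mapping decides, one component range at a time.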
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/dax/super.c | 88 |
| -rw-r--r-- | drivers/md/dm-table.c | 17 |
| -rw-r--r-- | drivers/md/dm.c | 20 |
| -rw-r--r-- | drivers/md/dm.h | 1 |
| -rw-r--r-- | drivers/nvdimm/pmem.c | 1 |
| -rw-r--r-- | drivers/s390/block/dcssblk.c | 1 |
| -rw-r--r-- | include/linux/dax.h | 26 |

7 files changed, 117 insertions(+), 37 deletions(-)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index bbd57ca0634a..3a7b0a0bf469 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -73,22 +73,12 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
 #endif
 
-/**
- * __bdev_dax_supported() - Check if the device supports dax for filesystem
- * @bdev: block device to check
- * @blocksize: The block size of the device
- *
- * This is a library function for filesystems to check if the block device
- * can be mounted with dax option.
- *
- * Return: true if supported, false if unsupported
- */
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+bool __generic_fsdax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t sectors)
 {
-	struct dax_device *dax_dev;
 	bool dax_enabled = false;
 	pgoff_t pgoff, pgoff_end;
-	struct request_queue *q;
 	char buf[BDEVNAME_SIZE];
 	void *kaddr, *end_kaddr;
 	pfn_t pfn, end_pfn;
@@ -102,21 +92,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		return false;
 	}
 
-	q = bdev_get_queue(bdev);
-	if (!q || !blk_queue_dax(q)) {
-		pr_debug("%s: error: request queue doesn't support dax\n",
-				bdevname(bdev, buf));
-		return false;
-	}
-
-	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
+	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
 	if (err) {
 		pr_debug("%s: error: unaligned partition for dax\n",
 				bdevname(bdev, buf));
 		return false;
 	}
 
-	last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
+	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
 	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
 	if (err) {
 		pr_debug("%s: error: unaligned partition for dax\n",
@@ -124,20 +107,11 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		return false;
 	}
 
-	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
-	if (!dax_dev) {
-		pr_debug("%s: error: device does not support dax\n",
-				bdevname(bdev, buf));
-		return false;
-	}
-
 	id = dax_read_lock();
 	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
 	dax_read_unlock(id);
 
-	put_dax(dax_dev);
-
 	if (len < 1 || len2 < 1) {
 		pr_debug("%s: error: dax access failed (%ld)\n",
 				bdevname(bdev, buf), len < 1 ? len : len2);
@@ -178,6 +152,49 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 	}
 	return true;
 }
+EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
+
+/**
+ * __bdev_dax_supported() - Check if the device supports dax for filesystem
+ * @bdev: block device to check
+ * @blocksize: The block size of the device
+ *
+ * This is a library function for filesystems to check if the block device
+ * can be mounted with dax option.
+ *
+ * Return: true if supported, false if unsupported
+ */
+bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+{
+	struct dax_device *dax_dev;
+	struct request_queue *q;
+	char buf[BDEVNAME_SIZE];
+	bool ret;
+	int id;
+
+	q = bdev_get_queue(bdev);
+	if (!q || !blk_queue_dax(q)) {
+		pr_debug("%s: error: request queue doesn't support dax\n",
+				bdevname(bdev, buf));
+		return false;
+	}
+
+	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+	if (!dax_dev) {
+		pr_debug("%s: error: device does not support dax\n",
+				bdevname(bdev, buf));
+		return false;
+	}
+
+	id = dax_read_lock();
+	ret = dax_supported(dax_dev, bdev, blocksize, 0,
+			i_size_read(bdev->bd_inode) / 512);
+	dax_read_unlock(id);
+
+	put_dax(dax_dev);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(__bdev_dax_supported);
 #endif
 
@@ -303,6 +320,15 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 }
 EXPORT_SYMBOL_GPL(dax_direct_access);
 
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+		int blocksize, sector_t start, sector_t len)
+{
+	if (!dax_alive(dax_dev))
+		return false;
+
+	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
+}
+
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i)
 {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cde3b49b2a91..350cf0451456 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -880,13 +880,17 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 }
 EXPORT_SYMBOL_GPL(dm_table_set_type);
 
+/* validate the dax capability of the target device span */
 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
-	return bdev_dax_supported(dev->bdev, PAGE_SIZE);
+	int blocksize = *(int *) data;
+
+	return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
+			start, len);
 }
 
-static bool dm_table_supports_dax(struct dm_table *t)
+bool dm_table_supports_dax(struct dm_table *t, int blocksize)
 {
 	struct dm_target *ti;
 	unsigned i;
@@ -899,7 +903,8 @@ static bool dm_table_supports_dax(struct dm_table *t)
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
+		    !ti->type->iterate_devices(ti, device_supports_dax,
+				&blocksize))
 			return false;
 	}
 
@@ -979,7 +984,7 @@ static int dm_table_determine_type(struct dm_table *t)
 verify_bio_based:
 	/* We must use this table as bio-based */
 	t->type = DM_TYPE_BIO_BASED;
-	if (dm_table_supports_dax(t) ||
+	if (dm_table_supports_dax(t, PAGE_SIZE) ||
 	    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 		t->type = DM_TYPE_DAX_BIO_BASED;
 	} else {
@@ -1905,7 +1910,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 	blk_queue_write_cache(q, wc, fua);
 
-	if (dm_table_supports_dax(t))
+	if (dm_table_supports_dax(t, PAGE_SIZE))
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1fb1333fefec..b7c0ad01084d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1107,6 +1107,25 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
+static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+		int blocksize, sector_t start, sector_t len)
+{
+	struct mapped_device *md = dax_get_private(dax_dev);
+	struct dm_table *map;
+	int srcu_idx;
+	bool ret;
+
+	map = dm_get_live_table(md, &srcu_idx);
+	if (!map)
+		return false;
+
+	ret = dm_table_supports_dax(map, blocksize);
+
+	dm_put_live_table(md, srcu_idx);
+
+	return ret;
+}
+
 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i)
 {
@@ -3192,6 +3211,7 @@ static const struct block_device_operations dm_blk_dops = {
 
 static const struct dax_operations dm_dax_ops = {
 	.direct_access = dm_dax_direct_access,
+	.dax_supported = dm_dax_supported,
 	.copy_from_iter = dm_dax_copy_from_iter,
 	.copy_to_iter = dm_dax_copy_to_iter,
 };
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 2d539b82ec08..17e3db54404c 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -72,6 +72,7 @@ bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+bool dm_table_supports_dax(struct dm_table *t, int blocksize);
 
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 0279eb1da3ef..845c5b430cdd 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -295,6 +295,7 @@ static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 
 static const struct dax_operations pmem_dax_ops = {
 	.direct_access = pmem_dax_direct_access,
+	.dax_supported = generic_fsdax_supported,
 	.copy_from_iter = pmem_copy_from_iter,
 	.copy_to_iter = pmem_copy_to_iter,
 };
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4e8aedd50cb0..d04d4378ca50 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -59,6 +59,7 @@ static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
 
 static const struct dax_operations dcssblk_dax_ops = {
 	.direct_access = dcssblk_dax_direct_access,
+	.dax_supported = generic_fsdax_supported,
 	.copy_from_iter = dcssblk_dax_copy_from_iter,
 	.copy_to_iter = dcssblk_dax_copy_to_iter,
 };
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 0dd316a74a29..becaea5f4488 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -19,6 +19,12 @@ struct dax_operations {
 	 */
 	long (*direct_access)(struct dax_device *, pgoff_t, long,
 			void **, pfn_t *);
+	/*
+	 * Validate whether this device is usable as an fsdax backing
+	 * device.
+	 */
+	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
+			sector_t, sector_t);
 	/* copy_from_iter: required operation for fs-dax direct-i/o */
 	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
 			struct iov_iter *);
@@ -75,6 +81,17 @@ static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
 	return __bdev_dax_supported(bdev, blocksize);
 }
 
+bool __generic_fsdax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t sectors);
+static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t sectors)
+{
+	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
+			sectors);
+}
+
 static inline struct dax_device *fs_dax_get_by_host(const char *host)
 {
 	return dax_get_by_host(host);
@@ -99,6 +116,13 @@ static inline bool bdev_dax_supported(struct block_device *bdev,
 	return false;
 }
 
+static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t sectors)
+{
+	return false;
+}
+
 static inline struct dax_device *fs_dax_get_by_host(const char *host)
 {
 	return NULL;
@@ -142,6 +166,8 @@ bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 		void **kaddr, pfn_t *pfn);
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+		int blocksize, sector_t start, sector_t len);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i);
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
