author		Dan Williams <dan.j.williams@intel.com>	2017-01-27 20:22:03 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2017-04-25 16:20:46 -0400
commit		d4b29fd78ea6fc2be219be3af1a992149b4ff0f6 (patch)
tree		4020240085b9b61af2a27ebd454693e60f9ecd61 /drivers
parent		2093f2e9dfec98e561c83e807910267bcbd8bb7b (diff)
block: remove block_device_operations ->direct_access()
Now that all the producers and consumers of dax interfaces have been
converted to using dax_operations on a dax_device, remove the block
device direct_access enabling.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
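
For reference, the replacement pattern is visible in the brd hunks below: the ->direct_access() hook now lives in dax_operations on the driver's dax_device rather than in block_device_operations. A minimal sketch of that shape follows; the example_* names and the dax_get_private()/alloc_dax() pairing are illustrative assumptions, not part of this patch.

/* Sketch only: the dax_operations shape that replaces the removed block hook. */
#include <linux/dax.h>
#include <linux/pfn_t.h>

struct example_device;				/* hypothetical driver state */
long __example_direct_access(struct example_device *dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);

static long example_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	/* assumes the driver passed its private data to alloc_dax() */
	struct example_device *dev = dax_get_private(dax_dev);

	/* translate pgoff to a kernel address and pfn; return pages mapped */
	return __example_direct_access(dev, pgoff, nr_pages, kaddr, pfn);
}

static const struct dax_operations example_dax_ops = {
	.direct_access = example_dax_direct_access,
};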
Diffstat (limited to 'drivers')
 drivers/block/brd.c          | 15
 drivers/md/dm.c              | 13
 drivers/nvdimm/pmem.c        | 10
 drivers/s390/block/dcssblk.c | 16
 4 files changed, 0 insertions, 54 deletions
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 60f3193c9ce2..bfa4ed2c75ef 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -395,18 +395,6 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
 	return 1;
 }
 
-static long brd_blk_direct_access(struct block_device *bdev, sector_t sector,
-		void **kaddr, pfn_t *pfn, long size)
-{
-	struct brd_device *brd = bdev->bd_disk->private_data;
-	long nr_pages = __brd_direct_access(brd, PHYS_PFN(sector * 512),
-			PHYS_PFN(size), kaddr, pfn);
-
-	if (nr_pages < 0)
-		return nr_pages;
-	return nr_pages * PAGE_SIZE;
-}
-
 static long brd_dax_direct_access(struct dax_device *dax_dev,
 		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
 {
@@ -418,14 +406,11 @@ static long brd_dax_direct_access(struct dax_device *dax_dev,
 static const struct dax_operations brd_dax_ops = {
 	.direct_access = brd_dax_direct_access,
 };
-#else
-#define brd_blk_direct_access NULL
 #endif
 
 static const struct block_device_operations brd_fops = {
 	.owner = THIS_MODULE,
 	.rw_page = brd_rw_page,
-	.direct_access = brd_blk_direct_access,
 };
 
 /*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef4c6f8cad47..79d5f5fd823e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -957,18 +957,6 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
-static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
-		void **kaddr, pfn_t *pfn, long size)
-{
-	struct mapped_device *md = bdev->bd_disk->private_data;
-	struct dax_device *dax_dev = md->dax_dev;
-	long nr_pages = size / PAGE_SIZE;
-
-	nr_pages = dm_dax_direct_access(dax_dev, sector / PAGE_SECTORS,
-			nr_pages, kaddr, pfn);
-	return nr_pages < 0 ? nr_pages : nr_pages * PAGE_SIZE;
-}
-
 /*
  * A target may call dm_accept_partial_bio only from the map routine. It is
  * allowed for all bio types except REQ_PREFLUSH.
@@ -2823,7 +2811,6 @@ static const struct block_device_operations dm_blk_dops = {
 	.open = dm_blk_open,
 	.release = dm_blk_close,
 	.ioctl = dm_blk_ioctl,
-	.direct_access = dm_blk_direct_access,
 	.getgeo = dm_blk_getgeo,
 	.pr_ops = &dm_pr_ops,
 	.owner = THIS_MODULE
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index fbbcf8154eec..85b85633d674 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -220,19 +220,9 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
 }
 
-static long pmem_blk_direct_access(struct block_device *bdev, sector_t sector,
-		void **kaddr, pfn_t *pfn, long size)
-{
-	struct pmem_device *pmem = bdev->bd_queue->queuedata;
-
-	return __pmem_direct_access(pmem, PHYS_PFN(sector * 512),
-			PHYS_PFN(size), kaddr, pfn);
-}
-
 static const struct block_device_operations pmem_fops = {
 	.owner = THIS_MODULE,
 	.rw_page = pmem_rw_page,
-	.direct_access = pmem_blk_direct_access,
 	.revalidate_disk = nvdimm_revalidate_disk,
 };
 
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index dc84cfd4e438..36e5280af3e4 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,8 +31,6 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
 static blk_qc_t dcssblk_make_request(struct request_queue *q,
 		struct bio *bio);
-static long dcssblk_blk_direct_access(struct block_device *bdev, sector_t secnum,
-		void **kaddr, pfn_t *pfn, long size);
 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn);
 
@@ -43,7 +41,6 @@ static const struct block_device_operations dcssblk_devops = {
 	.owner = THIS_MODULE,
 	.open = dcssblk_open,
 	.release = dcssblk_release,
-	.direct_access = dcssblk_blk_direct_access,
 };
 
 static const struct dax_operations dcssblk_dax_ops = {
@@ -916,19 +913,6 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
 }
 
 static long
-dcssblk_blk_direct_access(struct block_device *bdev, sector_t secnum,
-		void **kaddr, pfn_t *pfn, long size)
-{
-	struct dcssblk_dev_info *dev_info;
-
-	dev_info = bdev->bd_disk->private_data;
-	if (!dev_info)
-		return -ENODEV;
-	return __dcssblk_direct_access(dev_info, PHYS_PFN(secnum * 512),
-			PHYS_PFN(size), kaddr, pfn) * PAGE_SIZE;
-}
-
-static long
 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn)
 {