 Documentation/device-mapper/dm-raid.txt |  1
 drivers/dax/super.c                     |  6
 drivers/md/dm-bufio.c                   |  3
 drivers/md/dm-integrity.c               | 22
 drivers/md/dm-raid.c                    | 29
 drivers/md/dm-table.c                   | 35
 drivers/md/dm-verity-fec.c              | 21
 drivers/md/dm-zoned-metadata.c          | 12
 drivers/md/dm-zoned-reclaim.c           |  2
 drivers/md/dm-zoned-target.c            |  8
 include/linux/dax.h                     |  1
 11 files changed, 94 insertions(+), 46 deletions(-)
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 7e06e65586d4..4a0a7469fdd7 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -343,3 +343,4 @@ Version History
 1.11.0	Fix table line argument order
 	(wrong raid10_copies/raid10_format sequence)
 1.11.1	Add raid4/5/6 journal write-back support via journal_mode option
+1.12.1	fix for MD deadlock between mddev_suspend() and md_write_start() available
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ce9e563e6e1d..938eb4868f7f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc)
 }
 EXPORT_SYMBOL_GPL(dax_write_cache);
 
+bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+
 bool dax_alive(struct dax_device *dax_dev)
 {
 	lockdep_assert_held(&dax_srcu);
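
The new helper just reads back the DAXDEV_WRITE_CACHE flag that dax_write_cache() sets. A minimal sketch of how the pair fits together (the caller below is hypothetical, not part of this patch):

	#include <linux/dax.h>

	/* Hypothetical caller: mirror a component device's write-cache
	 * setting onto a composite DAX device, as dm-table does below. */
	static void example_propagate_write_cache(struct dax_device *top,
						  struct dax_device *component)
	{
		if (dax_write_cache_enabled(component))
			dax_write_cache(top, true);
	}

The dm-table.c change later in this patch does exactly this: it iterates over all component devices and calls dax_write_cache() on the mapped device if any of them has the flag set.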
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 850ff6c67994..44f4a8ac95bd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
  */
 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 {
-	blk_status_t a;
-	int f;
+	int a, f;
 	unsigned long buffers_processed = 0;
 	struct dm_buffer *b, *tmp;
 
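
dm_bufio_write_dirty_buffers() returns an int errno, so keeping `a` as blk_status_t appears to have mixed block-layer status codes into the errno space; the two types are distinct in 4.13 and need explicit conversion where they meet. A sketch using the converter that ships with blk_status_t (the wrapper itself is illustrative only):

	#include <linux/blkdev.h>

	/* Illustrative only: map a block-layer status such as
	 * BLK_STS_IOERR onto a negative errno like -EIO. */
	static int example_status_to_errno(blk_status_t sts)
	{
		return blk_status_to_errno(sts);
	}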
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1b224aa9cf15..3acce09bba35 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1587,16 +1587,18 @@ retry:
 	if (likely(ic->mode == 'J')) {
 		if (dio->write) {
 			unsigned next_entry, i, pos;
-			unsigned ws, we;
+			unsigned ws, we, range_sectors;
 
-			dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
+			dio->range.n_sectors = min(dio->range.n_sectors,
+						   ic->free_sectors << ic->sb->log2_sectors_per_block);
 			if (unlikely(!dio->range.n_sectors))
 				goto sleep;
-			ic->free_sectors -= dio->range.n_sectors;
+			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
+			ic->free_sectors -= range_sectors;
 			journal_section = ic->free_section;
 			journal_entry = ic->free_section_entry;
 
-			next_entry = ic->free_section_entry + dio->range.n_sectors;
+			next_entry = ic->free_section_entry + range_sectors;
 			ic->free_section_entry = next_entry % ic->journal_section_entries;
 			ic->free_section += next_entry / ic->journal_section_entries;
 			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
@@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
 		wraparound_section(ic, &ic->free_section);
 		ic->n_uncommitted_sections++;
 	}
+	WARN_ON(ic->journal_sections * ic->journal_section_entries !=
+		(ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
 }
 
 static void integrity_commit(struct work_struct *w)
@@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
 {
 	unsigned i, j, n;
 	struct journal_completion comp;
+	struct blk_plug plug;
+
+	blk_start_plug(&plug);
 
 	comp.ic = ic;
 	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
@@ -1945,6 +1952,8 @@ skip_io:
 
 	dm_bufio_write_dirty_buffers_async(ic->bufio);
 
+	blk_finish_plug(&plug);
+
 	complete_journal_op(&comp);
 	wait_for_completion_io(&comp.comp);
 
@@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		ti->error = "Block size doesn't match the information in superblock";
 		goto bad;
 	}
+	if (!le32_to_cpu(ic->sb->journal_sections)) {
+		r = -EINVAL;
+		ti->error = "Corrupted superblock, journal_sections is 0";
+		goto bad;
+	}
 	/* make sure that ti->max_io_len doesn't overflow */
 	if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
 	    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
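
Two of the hunks above wrap do_journal_write()'s I/O submission in a block plug so the journal writes are dispatched as one batch. For reference, a minimal sketch of the plugging idiom (example_submit_batch is hypothetical):

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	static void example_submit_batch(struct bio **bios, unsigned int n)
	{
		struct blk_plug plug;
		unsigned int i;

		blk_start_plug(&plug);	/* queue I/O on a per-task plug list */
		for (i = 0; i < n; i++)
			submit_bio(bios[i]);
		blk_finish_plug(&plug);	/* unplug: dispatch the batch */
	}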
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2e10c2f13a34..5bfe285ea9d1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -208,6 +208,7 @@ struct raid_dev {
 #define RT_FLAG_RS_BITMAP_LOADED	2
 #define RT_FLAG_UPDATE_SBS		3
 #define RT_FLAG_RESHAPE_RS		4
+#define RT_FLAG_RS_SUSPENDED		5
 
 /* Array elements of 64 bit needed for rebuild/failed disk bits */
 #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout)
 	if (__raid10_near_copies(layout) > 1)
 		return "near";
 
-	WARN_ON(__raid10_far_copies(layout) < 2);
+	if (__raid10_far_copies(layout) > 1)
+		return "far";
 
-	return "far";
+	return "unknown";
 }
 
 /* Return md raid10 algorithm for @name */
@@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	if (!freshest)
 		return 0;
 
-	if (validate_raid_redundancy(rs)) {
-		rs->ti->error = "Insufficient redundancy to activate array";
-		return -EINVAL;
-	}
-
 	/*
 	 * Validation of the freshest device provides the source of
 	 * validation for the remaining devices.
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	if (super_validate(rs, freshest))
 		return -EINVAL;
 
+	if (validate_raid_redundancy(rs)) {
+		rs->ti->error = "Insufficient redundancy to activate array";
+		return -EINVAL;
+	}
+
 	rdev_for_each(rdev, mddev)
 		if (!test_bit(Journal, &rdev->flags) &&
 		    rdev != freshest &&
@@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	mddev_suspend(&rs->md);
+	set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
 
 	/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
 	if (rs_is_raid456(rs)) {
@@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti)
 {
 	struct raid_set *rs = ti->private;
 
-	if (!rs->md.suspended)
+	if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
 		mddev_suspend(&rs->md);
 
 	rs->md.ro = 1;
@@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs)
 		return r;
 
 	/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
-	if (mddev->suspended)
+	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
 		mddev_resume(mddev);
 
 	/*
@@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs)
 	}
 
 	/* Suspend because a resume will happen in raid_resume() */
-	if (!mddev->suspended)
-		mddev_suspend(mddev);
+	set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
+	mddev_suspend(mddev);
 
 	/*
 	 * Now reshape got set up, update superblocks to
@@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti)
 	if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
-	if (mddev->suspended)
+	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
 		mddev_resume(mddev);
 }
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 11, 1},
+	.version = {1, 12, 1},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
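
dm-raid now tracks suspension in its own RT_FLAG_RS_SUSPENDED bit instead of peeking at mddev->suspended, and the atomic test-and-modify bitops guarantee that each mddev_suspend() is paired with exactly one mddev_resume(). A minimal sketch of that idiom (all names below are hypothetical):

	#include <linux/bitops.h>
	#include <linux/printk.h>

	#define EXAMPLE_SUSPENDED	0
	static unsigned long example_flags;

	static void example_suspend(void)
	{
		/* Acts only on the 0 -> 1 transition of the bit. */
		if (!test_and_set_bit(EXAMPLE_SUSPENDED, &example_flags))
			pr_info("suspending\n");	/* mddev_suspend() goes here */
	}

	static void example_resume(void)
	{
		/* Acts only on the 1 -> 0 transition of the bit. */
		if (test_and_clear_bit(EXAMPLE_SUSPENDED, &example_flags))
			pr_info("resuming\n");		/* mddev_resume() goes here */
	}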
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a39bcd9b982a..28a4071cdf85 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -20,6 +20,7 @@
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
 #include <linux/mount.h>
+#include <linux/dax.h>
 
 #define DM_MSG_PREFIX "table"
 
@@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 	return false;
 }
 
+static int device_dax_write_cache_enabled(struct dm_target *ti,
+					  struct dm_dev *dev, sector_t start,
+					  sector_t len, void *data)
+{
+	struct dax_device *dax_dev = dev->dax_dev;
+
+	if (!dax_dev)
+		return false;
+
+	if (dax_write_cache_enabled(dax_dev))
+		return true;
+	return false;
+}
+
+static int dm_table_supports_dax_write_cache(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti,
+				device_dax_write_cache_enabled, NULL))
+			return true;
+	}
+
+	return false;
+}
+
 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 			    sector_t start, sector_t len, void *data)
 {
@@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 	blk_queue_write_cache(q, wc, fua);
 
+	if (dm_table_supports_dax_write_cache(t))
+		dax_write_cache(t->md->dax_dev, true);
+
 	/* Ensure that all underlying devices are non-rotational. */
 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 504ba3fa328b..e13f90832b6b 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
 {
 	unsigned n;
 
-	if (!fio->rs) {
-		fio->rs = mempool_alloc(v->fec->rs_pool, 0);
-		if (unlikely(!fio->rs)) {
-			DMERR("failed to allocate RS");
-			return -ENOMEM;
-		}
-	}
+	if (!fio->rs)
+		fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO);
 
 	fec_for_each_prealloc_buffer(n) {
 		if (fio->bufs[n])
 			continue;
 
-		fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO);
+		fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT);
 		if (unlikely(!fio->bufs[n])) {
 			DMERR("failed to allocate FEC buffer");
 			return -ENOMEM;
@@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
 		if (fio->bufs[n])
 			continue;
 
-		fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO);
+		fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT);
 		/* we can manage with even one buffer if necessary */
 		if (unlikely(!fio->bufs[n]))
 			break;
 	}
 	fio->nbufs = n;
 
-	if (!fio->output) {
+	if (!fio->output)
 		fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
 
-		if (!fio->output) {
-			DMERR("failed to allocate FEC page");
-			return -ENOMEM;
-		}
-	}
-
 	return 0;
 }
 
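
The flag changes above lean on documented mempool behaviour: an allocation with a direct-reclaim mask such as GFP_NOIO waits for an element to be returned to the pool and never yields NULL, which is why the removed error paths were dead code, whereas GFP_NOWAIT is opportunistic and can fail. A sketch of the two modes (example_pool is hypothetical):

	#include <linux/mempool.h>

	static void *example_get(mempool_t *example_pool, bool can_sleep)
	{
		if (can_sleep)
			/* Sleeps until an element is free; never NULL. */
			return mempool_alloc(example_pool, GFP_NOIO);

		/* May return NULL immediately; the caller must cope. */
		return mempool_alloc(example_pool, GFP_NOWAIT);
	}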
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 884ff7c170a0..a4fa2ada6883 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
 
 	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
 	if (ret == 0)
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
 
 	return ret;
 }
@@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 
 	/* Flush drive cache (this will also sync data) */
 	if (ret == 0)
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
 
 	return ret;
 }
@@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 
 	/* If there are no dirty metadata blocks, just flush the device cache */
 	if (list_empty(&write_list)) {
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
 		goto out;
 	}
 
@@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
 			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
 	}
 
-	page = alloc_page(GFP_KERNEL);
+	page = alloc_page(GFP_NOIO);
 	if (!page)
 		return -ENOMEM;
 
@@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
 
 	/* Get zone information from disk */
 	ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
-				  &blkz, &nr_blkz, GFP_KERNEL);
+				  &blkz, &nr_blkz, GFP_NOIO);
 	if (ret) {
 		dmz_dev_err(zmd->dev, "Get zone %u report failed",
 			    dmz_id(zmd, zone));
@@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
 
 	ret = blkdev_reset_zones(dev->bdev,
 				 dmz_start_sect(zmd, zone),
-				 dev->zone_nr_sectors, GFP_KERNEL);
+				 dev->zone_nr_sectors, GFP_NOIO);
 	if (ret) {
 		dmz_dev_err(dev, "Reset zone %u failed %d",
 			    dmz_id(zmd, zone), ret);
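
All of these sites run on the metadata I/O path, where a GFP_KERNEL allocation may recurse into reclaim that issues I/O to the same device and deadlock; GFP_NOIO takes I/O off the reclaim menu. The kernel also provides a scoped variant of the same guarantee, sketched below (the function itself is hypothetical):

	#include <linux/sched/mm.h>
	#include <linux/gfp.h>

	static struct page *example_alloc_on_io_path(void)
	{
		unsigned int noio_flags = memalloc_noio_save();
		struct page *page;

		/* Every allocation in this scope behaves as GFP_NOIO. */
		page = alloc_page(GFP_KERNEL);

		memalloc_noio_restore(noio_flags);
		return page;
	}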
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 05c0a126f5c8..44a119e12f1a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
 	nr_blocks = block - wp_block;
 	ret = blkdev_issue_zeroout(zrc->dev->bdev,
 				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
-				   dmz_blk2sect(nr_blocks), GFP_NOFS, false);
+				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
 	if (ret) {
 		dmz_dev_err(zrc->dev,
 			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 2b538fa817f4..b08bbbd4d902 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 	int ret;
 
 	/* Create a new chunk work */
-	cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS);
+	cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
 	if (!cw)
 		goto out;
 
@@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 
 	bio->bi_bdev = dev->bdev;
 
-	if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE))
+	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
 		return DM_MAPIO_REMAPPED;
 
 	/* The BIO should be block aligned */
@@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 	bioctx->status = BLK_STS_OK;
 
 	/* Set the BIO pending in the flush list */
-	if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) {
+	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
 		spin_lock(&dmz->flush_lock);
 		bio_list_add(&dmz->flush_list, bio);
 		spin_unlock(&dmz->flush_lock);
@@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	/* Chunk BIO work */
 	mutex_init(&dmz->chunk_lock);
-	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS);
+	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
 	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
 					0, dev->name);
 	if (!dmz->chunk_wq) {
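
The dropped REQ_OP_FLUSH tests reflect how flushes actually reach a bio-based target: an empty flush arrives as a zero-sector REQ_OP_WRITE bio (flagged REQ_PREFLUSH), while REQ_OP_FLUSH appears only at the request level. A sketch of the condition the remaining code keys on (the helper is hypothetical):

	#include <linux/bio.h>

	static bool example_is_empty_flush(struct bio *bio)
	{
		/* Zero-sector writes are the bio-level form of a flush. */
		return bio_op(bio) == REQ_OP_WRITE && !bio_sectors(bio);
	}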
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 794811875732..df97b7af7e2c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -87,6 +87,7 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t size);
 void dax_write_cache(struct dax_device *dax_dev, bool wc);
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
 
 /*
  * We use lowest available bit in exceptional entry for locking, one bit for