| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-02-15 12:12:28 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-02-15 12:12:28 -0500 |
| commit | 24f0a48743a256bdec1bcb80708bc309da4aa261 | |
| tree | b3211465a2c59f56587c052b70ccd587d60d7f1e | |
| parent | ae3fa8bd73c9b64416816ec0e9951cd8695c9854 | |
| parent | ace74f73c200df4254788210ac70e00a7aeca4b8 | |
Merge tag 'for-linus-20190215' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
- Ensure we insert into the hctx dispatch list if a request is marked
  as DONTPREP (Jianchao)
- NVMe pull request: a single missing unlock-on-error fix (Keith)
- MD pull request: a single fix for a potentially data-corrupting issue
  (Nate)
- Floppy check_events regression fix (Yufen)
* tag 'for-linus-20190215' of git://git.kernel.dk/linux-block:
md/raid1: don't clear bitmap bits on interrupted recovery.
floppy: check_events callback should not return a negative number
nvme-pci: add missing unlock for reset error
blk-mq: insert rq with DONTPREP to hctx dispatch list when requeue
```
 block/blk-mq.c          | 12 ++++++++++--
 drivers/block/floppy.c  |  2 +-
 drivers/md/raid1.c      | 28 ++++++++++++++++++++----------
 drivers/nvme/host/pci.c |  8 +++++---
 4 files changed, 34 insertions(+), 16 deletions(-)
```
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8f5b533764ca..9437a5eb07cf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
 	spin_unlock_irq(&q->requeue_lock);
 
 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
-		if (!(rq->rq_flags & RQF_SOFTBARRIER))
+		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
 			continue;
 
 		rq->rq_flags &= ~RQF_SOFTBARRIER;
 		list_del_init(&rq->queuelist);
-		blk_mq_sched_insert_request(rq, true, false, false);
+		/*
+		 * If RQF_DONTPREP, rq has contained some driver specific
+		 * data, so insert it to hctx dispatch list to avoid any
+		 * merge.
+		 */
+		if (rq->rq_flags & RQF_DONTPREP)
+			blk_mq_request_bypass_insert(rq, false);
+		else
+			blk_mq_sched_insert_request(rq, true, false, false);
 	}
 
 	while (!list_empty(&rq_list)) {
```
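The core of the blk-mq change is a routing decision on requeue. The standalone sketch below models that decision in userspace; the flag values, the `struct request` layout, and the two insert helpers are stand-ins for illustration, not the kernel's definitions.

```c
#include <stdio.h>

#define RQF_SOFTBARRIER (1u << 0)	/* stand-in value */
#define RQF_DONTPREP    (1u << 1)	/* stand-in value */

struct request {
	unsigned int rq_flags;
	const char *name;
};

/* Stand-in for blk_mq_request_bypass_insert(): straight to the hctx dispatch list. */
static void bypass_insert(struct request *rq)
{
	printf("%s -> hctx dispatch list (no merging)\n", rq->name);
}

/* Stand-in for blk_mq_sched_insert_request(): back through the I/O scheduler. */
static void sched_insert(struct request *rq)
{
	printf("%s -> I/O scheduler\n", rq->name);
}

static void requeue_one(struct request *rq)
{
	/* Neither flag set: leave it for the normal second-pass insert. */
	if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
		return;

	rq->rq_flags &= ~RQF_SOFTBARRIER;

	/* RQF_DONTPREP means the driver already attached per-request data,
	 * so keep the request out of the scheduler where it could be merged. */
	if (rq->rq_flags & RQF_DONTPREP)
		bypass_insert(rq);
	else
		sched_insert(rq);
}

int main(void)
{
	struct request a = { RQF_SOFTBARRIER, "rq-a" };
	struct request b = { RQF_DONTPREP, "rq-b" };

	requeue_one(&a);	/* goes to the scheduler */
	requeue_one(&b);	/* bypasses the scheduler */
	return 0;
}
```

The design point, per the in-diff comment, is that a DONTPREP request still owns driver-prepared state, so it must not pass back through the scheduler where a merge could invalidate that state; the dispatch list bypasses merging.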
```diff
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 6f2856c6d0f2..55481b40df9a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
 
 	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
 		if (lock_fdc(drive))
-			return -EINTR;
+			return 0;
 		poll_drive(false, 0);
 		process_fd_request();
 	}
```
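The floppy fix is about the callback's contract rather than its logic: `check_events` returns an unsigned bitmask of pending events, not an errno. A minimal userspace sketch of the type mismatch (assuming `EINTR` is 4, as on Linux):

```c
#include <errno.h>
#include <stdio.h>

/* Same shape as the kernel op: an unsigned bitmask of pending events. */
static unsigned int buggy_check_events(void)
{
	return -EINTR;	/* implicitly converted: -4 becomes 0xfffffffc */
}

static unsigned int fixed_check_events(void)
{
	return 0;	/* "no events pending", which is what the fix returns */
}

int main(void)
{
	/* The caller treats the value as event bits, so the buggy version
	 * reports a meaningless mask instead of "nothing happened". */
	printf("buggy: 0x%x\n", buggy_check_events());
	printf("fixed: 0x%x\n", fixed_check_events());
	return 0;
}
```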
```diff
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1d54109071cc..fa47249fa3e4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
 		reschedule_retry(r1_bio);
 }
 
+static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
+{
+	sector_t sync_blocks = 0;
+	sector_t s = r1_bio->sector;
+	long sectors_to_go = r1_bio->sectors;
+
+	/* make sure these bits don't get cleared. */
+	do {
+		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+		s += sync_blocks;
+		sectors_to_go -= sync_blocks;
+	} while (sectors_to_go > 0);
+}
+
 static void end_sync_write(struct bio *bio)
 {
 	int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
 
 	if (!uptodate) {
-		sector_t sync_blocks = 0;
-		sector_t s = r1_bio->sector;
-		long sectors_to_go = r1_bio->sectors;
-		/* make sure these bits doesn't get cleared. */
-		do {
-			md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
-			s += sync_blocks;
-			sectors_to_go -= sync_blocks;
-		} while (sectors_to_go > 0);
+		abort_sync_write(mddev, r1_bio);
 		set_bit(WriteErrorSeen, &rdev->flags);
 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 			set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		    (i == r1_bio->read_disk ||
 		     !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
 			continue;
-		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
+			abort_sync_write(mddev, r1_bio);
 			continue;
+		}
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
```
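The raid1 change factors the "end the sync without clearing bitmap bits" loop into `abort_sync_write()` and also calls it when a write target is skipped because the device is Faulty, so an interrupted recovery leaves the dirty bits in place and the range is resynced later. The sketch below models the chunk-walking loop in userspace; `end_sync_chunk()` and the chunk size are stand-ins for `md_bitmap_end_sync()` and the real bitmap geometry.

```c
#include <stdio.h>

typedef unsigned long long sector_t;

#define CHUNK_SECTORS 128ULL	/* stand-in bitmap chunk size */

/* Pretend to close out one bitmap chunk; report how many sectors it spans. */
static void end_sync_chunk(sector_t offset, sector_t *blocks, int aborted)
{
	*blocks = CHUNK_SECTORS - (offset % CHUNK_SECTORS);
	printf("chunk at %llu: %llu sectors, %s\n",
	       offset, *blocks, aborted ? "kept dirty for retry" : "cleared");
}

static void abort_sync_range(sector_t start, long sectors)
{
	sector_t blocks = 0;
	sector_t s = start;
	long to_go = sectors;

	/* make sure the dirty bits for this range don't get cleared */
	do {
		end_sync_chunk(s, &blocks, 1);
		s += blocks;
		to_go -= blocks;
	} while (to_go > 0);
}

int main(void)
{
	abort_sync_range(100, 300);	/* a range crossing several bitmap chunks */
	return 0;
}
```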
```diff
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 022ea1ee63f8..7fee665ec45e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2560,15 +2560,15 @@ static void nvme_reset_work(struct work_struct *work)
 	mutex_lock(&dev->shutdown_lock);
 	result = nvme_pci_enable(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	result = nvme_pci_configure_admin_queue(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	result = nvme_alloc_admin_tags(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	/*
 	 * Limit the max command size to prevent iod->sg allocations going
@@ -2651,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work)
 	nvme_start_ctrl(&dev->ctrl);
 	return;
 
+out_unlock:
+	mutex_unlock(&dev->shutdown_lock);
 out:
 	nvme_remove_dead_ctrl(dev, result);
 }
```
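The nvme-pci fix is the classic staged-cleanup pattern: once `shutdown_lock` is taken, every failing step must exit through a label that drops it before reaching the shared error path. A userspace sketch of that shape, with a pthread mutex and placeholder step functions standing in for the driver calls:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t shutdown_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder steps; configure_admin() is made to fail to exercise the path. */
static int enable_hw(void)        { return 0; }
static int configure_admin(void)  { return -1; }
static int alloc_tags(void)       { return 0; }
static int start_ctrl(void)       { return 0; }

static void reset_work(void)
{
	int result;

	pthread_mutex_lock(&shutdown_lock);

	result = enable_hw();
	if (result)
		goto out_unlock;

	result = configure_admin();
	if (result)
		goto out_unlock;	/* the bug was jumping to 'out' here, skipping the unlock */

	result = alloc_tags();
	if (result)
		goto out_unlock;

	pthread_mutex_unlock(&shutdown_lock);

	result = start_ctrl();
	if (result)
		goto out;		/* lock already released, plain 'out' is correct */

	printf("reset completed\n");
	return;

out_unlock:
	pthread_mutex_unlock(&shutdown_lock);
out:
	printf("reset failed: %d, tearing down controller\n", result);
}

int main(void)
{
	reset_work();
	return 0;
}
```

Cascading labels keep a single unlock site, so a future early-exit added after the lock is taken cannot silently leave the mutex held again.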
