diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-02-17 14:59:23 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-02-17 14:59:23 -0500 |
commit | 2850713576e81e3b887cd92a9965fba0dd1717c0 (patch) | |
tree | 59d445abedc8b3b667f9ed7ecf6480386bfb8a27 | |
parent | c28b947d048d6539389309af70be0ac599122ec0 (diff) | |
parent | 18f922d037211a15543af935861bf92161e697e9 (diff) |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"A collection of fixes from the past few weeks that should go into 4.5.
This contains:
- Overflow fix for sysfs discard show function from Alan.
- A stacking limit init fix for max_dev_sectors, so we don't end up
artificially capping some use cases. From Keith.
- Have blk-mq properly end unstarted requests on a dying queue, instead
of pushing that to the driver. From Keith.
- NVMe:
- Update to Kconfig description for NVME_SCSI, since it was
vague and having it on is important for some SUSE distros.
From Christoph.
- Set of fixes from Keith, around surprise removal. Also kills
the no-merge flag, so it supports merging.
- Set of fixes for lightnvm from Matias, Javier, and Wenwei.
- Fix null_blk oops when asked for lightnvm, but not available. From
Matias.
- Copy-to-user EINTR fix from Hannes, fixing a case where SG_IO fails
if interrupted by a signal.
- Two floppy fixes from Jiri, fixing signal handling and blocking
open.
- A use-after-free fix for O_DIRECT, from Mike Krinkin.
- A block module ref count fix from Roman Pen.
- An fs IO wait accounting fix for O_DSYNC from Stephane Gasparini.
- Smaller realloc fix for xen-blkfront from Bob Liu.
- Removal of an unused struct member in the deadline IO scheduler,
from Tahsin.
- Also from Tahsin, properly initialize inode struct members
associated with cgroup writeback, if enabled.
- From Tejun, ensure that we keep the superblock pinned during cgroup
writeback"
* 'for-linus' of git://git.kernel.dk/linux-block: (25 commits)
blk: fix overflow in queue_discard_max_hw_show
writeback: initialize inode members that track writeback history
writeback: keep superblock pinned during cgroup writeback association switches
bio: return EINTR if copying to user space got interrupted
NVMe: Rate limit nvme IO warnings
NVMe: Poll device while still active during remove
NVMe: Requeue requests on suspended queues
NVMe: Allow request merges
NVMe: Fix io incapable return values
blk-mq: End unstarted requests on dying queue
block: Initialize max_dev_sectors to 0
null_blk: oops when initializing without lightnvm
block: fix module reference leak on put_disk() call for cgroups throttle
nvme: fix Kconfig description for BLK_DEV_NVME_SCSI
kernel/fs: fix I/O wait not accounted for RW O_DSYNC
floppy: refactor open() flags handling
lightnvm: allow to force mm initialization
lightnvm: check overflow and correct mlc pairs
lightnvm: fix request intersection locking in rrpc
lightnvm: warn if irqs are disabled in lock laddr
...
-rw-r--r-- | block/bio.c | 9 | ||||
-rw-r--r-- | block/blk-cgroup.c | 9 | ||||
-rw-r--r-- | block/blk-mq.c | 6 | ||||
-rw-r--r-- | block/blk-settings.c | 4 | ||||
-rw-r--r-- | block/blk-sysfs.c | 5 | ||||
-rw-r--r-- | block/deadline-iosched.c | 3 | ||||
-rw-r--r-- | drivers/block/floppy.c | 67 | ||||
-rw-r--r-- | drivers/block/null_blk.c | 8 | ||||
-rw-r--r-- | drivers/block/xen-blkfront.c | 74 | ||||
-rw-r--r-- | drivers/lightnvm/core.c | 25 | ||||
-rw-r--r-- | drivers/lightnvm/rrpc.c | 4 | ||||
-rw-r--r-- | drivers/lightnvm/rrpc.h | 5 | ||||
-rw-r--r-- | drivers/nvme/host/Kconfig | 5 | ||||
-rw-r--r-- | drivers/nvme/host/core.c | 1 | ||||
-rw-r--r-- | drivers/nvme/host/lightnvm.c | 12 | ||||
-rw-r--r-- | drivers/nvme/host/nvme.h | 4 | ||||
-rw-r--r-- | drivers/nvme/host/pci.c | 13 | ||||
-rw-r--r-- | fs/direct-io.c | 2 | ||||
-rw-r--r-- | fs/fs-writeback.c | 15 | ||||
-rw-r--r-- | fs/inode.c | 6 | ||||
-rw-r--r-- | include/linux/lightnvm.h | 4 |
21 files changed, 175 insertions, 106 deletions
diff --git a/block/bio.c b/block/bio.c index dbabd48b1934..cf7591551b17 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -874,7 +874,7 @@ int submit_bio_wait(int rw, struct bio *bio) | |||
874 | bio->bi_private = &ret; | 874 | bio->bi_private = &ret; |
875 | bio->bi_end_io = submit_bio_wait_endio; | 875 | bio->bi_end_io = submit_bio_wait_endio; |
876 | submit_bio(rw, bio); | 876 | submit_bio(rw, bio); |
877 | wait_for_completion(&ret.event); | 877 | wait_for_completion_io(&ret.event); |
878 | 878 | ||
879 | return ret.error; | 879 | return ret.error; |
880 | } | 880 | } |
@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio) | |||
1090 | if (!bio_flagged(bio, BIO_NULL_MAPPED)) { | 1090 | if (!bio_flagged(bio, BIO_NULL_MAPPED)) { |
1091 | /* | 1091 | /* |
1092 | * if we're in a workqueue, the request is orphaned, so | 1092 | * if we're in a workqueue, the request is orphaned, so |
1093 | * don't copy into a random user address space, just free. | 1093 | * don't copy into a random user address space, just free |
1094 | * and return -EINTR so user space doesn't expect any data. | ||
1094 | */ | 1095 | */ |
1095 | if (current->mm && bio_data_dir(bio) == READ) | 1096 | if (!current->mm) |
1097 | ret = -EINTR; | ||
1098 | else if (bio_data_dir(bio) == READ) | ||
1096 | ret = bio_copy_to_iter(bio, bmd->iter); | 1099 | ret = bio_copy_to_iter(bio, bmd->iter); |
1097 | if (bmd->is_our_pages) | 1100 | if (bmd->is_our_pages) |
1098 | bio_free_pages(bio); | 1101 | bio_free_pages(bio); |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 5a37188b559f..66e6f1aae02e 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -788,6 +788,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | |||
788 | { | 788 | { |
789 | struct gendisk *disk; | 789 | struct gendisk *disk; |
790 | struct blkcg_gq *blkg; | 790 | struct blkcg_gq *blkg; |
791 | struct module *owner; | ||
791 | unsigned int major, minor; | 792 | unsigned int major, minor; |
792 | int key_len, part, ret; | 793 | int key_len, part, ret; |
793 | char *body; | 794 | char *body; |
@@ -804,7 +805,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | |||
804 | if (!disk) | 805 | if (!disk) |
805 | return -ENODEV; | 806 | return -ENODEV; |
806 | if (part) { | 807 | if (part) { |
808 | owner = disk->fops->owner; | ||
807 | put_disk(disk); | 809 | put_disk(disk); |
810 | module_put(owner); | ||
808 | return -ENODEV; | 811 | return -ENODEV; |
809 | } | 812 | } |
810 | 813 | ||
@@ -820,7 +823,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | |||
820 | ret = PTR_ERR(blkg); | 823 | ret = PTR_ERR(blkg); |
821 | rcu_read_unlock(); | 824 | rcu_read_unlock(); |
822 | spin_unlock_irq(disk->queue->queue_lock); | 825 | spin_unlock_irq(disk->queue->queue_lock); |
826 | owner = disk->fops->owner; | ||
823 | put_disk(disk); | 827 | put_disk(disk); |
828 | module_put(owner); | ||
824 | /* | 829 | /* |
825 | * If queue was bypassing, we should retry. Do so after a | 830 | * If queue was bypassing, we should retry. Do so after a |
826 | * short msleep(). It isn't strictly necessary but queue | 831 | * short msleep(). It isn't strictly necessary but queue |
@@ -851,9 +856,13 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep); | |||
851 | void blkg_conf_finish(struct blkg_conf_ctx *ctx) | 856 | void blkg_conf_finish(struct blkg_conf_ctx *ctx) |
852 | __releases(ctx->disk->queue->queue_lock) __releases(rcu) | 857 | __releases(ctx->disk->queue->queue_lock) __releases(rcu) |
853 | { | 858 | { |
859 | struct module *owner; | ||
860 | |||
854 | spin_unlock_irq(ctx->disk->queue->queue_lock); | 861 | spin_unlock_irq(ctx->disk->queue->queue_lock); |
855 | rcu_read_unlock(); | 862 | rcu_read_unlock(); |
863 | owner = ctx->disk->fops->owner; | ||
856 | put_disk(ctx->disk); | 864 | put_disk(ctx->disk); |
865 | module_put(owner); | ||
857 | } | 866 | } |
858 | EXPORT_SYMBOL_GPL(blkg_conf_finish); | 867 | EXPORT_SYMBOL_GPL(blkg_conf_finish); |
859 | 868 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 4c0622fae413..56c0a726b619 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -599,8 +599,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, | |||
599 | * If a request wasn't started before the queue was | 599 | * If a request wasn't started before the queue was |
600 | * marked dying, kill it here or it'll go unnoticed. | 600 | * marked dying, kill it here or it'll go unnoticed. |
601 | */ | 601 | */ |
602 | if (unlikely(blk_queue_dying(rq->q))) | 602 | if (unlikely(blk_queue_dying(rq->q))) { |
603 | blk_mq_complete_request(rq, -EIO); | 603 | rq->errors = -EIO; |
604 | blk_mq_end_request(rq, rq->errors); | ||
605 | } | ||
604 | return; | 606 | return; |
605 | } | 607 | } |
606 | 608 | ||
diff --git a/block/blk-settings.c b/block/blk-settings.c index dd4973583978..c7bb666aafd1 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
@@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim) | |||
91 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; | 91 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; |
92 | lim->virt_boundary_mask = 0; | 92 | lim->virt_boundary_mask = 0; |
93 | lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; | 93 | lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; |
94 | lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = | 94 | lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; |
95 | BLK_SAFE_MAX_SECTORS; | 95 | lim->max_dev_sectors = 0; |
96 | lim->chunk_sectors = 0; | 96 | lim->chunk_sectors = 0; |
97 | lim->max_write_same_sectors = 0; | 97 | lim->max_write_same_sectors = 0; |
98 | lim->max_discard_sectors = 0; | 98 | lim->max_discard_sectors = 0; |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index e140cc487ce1..dd93763057ce 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -147,10 +147,9 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag | |||
147 | 147 | ||
148 | static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) | 148 | static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) |
149 | { | 149 | { |
150 | unsigned long long val; | ||
151 | 150 | ||
152 | val = q->limits.max_hw_discard_sectors << 9; | 151 | return sprintf(page, "%llu\n", |
153 | return sprintf(page, "%llu\n", val); | 152 | (unsigned long long)q->limits.max_hw_discard_sectors << 9); |
154 | } | 153 | } |
155 | 154 | ||
156 | static ssize_t queue_discard_max_show(struct request_queue *q, char *page) | 155 | static ssize_t queue_discard_max_show(struct request_queue *q, char *page) |
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index a753df2b3fc2..d0dd7882d8c7 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c | |||
@@ -39,7 +39,6 @@ struct deadline_data { | |||
39 | */ | 39 | */ |
40 | struct request *next_rq[2]; | 40 | struct request *next_rq[2]; |
41 | unsigned int batching; /* number of sequential requests made */ | 41 | unsigned int batching; /* number of sequential requests made */ |
42 | sector_t last_sector; /* head position */ | ||
43 | unsigned int starved; /* times reads have starved writes */ | 42 | unsigned int starved; /* times reads have starved writes */ |
44 | 43 | ||
45 | /* | 44 | /* |
@@ -210,8 +209,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq) | |||
210 | dd->next_rq[WRITE] = NULL; | 209 | dd->next_rq[WRITE] = NULL; |
211 | dd->next_rq[data_dir] = deadline_latter_request(rq); | 210 | dd->next_rq[data_dir] = deadline_latter_request(rq); |
212 | 211 | ||
213 | dd->last_sector = rq_end_sector(rq); | ||
214 | |||
215 | /* | 212 | /* |
216 | * take it off the sort and fifo list, move | 213 | * take it off the sort and fifo list, move |
217 | * to dispatch queue | 214 | * to dispatch queue |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 9e251201dd48..84708a5f8c52 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -866,7 +866,7 @@ static void set_fdc(int drive) | |||
866 | } | 866 | } |
867 | 867 | ||
868 | /* locks the driver */ | 868 | /* locks the driver */ |
869 | static int lock_fdc(int drive, bool interruptible) | 869 | static int lock_fdc(int drive) |
870 | { | 870 | { |
871 | if (WARN(atomic_read(&usage_count) == 0, | 871 | if (WARN(atomic_read(&usage_count) == 0, |
872 | "Trying to lock fdc while usage count=0\n")) | 872 | "Trying to lock fdc while usage count=0\n")) |
@@ -2173,7 +2173,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req) | |||
2173 | { | 2173 | { |
2174 | int ret; | 2174 | int ret; |
2175 | 2175 | ||
2176 | if (lock_fdc(drive, true)) | 2176 | if (lock_fdc(drive)) |
2177 | return -EINTR; | 2177 | return -EINTR; |
2178 | 2178 | ||
2179 | set_floppy(drive); | 2179 | set_floppy(drive); |
@@ -2960,7 +2960,7 @@ static int user_reset_fdc(int drive, int arg, bool interruptible) | |||
2960 | { | 2960 | { |
2961 | int ret; | 2961 | int ret; |
2962 | 2962 | ||
2963 | if (lock_fdc(drive, interruptible)) | 2963 | if (lock_fdc(drive)) |
2964 | return -EINTR; | 2964 | return -EINTR; |
2965 | 2965 | ||
2966 | if (arg == FD_RESET_ALWAYS) | 2966 | if (arg == FD_RESET_ALWAYS) |
@@ -3243,7 +3243,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, | |||
3243 | if (!capable(CAP_SYS_ADMIN)) | 3243 | if (!capable(CAP_SYS_ADMIN)) |
3244 | return -EPERM; | 3244 | return -EPERM; |
3245 | mutex_lock(&open_lock); | 3245 | mutex_lock(&open_lock); |
3246 | if (lock_fdc(drive, true)) { | 3246 | if (lock_fdc(drive)) { |
3247 | mutex_unlock(&open_lock); | 3247 | mutex_unlock(&open_lock); |
3248 | return -EINTR; | 3248 | return -EINTR; |
3249 | } | 3249 | } |
@@ -3263,7 +3263,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, | |||
3263 | } else { | 3263 | } else { |
3264 | int oldStretch; | 3264 | int oldStretch; |
3265 | 3265 | ||
3266 | if (lock_fdc(drive, true)) | 3266 | if (lock_fdc(drive)) |
3267 | return -EINTR; | 3267 | return -EINTR; |
3268 | if (cmd != FDDEFPRM) { | 3268 | if (cmd != FDDEFPRM) { |
3269 | /* notice a disk change immediately, else | 3269 | /* notice a disk change immediately, else |
@@ -3349,7 +3349,7 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g) | |||
3349 | if (type) | 3349 | if (type) |
3350 | *g = &floppy_type[type]; | 3350 | *g = &floppy_type[type]; |
3351 | else { | 3351 | else { |
3352 | if (lock_fdc(drive, false)) | 3352 | if (lock_fdc(drive)) |
3353 | return -EINTR; | 3353 | return -EINTR; |
3354 | if (poll_drive(false, 0) == -EINTR) | 3354 | if (poll_drive(false, 0) == -EINTR) |
3355 | return -EINTR; | 3355 | return -EINTR; |
@@ -3433,7 +3433,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
3433 | if (UDRS->fd_ref != 1) | 3433 | if (UDRS->fd_ref != 1) |
3434 | /* somebody else has this drive open */ | 3434 | /* somebody else has this drive open */ |
3435 | return -EBUSY; | 3435 | return -EBUSY; |
3436 | if (lock_fdc(drive, true)) | 3436 | if (lock_fdc(drive)) |
3437 | return -EINTR; | 3437 | return -EINTR; |
3438 | 3438 | ||
3439 | /* do the actual eject. Fails on | 3439 | /* do the actual eject. Fails on |
@@ -3445,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
3445 | process_fd_request(); | 3445 | process_fd_request(); |
3446 | return ret; | 3446 | return ret; |
3447 | case FDCLRPRM: | 3447 | case FDCLRPRM: |
3448 | if (lock_fdc(drive, true)) | 3448 | if (lock_fdc(drive)) |
3449 | return -EINTR; | 3449 | return -EINTR; |
3450 | current_type[drive] = NULL; | 3450 | current_type[drive] = NULL; |
3451 | floppy_sizes[drive] = MAX_DISK_SIZE << 1; | 3451 | floppy_sizes[drive] = MAX_DISK_SIZE << 1; |
@@ -3467,7 +3467,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
3467 | UDP->flags &= ~FTD_MSG; | 3467 | UDP->flags &= ~FTD_MSG; |
3468 | return 0; | 3468 | return 0; |
3469 | case FDFMTBEG: | 3469 | case FDFMTBEG: |
3470 | if (lock_fdc(drive, true)) | 3470 | if (lock_fdc(drive)) |
3471 | return -EINTR; | 3471 | return -EINTR; |
3472 | if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) | 3472 | if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) |
3473 | return -EINTR; | 3473 | return -EINTR; |
@@ -3484,7 +3484,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
3484 | return do_format(drive, &inparam.f); | 3484 | return do_format(drive, &inparam.f); |
3485 | case FDFMTEND: | 3485 | case FDFMTEND: |
3486 | case FDFLUSH: | 3486 | case FDFLUSH: |
3487 | if (lock_fdc(drive, true)) | 3487 | if (lock_fdc(drive)) |
3488 | return -EINTR; | 3488 | return -EINTR; |
3489 | return invalidate_drive(bdev); | 3489 | return invalidate_drive(bdev); |
3490 | case FDSETEMSGTRESH: | 3490 | case FDSETEMSGTRESH: |
@@ -3507,7 +3507,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
3507 | outparam = UDP; | 3507 | outparam = UDP; |
3508 | break; | 3508 | break; |
3509 | case FDPOLLDRVSTAT: | 3509 | case FDPOLLDRVSTAT: |
3510 | if (lock_fdc(drive, true)) | 3510 | if (lock_fdc(drive)) |
3511 | return -EINTR; | 3511 | return -EINTR; |
3512 | if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) | 3512 | if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) |
3513 | return -EINTR; | 3513 | return -EINTR; |
@@ -3530,7 +3530,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
3530 | case FDRAWCMD: | 3530 | case FDRAWCMD: |
3531 | if (type) | 3531 | if (type) |
3532 | return -EINVAL; | 3532 | return -EINVAL; |
3533 | if (lock_fdc(drive, true)) | 3533 | if (lock_fdc(drive)) |
3534 | return -EINTR; | 3534 | return -EINTR; |
3535 | set_floppy(drive); | 3535 | set_floppy(drive); |
3536 | i = raw_cmd_ioctl(cmd, (void __user *)param); | 3536 | i = raw_cmd_ioctl(cmd, (void __user *)param); |
@@ -3539,7 +3539,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
3539 | process_fd_request(); | 3539 | process_fd_request(); |
3540 | return i; | 3540 | return i; |
3541 | case FDTWADDLE: | 3541 | case FDTWADDLE: |
3542 | if (lock_fdc(drive, true)) | 3542 | if (lock_fdc(drive)) |
3543 | return -EINTR; | 3543 | return -EINTR; |
3544 | twaddle(); | 3544 | twaddle(); |
3545 | process_fd_request(); | 3545 | process_fd_request(); |
@@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) | |||
3663 | 3663 | ||
3664 | opened_bdev[drive] = bdev; | 3664 | opened_bdev[drive] = bdev; |
3665 | 3665 | ||
3666 | if (!(mode & (FMODE_READ|FMODE_WRITE))) { | ||
3667 | res = -EINVAL; | ||
3668 | goto out; | ||
3669 | } | ||
3670 | |||
3666 | res = -ENXIO; | 3671 | res = -ENXIO; |
3667 | 3672 | ||
3668 | if (!floppy_track_buffer) { | 3673 | if (!floppy_track_buffer) { |
@@ -3706,21 +3711,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) | |||
3706 | if (UFDCS->rawcmd == 1) | 3711 | if (UFDCS->rawcmd == 1) |
3707 | UFDCS->rawcmd = 2; | 3712 | UFDCS->rawcmd = 2; |
3708 | 3713 | ||
3709 | if (!(mode & FMODE_NDELAY)) { | 3714 | UDRS->last_checked = 0; |
3710 | if (mode & (FMODE_READ|FMODE_WRITE)) { | 3715 | clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); |
3711 | UDRS->last_checked = 0; | 3716 | check_disk_change(bdev); |
3712 | clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); | 3717 | if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) |
3713 | check_disk_change(bdev); | 3718 | goto out; |
3714 | if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) | 3719 | if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) |
3715 | goto out; | 3720 | goto out; |
3716 | if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) | 3721 | |
3717 | goto out; | 3722 | res = -EROFS; |
3718 | } | 3723 | |
3719 | res = -EROFS; | 3724 | if ((mode & FMODE_WRITE) && |
3720 | if ((mode & FMODE_WRITE) && | 3725 | !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) |
3721 | !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) | 3726 | goto out; |
3722 | goto out; | 3727 | |
3723 | } | ||
3724 | mutex_unlock(&open_lock); | 3728 | mutex_unlock(&open_lock); |
3725 | mutex_unlock(&floppy_mutex); | 3729 | mutex_unlock(&floppy_mutex); |
3726 | return 0; | 3730 | return 0; |
@@ -3748,7 +3752,8 @@ static unsigned int floppy_check_events(struct gendisk *disk, | |||
3748 | return DISK_EVENT_MEDIA_CHANGE; | 3752 | return DISK_EVENT_MEDIA_CHANGE; |
3749 | 3753 | ||
3750 | if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { | 3754 | if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { |
3751 | lock_fdc(drive, false); | 3755 | if (lock_fdc(drive)) |
3756 | return -EINTR; | ||
3752 | poll_drive(false, 0); | 3757 | poll_drive(false, 0); |
3753 | process_fd_request(); | 3758 | process_fd_request(); |
3754 | } | 3759 | } |
@@ -3847,7 +3852,9 @@ static int floppy_revalidate(struct gendisk *disk) | |||
3847 | "VFS: revalidate called on non-open device.\n")) | 3852 | "VFS: revalidate called on non-open device.\n")) |
3848 | return -EFAULT; | 3853 | return -EFAULT; |
3849 | 3854 | ||
3850 | lock_fdc(drive, false); | 3855 | res = lock_fdc(drive); |
3856 | if (res) | ||
3857 | return res; | ||
3851 | cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || | 3858 | cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || |
3852 | test_bit(FD_VERIFY_BIT, &UDRS->flags)); | 3859 | test_bit(FD_VERIFY_BIT, &UDRS->flags)); |
3853 | if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { | 3860 | if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8ba1e97d573c..64a7b5971b57 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -478,7 +478,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) | |||
478 | id->ver_id = 0x1; | 478 | id->ver_id = 0x1; |
479 | id->vmnt = 0; | 479 | id->vmnt = 0; |
480 | id->cgrps = 1; | 480 | id->cgrps = 1; |
481 | id->cap = 0x3; | 481 | id->cap = 0x2; |
482 | id->dom = 0x1; | 482 | id->dom = 0x1; |
483 | 483 | ||
484 | id->ppaf.blk_offset = 0; | 484 | id->ppaf.blk_offset = 0; |
@@ -707,9 +707,7 @@ static int null_add_dev(void) | |||
707 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); | 707 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); |
708 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); | 708 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); |
709 | 709 | ||
710 | |||
711 | mutex_lock(&lock); | 710 | mutex_lock(&lock); |
712 | list_add_tail(&nullb->list, &nullb_list); | ||
713 | nullb->index = nullb_indexes++; | 711 | nullb->index = nullb_indexes++; |
714 | mutex_unlock(&lock); | 712 | mutex_unlock(&lock); |
715 | 713 | ||
@@ -743,6 +741,10 @@ static int null_add_dev(void) | |||
743 | strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); | 741 | strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); |
744 | 742 | ||
745 | add_disk(disk); | 743 | add_disk(disk); |
744 | |||
745 | mutex_lock(&lock); | ||
746 | list_add_tail(&nullb->list, &nullb_list); | ||
747 | mutex_unlock(&lock); | ||
746 | done: | 748 | done: |
747 | return 0; | 749 | return 0; |
748 | 750 | ||
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8a8dc91c39f7..83eb9e6bf8b0 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -1873,6 +1873,43 @@ again: | |||
1873 | return err; | 1873 | return err; |
1874 | } | 1874 | } |
1875 | 1875 | ||
1876 | static int negotiate_mq(struct blkfront_info *info) | ||
1877 | { | ||
1878 | unsigned int backend_max_queues = 0; | ||
1879 | int err; | ||
1880 | unsigned int i; | ||
1881 | |||
1882 | BUG_ON(info->nr_rings); | ||
1883 | |||
1884 | /* Check if backend supports multiple queues. */ | ||
1885 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, | ||
1886 | "multi-queue-max-queues", "%u", &backend_max_queues); | ||
1887 | if (err < 0) | ||
1888 | backend_max_queues = 1; | ||
1889 | |||
1890 | info->nr_rings = min(backend_max_queues, xen_blkif_max_queues); | ||
1891 | /* We need at least one ring. */ | ||
1892 | if (!info->nr_rings) | ||
1893 | info->nr_rings = 1; | ||
1894 | |||
1895 | info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL); | ||
1896 | if (!info->rinfo) { | ||
1897 | xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); | ||
1898 | return -ENOMEM; | ||
1899 | } | ||
1900 | |||
1901 | for (i = 0; i < info->nr_rings; i++) { | ||
1902 | struct blkfront_ring_info *rinfo; | ||
1903 | |||
1904 | rinfo = &info->rinfo[i]; | ||
1905 | INIT_LIST_HEAD(&rinfo->indirect_pages); | ||
1906 | INIT_LIST_HEAD(&rinfo->grants); | ||
1907 | rinfo->dev_info = info; | ||
1908 | INIT_WORK(&rinfo->work, blkif_restart_queue); | ||
1909 | spin_lock_init(&rinfo->ring_lock); | ||
1910 | } | ||
1911 | return 0; | ||
1912 | } | ||
1876 | /** | 1913 | /** |
1877 | * Entry point to this code when a new device is created. Allocate the basic | 1914 | * Entry point to this code when a new device is created. Allocate the basic |
1878 | * structures and the ring buffer for communication with the backend, and | 1915 | * structures and the ring buffer for communication with the backend, and |
@@ -1883,9 +1920,7 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1883 | const struct xenbus_device_id *id) | 1920 | const struct xenbus_device_id *id) |
1884 | { | 1921 | { |
1885 | int err, vdevice; | 1922 | int err, vdevice; |
1886 | unsigned int r_index; | ||
1887 | struct blkfront_info *info; | 1923 | struct blkfront_info *info; |
1888 | unsigned int backend_max_queues = 0; | ||
1889 | 1924 | ||
1890 | /* FIXME: Use dynamic device id if this is not set. */ | 1925 | /* FIXME: Use dynamic device id if this is not set. */ |
1891 | err = xenbus_scanf(XBT_NIL, dev->nodename, | 1926 | err = xenbus_scanf(XBT_NIL, dev->nodename, |
@@ -1936,33 +1971,10 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1936 | } | 1971 | } |
1937 | 1972 | ||
1938 | info->xbdev = dev; | 1973 | info->xbdev = dev; |
1939 | /* Check if backend supports multiple queues. */ | 1974 | err = negotiate_mq(info); |
1940 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, | 1975 | if (err) { |
1941 | "multi-queue-max-queues", "%u", &backend_max_queues); | ||
1942 | if (err < 0) | ||
1943 | backend_max_queues = 1; | ||
1944 | |||
1945 | info->nr_rings = min(backend_max_queues, xen_blkif_max_queues); | ||
1946 | /* We need at least one ring. */ | ||
1947 | if (!info->nr_rings) | ||
1948 | info->nr_rings = 1; | ||
1949 | |||
1950 | info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL); | ||
1951 | if (!info->rinfo) { | ||
1952 | xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure"); | ||
1953 | kfree(info); | 1976 | kfree(info); |
1954 | return -ENOMEM; | 1977 | return err; |
1955 | } | ||
1956 | |||
1957 | for (r_index = 0; r_index < info->nr_rings; r_index++) { | ||
1958 | struct blkfront_ring_info *rinfo; | ||
1959 | |||
1960 | rinfo = &info->rinfo[r_index]; | ||
1961 | INIT_LIST_HEAD(&rinfo->indirect_pages); | ||
1962 | INIT_LIST_HEAD(&rinfo->grants); | ||
1963 | rinfo->dev_info = info; | ||
1964 | INIT_WORK(&rinfo->work, blkif_restart_queue); | ||
1965 | spin_lock_init(&rinfo->ring_lock); | ||
1966 | } | 1978 | } |
1967 | 1979 | ||
1968 | mutex_init(&info->mutex); | 1980 | mutex_init(&info->mutex); |
@@ -2123,12 +2135,16 @@ static int blkif_recover(struct blkfront_info *info) | |||
2123 | static int blkfront_resume(struct xenbus_device *dev) | 2135 | static int blkfront_resume(struct xenbus_device *dev) |
2124 | { | 2136 | { |
2125 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); | 2137 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
2126 | int err; | 2138 | int err = 0; |
2127 | 2139 | ||
2128 | dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); | 2140 | dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); |
2129 | 2141 | ||
2130 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | 2142 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); |
2131 | 2143 | ||
2144 | err = negotiate_mq(info); | ||
2145 | if (err) | ||
2146 | return err; | ||
2147 | |||
2132 | err = talk_to_blkback(dev, info); | 2148 | err = talk_to_blkback(dev, info); |
2133 | 2149 | ||
2134 | /* | 2150 | /* |
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 33224cb91c5b..9f6acd5d1d2e 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c | |||
@@ -572,11 +572,13 @@ int nvm_register(struct request_queue *q, char *disk_name, | |||
572 | } | 572 | } |
573 | } | 573 | } |
574 | 574 | ||
575 | ret = nvm_get_sysblock(dev, &dev->sb); | 575 | if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) { |
576 | if (!ret) | 576 | ret = nvm_get_sysblock(dev, &dev->sb); |
577 | pr_err("nvm: device not initialized.\n"); | 577 | if (!ret) |
578 | else if (ret < 0) | 578 | pr_err("nvm: device not initialized.\n"); |
579 | pr_err("nvm: err (%d) on device initialization\n", ret); | 579 | else if (ret < 0) |
580 | pr_err("nvm: err (%d) on device initialization\n", ret); | ||
581 | } | ||
580 | 582 | ||
581 | /* register device with a supported media manager */ | 583 | /* register device with a supported media manager */ |
582 | down_write(&nvm_lock); | 584 | down_write(&nvm_lock); |
@@ -1055,9 +1057,11 @@ static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init) | |||
1055 | strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); | 1057 | strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); |
1056 | info.fs_ppa.ppa = -1; | 1058 | info.fs_ppa.ppa = -1; |
1057 | 1059 | ||
1058 | ret = nvm_init_sysblock(dev, &info); | 1060 | if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) { |
1059 | if (ret) | 1061 | ret = nvm_init_sysblock(dev, &info); |
1060 | return ret; | 1062 | if (ret) |
1063 | return ret; | ||
1064 | } | ||
1061 | 1065 | ||
1062 | memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); | 1066 | memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); |
1063 | 1067 | ||
@@ -1117,7 +1121,10 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg) | |||
1117 | dev->mt = NULL; | 1121 | dev->mt = NULL; |
1118 | } | 1122 | } |
1119 | 1123 | ||
1120 | return nvm_dev_factory(dev, fact.flags); | 1124 | if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) |
1125 | return nvm_dev_factory(dev, fact.flags); | ||
1126 | |||
1127 | return 0; | ||
1121 | } | 1128 | } |
1122 | 1129 | ||
1123 | static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) | 1130 | static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) |
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index d8c75958ced3..307db1ea22de 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c | |||
@@ -300,8 +300,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk) | |||
300 | } | 300 | } |
301 | 301 | ||
302 | page = mempool_alloc(rrpc->page_pool, GFP_NOIO); | 302 | page = mempool_alloc(rrpc->page_pool, GFP_NOIO); |
303 | if (!page) | 303 | if (!page) { |
304 | bio_put(bio); | ||
304 | return -ENOMEM; | 305 | return -ENOMEM; |
306 | } | ||
305 | 307 | ||
306 | while ((slot = find_first_zero_bit(rblk->invalid_pages, | 308 | while ((slot = find_first_zero_bit(rblk->invalid_pages, |
307 | nr_pgs_per_blk)) < nr_pgs_per_blk) { | 309 | nr_pgs_per_blk)) < nr_pgs_per_blk) { |
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h index ef13ac7700c8..f7b37336353f 100644 --- a/drivers/lightnvm/rrpc.h +++ b/drivers/lightnvm/rrpc.h | |||
@@ -174,8 +174,7 @@ static inline sector_t rrpc_get_sector(sector_t laddr) | |||
174 | static inline int request_intersects(struct rrpc_inflight_rq *r, | 174 | static inline int request_intersects(struct rrpc_inflight_rq *r, |
175 | sector_t laddr_start, sector_t laddr_end) | 175 | sector_t laddr_start, sector_t laddr_end) |
176 | { | 176 | { |
177 | return (laddr_end >= r->l_start && laddr_end <= r->l_end) && | 177 | return (laddr_end >= r->l_start) && (laddr_start <= r->l_end); |
178 | (laddr_start >= r->l_start && laddr_start <= r->l_end); | ||
179 | } | 178 | } |
180 | 179 | ||
181 | static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, | 180 | static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, |
@@ -184,6 +183,8 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, | |||
184 | sector_t laddr_end = laddr + pages - 1; | 183 | sector_t laddr_end = laddr + pages - 1; |
185 | struct rrpc_inflight_rq *rtmp; | 184 | struct rrpc_inflight_rq *rtmp; |
186 | 185 | ||
186 | WARN_ON(irqs_disabled()); | ||
187 | |||
187 | spin_lock_irq(&rrpc->inflights.lock); | 188 | spin_lock_irq(&rrpc->inflights.lock); |
188 | list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) { | 189 | list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) { |
189 | if (unlikely(request_intersects(rtmp, laddr, laddr_end))) { | 190 | if (unlikely(request_intersects(rtmp, laddr, laddr_end))) { |
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 5d6237391dcd..b586d84f2518 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig | |||
@@ -17,5 +17,6 @@ config BLK_DEV_NVME_SCSI | |||
17 | and block devices nodes, as well a a translation for a small | 17 | and block devices nodes, as well a a translation for a small |
18 | number of selected SCSI commands to NVMe commands to the NVMe | 18 | number of selected SCSI commands to NVMe commands to the NVMe |
19 | driver. If you don't know what this means you probably want | 19 | driver. If you don't know what this means you probably want |
20 | to say N here, and if you know what it means you probably | 20 | to say N here, unless you run a distro that abuses the SCSI |
21 | want to say N as well. | 21 | emulation to provide stable device names for mount by id, like |
22 | some OpenSuSE and SLES versions. | ||
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c5bf001af559..3cd921e6121e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -1121,7 +1121,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
1121 | ns->queue = blk_mq_init_queue(ctrl->tagset); | 1121 | ns->queue = blk_mq_init_queue(ctrl->tagset); |
1122 | if (IS_ERR(ns->queue)) | 1122 | if (IS_ERR(ns->queue)) |
1123 | goto out_free_ns; | 1123 | goto out_free_ns; |
1124 | queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); | ||
1125 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); | 1124 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); |
1126 | ns->queue->queuedata = ns; | 1125 | ns->queue->queuedata = ns; |
1127 | ns->ctrl = ctrl; | 1126 | ns->ctrl = ctrl; |
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 5cd3725e2fa4..6bb15e4926dc 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c | |||
@@ -146,9 +146,10 @@ struct nvme_nvm_command { | |||
146 | }; | 146 | }; |
147 | }; | 147 | }; |
148 | 148 | ||
149 | #define NVME_NVM_LP_MLC_PAIRS 886 | ||
149 | struct nvme_nvm_lp_mlc { | 150 | struct nvme_nvm_lp_mlc { |
150 | __u16 num_pairs; | 151 | __u16 num_pairs; |
151 | __u8 pairs[886]; | 152 | __u8 pairs[NVME_NVM_LP_MLC_PAIRS]; |
152 | }; | 153 | }; |
153 | 154 | ||
154 | struct nvme_nvm_lp_tbl { | 155 | struct nvme_nvm_lp_tbl { |
@@ -282,9 +283,14 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | |||
282 | memcpy(dst->lptbl.id, src->lptbl.id, 8); | 283 | memcpy(dst->lptbl.id, src->lptbl.id, 8); |
283 | dst->lptbl.mlc.num_pairs = | 284 | dst->lptbl.mlc.num_pairs = |
284 | le16_to_cpu(src->lptbl.mlc.num_pairs); | 285 | le16_to_cpu(src->lptbl.mlc.num_pairs); |
285 | /* 4 bits per pair */ | 286 | |
287 | if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) { | ||
288 | pr_err("nvm: number of MLC pairs not supported\n"); | ||
289 | return -EINVAL; | ||
290 | } | ||
291 | |||
286 | memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, | 292 | memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, |
287 | dst->lptbl.mlc.num_pairs >> 1); | 293 | dst->lptbl.mlc.num_pairs); |
288 | } | 294 | } |
289 | } | 295 | } |
290 | 296 | ||
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 4fb5bb737868..9664d07d807d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -139,9 +139,9 @@ static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl) | |||
139 | u32 val = 0; | 139 | u32 val = 0; |
140 | 140 | ||
141 | if (ctrl->ops->io_incapable(ctrl)) | 141 | if (ctrl->ops->io_incapable(ctrl)) |
142 | return false; | 142 | return true; |
143 | if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val)) | 143 | if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val)) |
144 | return false; | 144 | return true; |
145 | return val & NVME_CSTS_CFS; | 145 | return val & NVME_CSTS_CFS; |
146 | } | 146 | } |
147 | 147 | ||
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 72ef8322d32a..a128672472ec 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -678,6 +678,11 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
678 | blk_mq_start_request(req); | 678 | blk_mq_start_request(req); |
679 | 679 | ||
680 | spin_lock_irq(&nvmeq->q_lock); | 680 | spin_lock_irq(&nvmeq->q_lock); |
681 | if (unlikely(nvmeq->cq_vector < 0)) { | ||
682 | ret = BLK_MQ_RQ_QUEUE_BUSY; | ||
683 | spin_unlock_irq(&nvmeq->q_lock); | ||
684 | goto out; | ||
685 | } | ||
681 | __nvme_submit_cmd(nvmeq, &cmnd); | 686 | __nvme_submit_cmd(nvmeq, &cmnd); |
682 | nvme_process_cq(nvmeq); | 687 | nvme_process_cq(nvmeq); |
683 | spin_unlock_irq(&nvmeq->q_lock); | 688 | spin_unlock_irq(&nvmeq->q_lock); |
@@ -999,7 +1004,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved | |||
999 | if (!blk_mq_request_started(req)) | 1004 | if (!blk_mq_request_started(req)) |
1000 | return; | 1005 | return; |
1001 | 1006 | ||
1002 | dev_warn(nvmeq->q_dmadev, | 1007 | dev_dbg_ratelimited(nvmeq->q_dmadev, |
1003 | "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); | 1008 | "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); |
1004 | 1009 | ||
1005 | status = NVME_SC_ABORT_REQ; | 1010 | status = NVME_SC_ABORT_REQ; |
@@ -2111,16 +2116,12 @@ static void nvme_remove(struct pci_dev *pdev) | |||
2111 | { | 2116 | { |
2112 | struct nvme_dev *dev = pci_get_drvdata(pdev); | 2117 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
2113 | 2118 | ||
2114 | spin_lock(&dev_list_lock); | ||
2115 | list_del_init(&dev->node); | ||
2116 | spin_unlock(&dev_list_lock); | ||
2117 | |||
2118 | pci_set_drvdata(pdev, NULL); | 2119 | pci_set_drvdata(pdev, NULL); |
2119 | flush_work(&dev->reset_work); | ||
2120 | flush_work(&dev->scan_work); | 2120 | flush_work(&dev->scan_work); |
2121 | nvme_remove_namespaces(&dev->ctrl); | 2121 | nvme_remove_namespaces(&dev->ctrl); |
2122 | nvme_uninit_ctrl(&dev->ctrl); | 2122 | nvme_uninit_ctrl(&dev->ctrl); |
2123 | nvme_dev_disable(dev, true); | 2123 | nvme_dev_disable(dev, true); |
2124 | flush_work(&dev->reset_work); | ||
2124 | nvme_dev_remove_admin(dev); | 2125 | nvme_dev_remove_admin(dev); |
2125 | nvme_free_queues(dev, 0); | 2126 | nvme_free_queues(dev, 0); |
2126 | nvme_release_cmb(dev); | 2127 | nvme_release_cmb(dev); |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 1b2f7ffc8b84..d6a9012d42ad 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio) | |||
472 | dio->io_error = -EIO; | 472 | dio->io_error = -EIO; |
473 | 473 | ||
474 | if (dio->is_async && dio->rw == READ && dio->should_dirty) { | 474 | if (dio->is_async && dio->rw == READ && dio->should_dirty) { |
475 | bio_check_pages_dirty(bio); /* transfers ownership */ | ||
476 | err = bio->bi_error; | 475 | err = bio->bi_error; |
476 | bio_check_pages_dirty(bio); /* transfers ownership */ | ||
477 | } else { | 477 | } else { |
478 | bio_for_each_segment_all(bvec, bio, i) { | 478 | bio_for_each_segment_all(bvec, bio, i) { |
479 | struct page *page = bvec->bv_page; | 479 | struct page *page = bvec->bv_page; |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 6915c950e6e8..1f76d8950a57 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) | |||
317 | struct inode_switch_wbs_context *isw = | 317 | struct inode_switch_wbs_context *isw = |
318 | container_of(work, struct inode_switch_wbs_context, work); | 318 | container_of(work, struct inode_switch_wbs_context, work); |
319 | struct inode *inode = isw->inode; | 319 | struct inode *inode = isw->inode; |
320 | struct super_block *sb = inode->i_sb; | ||
320 | struct address_space *mapping = inode->i_mapping; | 321 | struct address_space *mapping = inode->i_mapping; |
321 | struct bdi_writeback *old_wb = inode->i_wb; | 322 | struct bdi_writeback *old_wb = inode->i_wb; |
322 | struct bdi_writeback *new_wb = isw->new_wb; | 323 | struct bdi_writeback *new_wb = isw->new_wb; |
@@ -423,6 +424,7 @@ skip_switch: | |||
423 | wb_put(new_wb); | 424 | wb_put(new_wb); |
424 | 425 | ||
425 | iput(inode); | 426 | iput(inode); |
427 | deactivate_super(sb); | ||
426 | kfree(isw); | 428 | kfree(isw); |
427 | } | 429 | } |
428 | 430 | ||
@@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) | |||
469 | 471 | ||
470 | /* while holding I_WB_SWITCH, no one else can update the association */ | 472 | /* while holding I_WB_SWITCH, no one else can update the association */ |
471 | spin_lock(&inode->i_lock); | 473 | spin_lock(&inode->i_lock); |
474 | |||
472 | if (inode->i_state & (I_WB_SWITCH | I_FREEING) || | 475 | if (inode->i_state & (I_WB_SWITCH | I_FREEING) || |
473 | inode_to_wb(inode) == isw->new_wb) { | 476 | inode_to_wb(inode) == isw->new_wb) |
474 | spin_unlock(&inode->i_lock); | 477 | goto out_unlock; |
475 | goto out_free; | 478 | |
476 | } | 479 | if (!atomic_inc_not_zero(&inode->i_sb->s_active)) |
480 | goto out_unlock; | ||
481 | |||
477 | inode->i_state |= I_WB_SWITCH; | 482 | inode->i_state |= I_WB_SWITCH; |
478 | spin_unlock(&inode->i_lock); | 483 | spin_unlock(&inode->i_lock); |
479 | 484 | ||
@@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) | |||
489 | call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); | 494 | call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); |
490 | return; | 495 | return; |
491 | 496 | ||
497 | out_unlock: | ||
498 | spin_unlock(&inode->i_lock); | ||
492 | out_free: | 499 | out_free: |
493 | if (isw->new_wb) | 500 | if (isw->new_wb) |
494 | wb_put(isw->new_wb); | 501 | wb_put(isw->new_wb); |
diff --git a/fs/inode.c b/fs/inode.c index 9f62db3bcc3e..69b8b526c194 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -154,6 +154,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode) | |||
154 | inode->i_rdev = 0; | 154 | inode->i_rdev = 0; |
155 | inode->dirtied_when = 0; | 155 | inode->dirtied_when = 0; |
156 | 156 | ||
157 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
158 | inode->i_wb_frn_winner = 0; | ||
159 | inode->i_wb_frn_avg_time = 0; | ||
160 | inode->i_wb_frn_history = 0; | ||
161 | #endif | ||
162 | |||
157 | if (security_inode_alloc(inode)) | 163 | if (security_inode_alloc(inode)) |
158 | goto out; | 164 | goto out; |
159 | spin_lock_init(&inode->i_lock); | 165 | spin_lock_init(&inode->i_lock); |
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index d6750111e48e..2190419bdf0a 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h | |||
@@ -135,6 +135,10 @@ enum { | |||
135 | /* Memory types */ | 135 | /* Memory types */ |
136 | NVM_ID_FMTYPE_SLC = 0, | 136 | NVM_ID_FMTYPE_SLC = 0, |
137 | NVM_ID_FMTYPE_MLC = 1, | 137 | NVM_ID_FMTYPE_MLC = 1, |
138 | |||
139 | /* Device capabilities */ | ||
140 | NVM_ID_DCAP_BBLKMGMT = 0x1, | ||
141 | NVM_UD_DCAP_ECC = 0x2, | ||
138 | }; | 142 | }; |
139 | 143 | ||
140 | struct nvm_id_lp_mlc { | 144 | struct nvm_id_lp_mlc { |