| field | value |
|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org>, 2015-11-12 18:54:30 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>, 2015-11-12 18:54:30 -0500 |
| commit | 5e2078b2899da31ff4c9854e932b186685d5bea0 |
| tree | 8184fc55e70840c5f7345044e4d2dc28cb12120f |
| parent | a4d8c7c9f7754405c52c59e1b1e984df5749d7bb |
| parent | e3a7a3bf362e2a8acc301e5eaec2631e740a8a95 |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull misc block fixes from Jens Axboe:
"Stuff that got collected after the merge window opened. This
contains:
- NVMe:
- Fix for non-striped transfer size setting for NVMe from
Sathyavathi.
- (Some) support for the weird Apple nvme controller in the
macbooks. From Stephan Günther.
- The error value leak for dax from Al.
- A few minor blk-mq tweaks from me.
- Add the new linux-block@vger.kernel.org mailing list to the
MAINTAINERS file.
- Discard fix for brd, from Jan.
- A kerneldoc warning for block core from Randy.
- An older fix from Vivek, converting a WARN_ON() to a rate limited
printk when a device is hot removed with dirty inodes"
* 'for-linus' of git://git.kernel.dk/linux-block:
block: don't hardcode blk_qc_t -> tag mask
dax_io(): don't let non-error value escape via retval instead of EFAULT
block: fix blk-core.c kernel-doc warning
fs/block_dev.c: Remove WARN_ON() when inode writeback fails
NVMe: add support for Apple NVMe controller
NVMe: use split lo_hi_{read,write}q
blk-mq: mark __blk_mq_complete_request() static
MAINTAINERS: add reference to new linux-block list
NVMe: Increase the max transfer size when mdts is 0
brd: Refuse improperly aligned discard requests
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | MAINTAINERS | 1 |
| -rw-r--r-- | block/blk-core.c | 3 |
| -rw-r--r-- | block/blk-mq.c | 2 |
| -rw-r--r-- | block/blk-mq.h | 1 |
| -rw-r--r-- | drivers/block/brd.c | 3 |
| -rw-r--r-- | drivers/nvme/host/pci.c | 15 |
| -rw-r--r-- | fs/block_dev.c | 15 |
| -rw-r--r-- | fs/dax.c | 4 |
| -rw-r--r-- | include/linux/blk_types.h | 2 |

9 files changed, 33 insertions(+), 13 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 8d941d6818cd..ecc43c255eb8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2210,6 +2210,7 @@ F: drivers/leds/leds-blinkm.c
 
 BLOCK LAYER
 M: Jens Axboe <axboe@kernel.dk>
+L: linux-block@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S: Maintained
 F: block/
diff --git a/block/blk-core.c b/block/blk-core.c
index 2bbf08cd2900..5131993b23a1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1575,6 +1575,9 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
  * @request_count: out parameter for number of traversed plugged requests
+ * @same_queue_rq: pointer to &struct request that gets filled in when
+ * another request associated with @q is found on the plug list
+ * (optional, may be %NULL)
  *
  * Determine whether @bio being queued on @q can be merged with a request
  * on %current's plugged list. Returns %true if merge was successful,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 86bd5b25288e..3ae09de62f19 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -358,7 +358,7 @@ static void blk_mq_ipi_complete_request(struct request *rq)
         put_cpu();
 }
 
-void __blk_mq_complete_request(struct request *rq)
+static void __blk_mq_complete_request(struct request *rq)
 {
         struct request_queue *q = rq->q;
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index b44dce165761..713820b47b31 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -25,7 +25,6 @@ struct blk_mq_ctx {
         struct kobject kobj;
 } ____cacheline_aligned_in_smp;
 
-void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c9f9c30d6467..a5880f4ab40e 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -337,6 +337,9 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
                 goto io_error;
 
         if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+                if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
+                    bio->bi_iter.bi_size & ~PAGE_MASK)
+                        goto io_error;
                 discard_from_brd(brd, sector, bio->bi_iter.bi_size);
                 goto out;
         }
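The new test refuses REQ_DISCARD bios whose start sector or length is not page-aligned, because brd backs the device with full pages and its discard path operates on a page at a time. Below is a standalone sketch of the same alignment test; the helper name and the 4 KiB page / 512-byte sector constants are illustrative, not taken from the driver.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants: 4 KiB pages, 512-byte sectors (SECTOR_SHIFT = 9). */
#define PAGE_SIZE        4096u
#define SECTOR_SHIFT     9
#define SECTORS_PER_PAGE (PAGE_SIZE >> SECTOR_SHIFT)   /* 8 */

/* Hypothetical helper mirroring the shape of the patch's check: the discard
 * must start on a page boundary and cover a whole number of pages. */
static bool discard_is_aligned(uint64_t sector, uint32_t bytes)
{
        return !(sector & (SECTORS_PER_PAGE - 1)) && !(bytes % PAGE_SIZE);
}

int main(void)
{
        printf("%d\n", discard_is_aligned(8, 8192));   /* 1: page-aligned, two pages */
        printf("%d\n", discard_is_aligned(3, 4096));   /* 0: starts mid-page         */
        printf("%d\n", discard_is_aligned(8, 6144));   /* 0: ends mid-page           */
        return 0;
}
```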
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3dfc28875cc3..8187df204695 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1725,7 +1725,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
         int result;
         u32 aqa;
-        u64 cap = readq(&dev->bar->cap);
+        u64 cap = lo_hi_readq(&dev->bar->cap);
         struct nvme_queue *nvmeq;
         unsigned page_shift = PAGE_SHIFT;
         unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
@@ -1774,8 +1774,8 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
         dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 
         writel(aqa, &dev->bar->aqa);
-        writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
-        writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+        lo_hi_writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+        lo_hi_writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
 
         result = nvme_enable_ctrl(dev, cap);
         if (result)
@@ -2606,7 +2606,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
         struct pci_dev *pdev = to_pci_dev(dev->dev);
         int res;
         struct nvme_id_ctrl *ctrl;
-        int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+        int shift = NVME_CAP_MPSMIN(lo_hi_readq(&dev->bar->cap)) + 12;
 
         res = nvme_identify_ctrl(dev, &ctrl);
         if (res) {
@@ -2622,6 +2622,8 @@ static int nvme_dev_add(struct nvme_dev *dev)
         memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
         if (ctrl->mdts)
                 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
+        else
+                dev->max_hw_sectors = UINT_MAX;
         if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
                         (pdev->device == 0x0953) && ctrl->vs[3]) {
                 unsigned int max_hw_sectors;
@@ -2695,7 +2697,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
                 goto unmap;
         }
 
-        cap = readq(&dev->bar->cap);
+        cap = lo_hi_readq(&dev->bar->cap);
         dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
         dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
         dev->dbs = ((void __iomem *)dev->bar) + 4096;
@@ -2758,7 +2760,7 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
          * queues than admin tags.
          */
         set_current_state(TASK_RUNNING);
-        nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+        nvme_disable_ctrl(dev, lo_hi_readq(&dev->bar->cap));
         nvme_clear_queue(dev->queues[0]);
         flush_kthread_worker(dq->worker);
         nvme_disable_queue(dev, 0);
@@ -3401,6 +3403,7 @@ static const struct pci_error_handlers nvme_err_handler = {
 
 static const struct pci_device_id nvme_id_table[] = {
         { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
         { 0, }
 };
 MODULE_DEVICE_TABLE(pci, nvme_id_table);
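Two of the NVMe changes are worth a little unpacking. The readq()/writeq() calls become lo_hi_readq()/lo_hi_writeq(): each 64-bit BAR register is now accessed as two 32-bit operations, low word first, which is the tolerant pattern for controllers that mishandle a single 64-bit MMIO access. Both this change and the new Apple PCI ID come from the Apple NVMe enablement mentioned in the pull message. A minimal user-space sketch of the split access pattern follows; plain volatile pointers stand in for readl()/writel() on an __iomem mapping, and the function names are illustrative rather than the kernel's own helpers.

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch of a "lo-hi" 64-bit register access built from two 32-bit accesses:
 * touch the low 32 bits first, then the high 32 bits. */
static uint64_t lo_hi_read64(volatile uint32_t *reg)
{
        uint32_t lo = reg[0];
        uint32_t hi = reg[1];

        return ((uint64_t)hi << 32) | lo;
}

static void lo_hi_write64(uint64_t val, volatile uint32_t *reg)
{
        reg[0] = (uint32_t)val;         /* low word first */
        reg[1] = (uint32_t)(val >> 32); /* then high word */
}

int main(void)
{
        uint32_t fake_bar[2] = { 0, 0 };   /* stands in for a 64-bit register */

        lo_hi_write64(0x0011223344556677ULL, fake_bar);
        printf("0x%016llx\n", (unsigned long long)lo_hi_read64(fake_bar));
        return 0;
}
```

The mdts change is unit arithmetic: ctrl->mdts expresses the maximum transfer size as a power of two in units of the controller's minimum page size (2^(12 + MPSMIN) bytes), so 1 << (mdts + shift - 9) converts it to 512-byte sectors. For example, mdts = 5 with a 4 KiB minimum page gives 1 << (5 + 12 - 9) = 256 sectors, i.e. 128 KiB. An mdts of 0 means the controller reports no limit, hence the new UINT_MAX default instead of leaving max_hw_sectors unset.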
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0a793c7930eb..bb0dfb1c7af1 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -50,12 +50,21 @@ struct block_device *I_BDEV(struct inode *inode)
 }
 EXPORT_SYMBOL(I_BDEV);
 
-static void bdev_write_inode(struct inode *inode)
+static void bdev_write_inode(struct block_device *bdev)
 {
+        struct inode *inode = bdev->bd_inode;
+        int ret;
+
         spin_lock(&inode->i_lock);
         while (inode->i_state & I_DIRTY) {
                 spin_unlock(&inode->i_lock);
-                WARN_ON_ONCE(write_inode_now(inode, true));
+                ret = write_inode_now(inode, true);
+                if (ret) {
+                        char name[BDEVNAME_SIZE];
+                        pr_warn_ratelimited("VFS: Dirty inode writeback failed "
+                                        "for block device %s (err=%d).\n",
+                                        bdevname(bdev, name), ret);
+                }
                 spin_lock(&inode->i_lock);
         }
         spin_unlock(&inode->i_lock);
@@ -1504,7 +1513,7 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                  * ->release can cause the queue to disappear, so flush all
                  * dirty data before.
                  */
-                bdev_write_inode(bdev->bd_inode);
+                bdev_write_inode(bdev);
         }
         if (bdev->bd_contains == bdev) {
                 if (disk->fops->release)
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -174,8 +174,10 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                 else
                         len = iov_iter_zero(max - pos, iter);
 
-                if (!len)
+                if (!len) {
+                        retval = -EFAULT;
                         break;
+                }
 
                 pos += len;
                 addr += len;
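The one-line dax fix handles the case where the iov_iter copy makes no progress: len comes back 0, the loop breaks, and, as the commit title says, a non-error value could previously escape via retval instead of -EFAULT. Below is a rough user-space sketch of the convention the fix restores (report partial progress if any was made, otherwise the error); the helper names and the simplified loop are illustrative, not dax_io() itself.

```c
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define EFAULT 14   /* matches the kernel's errno value */

/* Hypothetical copy step: returns how many bytes it managed to copy;
 * a return of 0 models a faulting user buffer. */
static size_t copy_step(char *dst, const char *src, size_t n)
{
        memcpy(dst, src, n);
        return n;
}

/* If a copy makes no progress, record -EFAULT; only let that error escape
 * when nothing at all was transferred, otherwise report the partial count. */
static long do_io(char *dst, const char *src, size_t total, size_t chunk)
{
        size_t pos = 0;
        long retval = 0;   /* in dax_io() this can hold a non-error value,
                            * which is exactly what used to leak out */

        while (pos < total) {
                size_t n = total - pos < chunk ? total - pos : chunk;
                size_t len = copy_step(dst + pos, src + pos, n);

                if (!len) {
                        retval = -EFAULT;   /* the fix: make the fault visible */
                        break;
                }
                pos += len;
        }
        return pos ? (long)pos : retval;
}

int main(void)
{
        char src[32] = "some payload", dst[32];

        printf("%ld\n", do_io(dst, src, sizeof(src), 8));   /* prints 32 */
        return 0;
}
```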
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 641e5a3ed58c..0fb65843ec1e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -265,7 +265,7 @@ static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
 
 static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
 {
-        return cookie & 0xffff;
+        return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
 }
 
 #endif /* __LINUX_BLK_TYPES_H */
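blk_qc_t is the cookie that bio submission hands back so a poller can later locate the right hardware queue and tag: the bits above BLK_QC_T_SHIFT carry the queue number and the bits below it carry the tag, so the tag mask should be derived from the shift rather than hardcoded as 0xffff. A standalone sketch of the encode/decode pair follows, assuming BLK_QC_T_SHIFT is 16 as in this kernel; the encode helper and the queue-number decoder are reconstructions for illustration, since only blk_qc_t_to_tag() appears in the diff.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t blk_qc_t;

#define BLK_QC_T_SHIFT 16   /* assumed value; the point of the fix is that the
                             * decode side must track it, not hardcode 0xffff */

/* Pack a hardware-queue number and a per-queue tag into one cookie. */
static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
        return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
        return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
        return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

int main(void)
{
        blk_qc_t cookie = blk_tag_to_qc_t(42, 3);

        printf("queue %u, tag %u\n",
               blk_qc_t_to_queue_num(cookie), blk_qc_t_to_tag(cookie));
        return 0;
}
```

Deriving the mask from the shift keeps the two decode helpers consistent by construction: if the split between queue bits and tag bits ever moves, blk_qc_t_to_tag() stays correct without a separate edit.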
