diff options
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-01-21 22:58:02 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-01-21 22:58:02 -0500
commit    3e1e21c7bfcfa9bf06c07f48a13faca2f62b3339 (patch)
tree      b26e480594c8e978c48118e2e3d624d1386f51df /block
parent    0a13daedf7ffc71b0c374a036355da7fddb20d6d (diff)
parent    a9cf8284b45110a4d98aea180a89c857e53bf850 (diff)
Merge branch 'for-4.5/nvme' of git://git.kernel.dk/linux-block
Pull NVMe updates from Jens Axboe:
"Last branch for this series is the nvme changes. It's in a separate
branch to avoid splitting too much between core and NVMe changes,
since NVMe is still helping drive some blk-mq changes. That said, not
a huge amount of core changes in here. The grunt of the work is the
continued split of the code"
* 'for-4.5/nvme' of git://git.kernel.dk/linux-block: (67 commits)
uapi: update install list after nvme.h rename
NVMe: Export NVMe attributes to sysfs group
NVMe: Shutdown controller only for power-off
NVMe: IO queue deletion re-write
NVMe: Remove queue freezing on resets
NVMe: Use a retryable error code on reset
NVMe: Fix admin queue ring wrap
nvme: make SG_IO support optional
nvme: fixes for NVME_IOCTL_IO_CMD on the char device
nvme: synchronize access to ctrl->namespaces
nvme: Move nvme_freeze/unfreeze_queues to nvme core
PCI/AER: include header file
NVMe: Export namespace attributes to sysfs
NVMe: Add pci error handlers
block: remove REQ_NO_TIMEOUT flag
nvme: merge iod and cmd_info
nvme: meta_sg doesn't have to be an array
nvme: properly free resources for cancelled command
nvme: simplify completion handling
nvme: special case AEN requests
...
Diffstat (limited to 'block')
-rw-r--r--   block/bio-integrity.c   13
-rw-r--r--   block/blk-core.c         8
-rw-r--r--   block/blk-mq.c          13
-rw-r--r--   block/blk-timeout.c     11
-rw-r--r--   block/blk.h              2
5 files changed, 29 insertions(+), 18 deletions(-)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c index f6325d573c10..711e4d8de6fa 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c | |||
@@ -66,7 +66,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, | |||
66 | } | 66 | } |
67 | 67 | ||
68 | if (unlikely(!bip)) | 68 | if (unlikely(!bip)) |
69 | return NULL; | 69 | return ERR_PTR(-ENOMEM); |
70 | 70 | ||
71 | memset(bip, 0, sizeof(*bip)); | 71 | memset(bip, 0, sizeof(*bip)); |
72 | 72 | ||
@@ -89,7 +89,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, | |||
89 | return bip; | 89 | return bip; |
90 | err: | 90 | err: |
91 | mempool_free(bip, bs->bio_integrity_pool); | 91 | mempool_free(bip, bs->bio_integrity_pool); |
92 | return NULL; | 92 | return ERR_PTR(-ENOMEM); |
93 | } | 93 | } |
94 | EXPORT_SYMBOL(bio_integrity_alloc); | 94 | EXPORT_SYMBOL(bio_integrity_alloc); |
95 | 95 | ||
@@ -298,10 +298,10 @@ int bio_integrity_prep(struct bio *bio) | |||
298 | 298 | ||
299 | /* Allocate bio integrity payload and integrity vectors */ | 299 | /* Allocate bio integrity payload and integrity vectors */ |
300 | bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); | 300 | bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); |
301 | if (unlikely(bip == NULL)) { | 301 | if (IS_ERR(bip)) { |
302 | printk(KERN_ERR "could not allocate data integrity bioset\n"); | 302 | printk(KERN_ERR "could not allocate data integrity bioset\n"); |
303 | kfree(buf); | 303 | kfree(buf); |
304 | return -EIO; | 304 | return PTR_ERR(bip); |
305 | } | 305 | } |
306 | 306 | ||
307 | bip->bip_flags |= BIP_BLOCK_INTEGRITY; | 307 | bip->bip_flags |= BIP_BLOCK_INTEGRITY; |
@@ -465,9 +465,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, | |||
465 | BUG_ON(bip_src == NULL); | 465 | BUG_ON(bip_src == NULL); |
466 | 466 | ||
467 | bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); | 467 | bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); |
468 | 468 | if (IS_ERR(bip)) | |
469 | if (bip == NULL) | 469 | return PTR_ERR(bip); |
470 | return -EIO; | ||
471 | 470 | ||
472 | memcpy(bip->bip_vec, bip_src->bip_vec, | 471 | memcpy(bip->bip_vec, bip_src->bip_vec, |
473 | bip_src->bip_vcnt * sizeof(struct bio_vec)); | 472 | bip_src->bip_vcnt * sizeof(struct bio_vec)); |
diff --git a/block/blk-core.c b/block/blk-core.c index 476244d59309..ab51685988c2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -680,6 +680,13 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref) | |||
680 | wake_up_all(&q->mq_freeze_wq); | 680 | wake_up_all(&q->mq_freeze_wq); |
681 | } | 681 | } |
682 | 682 | ||
683 | static void blk_rq_timed_out_timer(unsigned long data) | ||
684 | { | ||
685 | struct request_queue *q = (struct request_queue *)data; | ||
686 | |||
687 | kblockd_schedule_work(&q->timeout_work); | ||
688 | } | ||
689 | |||
683 | struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | 690 | struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) |
684 | { | 691 | { |
685 | struct request_queue *q; | 692 | struct request_queue *q; |
@@ -841,6 +848,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, | |||
841 | if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) | 848 | if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) |
842 | goto fail; | 849 | goto fail; |
843 | 850 | ||
851 | INIT_WORK(&q->timeout_work, blk_timeout_work); | ||
844 | q->request_fn = rfn; | 852 | q->request_fn = rfn; |
845 | q->prep_rq_fn = NULL; | 853 | q->prep_rq_fn = NULL; |
846 | q->unprep_rq_fn = NULL; | 854 | q->unprep_rq_fn = NULL; |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 6889d7183a2a..4c0622fae413 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -603,8 +603,6 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, | |||
603 | blk_mq_complete_request(rq, -EIO); | 603 | blk_mq_complete_request(rq, -EIO); |
604 | return; | 604 | return; |
605 | } | 605 | } |
606 | if (rq->cmd_flags & REQ_NO_TIMEOUT) | ||
607 | return; | ||
608 | 606 | ||
609 | if (time_after_eq(jiffies, rq->deadline)) { | 607 | if (time_after_eq(jiffies, rq->deadline)) { |
610 | if (!blk_mark_rq_complete(rq)) | 608 | if (!blk_mark_rq_complete(rq)) |
@@ -615,15 +613,19 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, | |||
615 | } | 613 | } |
616 | } | 614 | } |
617 | 615 | ||
618 | static void blk_mq_rq_timer(unsigned long priv) | 616 | static void blk_mq_timeout_work(struct work_struct *work) |
619 | { | 617 | { |
620 | struct request_queue *q = (struct request_queue *)priv; | 618 | struct request_queue *q = |
619 | container_of(work, struct request_queue, timeout_work); | ||
621 | struct blk_mq_timeout_data data = { | 620 | struct blk_mq_timeout_data data = { |
622 | .next = 0, | 621 | .next = 0, |
623 | .next_set = 0, | 622 | .next_set = 0, |
624 | }; | 623 | }; |
625 | int i; | 624 | int i; |
626 | 625 | ||
626 | if (blk_queue_enter(q, true)) | ||
627 | return; | ||
628 | |||
627 | blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); | 629 | blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); |
628 | 630 | ||
629 | if (data.next_set) { | 631 | if (data.next_set) { |
@@ -638,6 +640,7 @@ static void blk_mq_rq_timer(unsigned long priv) | |||
638 | blk_mq_tag_idle(hctx); | 640 | blk_mq_tag_idle(hctx); |
639 | } | 641 | } |
640 | } | 642 | } |
643 | blk_queue_exit(q); | ||
641 | } | 644 | } |
642 | 645 | ||
643 | /* | 646 | /* |
@@ -2008,7 +2011,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, | |||
2008 | hctxs[i]->queue_num = i; | 2011 | hctxs[i]->queue_num = i; |
2009 | } | 2012 | } |
2010 | 2013 | ||
2011 | setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); | 2014 | INIT_WORK(&q->timeout_work, blk_mq_timeout_work); |
2012 | blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); | 2015 | blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); |
2013 | 2016 | ||
2014 | q->nr_queues = nr_cpu_ids; | 2017 | q->nr_queues = nr_cpu_ids; |
diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 3610af561748..a30441a200c0 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c | |||
@@ -127,13 +127,16 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout | |||
127 | } | 127 | } |
128 | } | 128 | } |
129 | 129 | ||
130 | void blk_rq_timed_out_timer(unsigned long data) | 130 | void blk_timeout_work(struct work_struct *work) |
131 | { | 131 | { |
132 | struct request_queue *q = (struct request_queue *) data; | 132 | struct request_queue *q = |
133 | container_of(work, struct request_queue, timeout_work); | ||
133 | unsigned long flags, next = 0; | 134 | unsigned long flags, next = 0; |
134 | struct request *rq, *tmp; | 135 | struct request *rq, *tmp; |
135 | int next_set = 0; | 136 | int next_set = 0; |
136 | 137 | ||
138 | if (blk_queue_enter(q, true)) | ||
139 | return; | ||
137 | spin_lock_irqsave(q->queue_lock, flags); | 140 | spin_lock_irqsave(q->queue_lock, flags); |
138 | 141 | ||
139 | list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) | 142 | list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) |
@@ -143,6 +146,7 @@ void blk_rq_timed_out_timer(unsigned long data) | |||
143 | mod_timer(&q->timeout, round_jiffies_up(next)); | 146 | mod_timer(&q->timeout, round_jiffies_up(next)); |
144 | 147 | ||
145 | spin_unlock_irqrestore(q->queue_lock, flags); | 148 | spin_unlock_irqrestore(q->queue_lock, flags); |
149 | blk_queue_exit(q); | ||
146 | } | 150 | } |
147 | 151 | ||
148 | /** | 152 | /** |
@@ -193,9 +197,6 @@ void blk_add_timer(struct request *req) | |||
193 | struct request_queue *q = req->q; | 197 | struct request_queue *q = req->q; |
194 | unsigned long expiry; | 198 | unsigned long expiry; |
195 | 199 | ||
196 | if (req->cmd_flags & REQ_NO_TIMEOUT) | ||
197 | return; | ||
198 | |||
199 | /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ | 200 | /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ |
200 | if (!q->mq_ops && !q->rq_timed_out_fn) | 201 | if (!q->mq_ops && !q->rq_timed_out_fn) |
201 | return; | 202 | return; |
diff --git a/block/blk.h b/block/blk.h index c43926d3d74d..70e4aee9cdcb 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -93,7 +93,7 @@ static inline void blk_flush_integrity(void) | |||
93 | } | 93 | } |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | void blk_rq_timed_out_timer(unsigned long data); | 96 | void blk_timeout_work(struct work_struct *work); |
97 | unsigned long blk_rq_timeout(unsigned long timeout); | 97 | unsigned long blk_rq_timeout(unsigned long timeout); |
98 | void blk_add_timer(struct request *req); | 98 | void blk_add_timer(struct request *req); |
99 | void blk_delete_timer(struct request *); | 99 | void blk_delete_timer(struct request *); |