diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-14 18:32:19 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-14 18:32:19 -0500 |
commit | e2c5923c349c1738fe8fda980874d93f6fb2e5b6 (patch) | |
tree | b97a90170c45211bcc437761653aa8016c34afcd /block/blk-mq.h | |
parent | abc36be236358162202e86ad88616ff95a755101 (diff) | |
parent | a04b5de5050ab8b891128eb2c47a0916fe8622e1 (diff) |
Merge branch 'for-4.15/block' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe:
"This is the main pull request for block storage for 4.15-rc1.
Nothing out of the ordinary in here, and no API changes or anything
like that. Just various new features for drivers, core changes, etc.
In particular, this pull request contains:
- A patch series from Bart, closing the hole on blk/scsi-mq queue
quiescing.
- A series from Christoph, building towards hidden gendisks (for
multipath) and ability to move bio chains around.
- NVMe
- Support for native multipath for NVMe (Christoph).
- Userspace notifications for AENs (Keith).
- Command side-effects support (Keith).
- SGL support (Chaitanya Kulkarni)
- FC fixes and improvements (James Smart)
- Lots of fixes and tweaks (Various)
- bcache
- New maintainer (Michael Lyle)
- Writeback control improvements (Michael)
- Various fixes (Coly, Elena, Eric, Liang, et al)
- lightnvm updates, mostly centered around the pblk interface
(Javier, Hans, and Rakesh).
- Removal of unused bio/bvec kmap atomic interfaces (me, Christoph)
- Writeback series that fix the much discussed hundreds of millions
of sync-all units. This goes all the way, as discussed previously
(me).
- Fix for missing wakeup on writeback timer adjustments (Yafang
Shao).
- Fix laptop mode on blk-mq (me).
- {mq,name} tuple lookup for IO schedulers, allowing us to have
alias names. This means you can use 'deadline' on both !mq and on
mq (where it's called mq-deadline). (me).
- blktrace race fix, oopsing on sg load (me).
- blk-mq optimizations (me).
- Obscure waitqueue race fix for kyber (Omar).
- NBD fixes (Josef).
- Disable writeback throttling by default on bfq, like we do on cfq
(Luca Miccio).
- Series from Ming that enable us to treat flush requests on blk-mq
like any other request. This is a really nice cleanup.
- Series from Ming that improves merging on blk-mq with schedulers,
getting us closer to flipping the switch on scsi-mq again.
- BFQ updates (Paolo).
- blk-mq atomic flags memory ordering fixes (Peter Z).
- Loop cgroup support (Shaohua).
- Lots of minor fixes from lots of different folks, both for core and
driver code"
* 'for-4.15/block' of git://git.kernel.dk/linux-block: (294 commits)
nvme: fix visibility of "uuid" ns attribute
blk-mq: fixup some comment typos and lengths
ide: ide-atapi: fix compile error with defining macro DEBUG
blk-mq: improve tag waiting setup for non-shared tags
brd: remove unused brd_mutex
blk-mq: only run the hardware queue if IO is pending
block: avoid null pointer dereference on null disk
fs: guard_bio_eod() needs to consider partitions
xtensa/simdisk: fix compile error
nvme: expose subsys attribute to sysfs
nvme: create 'slaves' and 'holders' entries for hidden controllers
block: create 'slaves' and 'holders' entries for hidden gendisks
nvme: also expose the namespace identification sysfs files for mpath nodes
nvme: implement multipath access to nvme subsystems
nvme: track shared namespaces
nvme: introduce a nvme_ns_ids structure
nvme: track subsystems
block, nvme: Introduce blk_mq_req_flags_t
block, scsi: Make SCSI quiesce and resume work reliably
block: Add the QUEUE_FLAG_PREEMPT_ONLY request queue flag
...
Diffstat (limited to 'block/blk-mq.h')
-rw-r--r-- | block/blk-mq.h | 60 |
1 files changed, 55 insertions, 5 deletions
diff --git a/block/blk-mq.h b/block/blk-mq.h index 4933af9d61f7..6c7c3ff5bf62 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -3,6 +3,7 @@ | |||
3 | #define INT_BLK_MQ_H | 3 | #define INT_BLK_MQ_H |
4 | 4 | ||
5 | #include "blk-stat.h" | 5 | #include "blk-stat.h" |
6 | #include "blk-mq-tag.h" | ||
6 | 7 | ||
7 | struct blk_mq_tag_set; | 8 | struct blk_mq_tag_set; |
8 | 9 | ||
@@ -26,16 +27,16 @@ struct blk_mq_ctx { | |||
26 | struct kobject kobj; | 27 | struct kobject kobj; |
27 | } ____cacheline_aligned_in_smp; | 28 | } ____cacheline_aligned_in_smp; |
28 | 29 | ||
29 | void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); | ||
30 | void blk_mq_freeze_queue(struct request_queue *q); | 30 | void blk_mq_freeze_queue(struct request_queue *q); |
31 | void blk_mq_free_queue(struct request_queue *q); | 31 | void blk_mq_free_queue(struct request_queue *q); |
32 | int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); | 32 | int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); |
33 | void blk_mq_wake_waiters(struct request_queue *q); | 33 | void blk_mq_wake_waiters(struct request_queue *q); |
34 | bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *); | 34 | bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool); |
35 | void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); | 35 | void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); |
36 | bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx); | ||
37 | bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, | 36 | bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, |
38 | bool wait); | 37 | bool wait); |
38 | struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, | ||
39 | struct blk_mq_ctx *start); | ||
39 | 40 | ||
40 | /* | 41 | /* |
41 | * Internal helpers for allocating/freeing the request map | 42 | * Internal helpers for allocating/freeing the request map |
@@ -55,7 +56,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, | |||
55 | */ | 56 | */ |
56 | void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, | 57 | void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, |
57 | bool at_head); | 58 | bool at_head); |
58 | void blk_mq_request_bypass_insert(struct request *rq); | 59 | void blk_mq_request_bypass_insert(struct request *rq, bool run_queue); |
59 | void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, | 60 | void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, |
60 | struct list_head *list); | 61 | struct list_head *list); |
61 | 62 | ||
@@ -109,7 +110,7 @@ static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) | |||
109 | struct blk_mq_alloc_data { | 110 | struct blk_mq_alloc_data { |
110 | /* input parameter */ | 111 | /* input parameter */ |
111 | struct request_queue *q; | 112 | struct request_queue *q; |
112 | unsigned int flags; | 113 | blk_mq_req_flags_t flags; |
113 | unsigned int shallow_depth; | 114 | unsigned int shallow_depth; |
114 | 115 | ||
115 | /* input & output parameter */ | 116 | /* input & output parameter */ |
@@ -138,4 +139,53 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) | |||
138 | void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, | 139 | void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, |
139 | unsigned int inflight[2]); | 140 | unsigned int inflight[2]); |
140 | 141 | ||
142 | static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx) | ||
143 | { | ||
144 | struct request_queue *q = hctx->queue; | ||
145 | |||
146 | if (q->mq_ops->put_budget) | ||
147 | q->mq_ops->put_budget(hctx); | ||
148 | } | ||
149 | |||
150 | static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx) | ||
151 | { | ||
152 | struct request_queue *q = hctx->queue; | ||
153 | |||
154 | if (q->mq_ops->get_budget) | ||
155 | return q->mq_ops->get_budget(hctx); | ||
156 | return true; | ||
157 | } | ||
158 | |||
159 | static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, | ||
160 | struct request *rq) | ||
161 | { | ||
162 | blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag); | ||
163 | rq->tag = -1; | ||
164 | |||
165 | if (rq->rq_flags & RQF_MQ_INFLIGHT) { | ||
166 | rq->rq_flags &= ~RQF_MQ_INFLIGHT; | ||
167 | atomic_dec(&hctx->nr_active); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx, | ||
172 | struct request *rq) | ||
173 | { | ||
174 | if (rq->tag == -1 || rq->internal_tag == -1) | ||
175 | return; | ||
176 | |||
177 | __blk_mq_put_driver_tag(hctx, rq); | ||
178 | } | ||
179 | |||
180 | static inline void blk_mq_put_driver_tag(struct request *rq) | ||
181 | { | ||
182 | struct blk_mq_hw_ctx *hctx; | ||
183 | |||
184 | if (rq->tag == -1 || rq->internal_tag == -1) | ||
185 | return; | ||
186 | |||
187 | hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); | ||
188 | __blk_mq_put_driver_tag(hctx, rq); | ||
189 | } | ||
190 | |||
141 | #endif | 191 | #endif |