author	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-14 18:32:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-14 18:32:19 -0500
commit	e2c5923c349c1738fe8fda980874d93f6fb2e5b6
tree	b97a90170c45211bcc437761653aa8016c34afcd /block/blk.h
parent	abc36be236358162202e86ad88616ff95a755101
parent	a04b5de5050ab8b891128eb2c47a0916fe8622e1
Merge branch 'for-4.15/block' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe:
"This is the main pull request for block storage for 4.15-rc1.
Nothing out of the ordinary in here, and no API changes or anything
like that. Just various new features for drivers, core changes, etc.
In particular, this pull request contains:
- A patch series from Bart, closing the hole in blk/scsi-mq queue
quiescing.
- A series from Christoph, building towards hidden gendisks (for
multipath) and ability to move bio chains around.
- NVMe
- Support for native multipath for NVMe (Christoph).
- Userspace notifications for AENs (Keith).
- Command side-effects support (Keith).
- SGL support (Chaitanya Kulkarni)
- FC fixes and improvements (James Smart)
- Lots of fixes and tweaks (Various)
- bcache
- New maintainer (Michael Lyle)
- Writeback control improvements (Michael)
- Various fixes (Coly, Elena, Eric, Liang, et al)
- lightnvm updates, mostly centered around the pblk interface
(Javier, Hans, and Rakesh).
- Removal of unused bio/bvec kmap atomic interfaces (me, Christoph)
- Writeback series that fix the much discussed hundreds of millions
of sync-all units. This goes all the way, as discussed previously
(me).
- Fix for missing wakeup on writeback timer adjustments (Yafang
Shao).
- Fix laptop mode on blk-mq (me).
- {mq,name} tuple lookup for IO schedulers, allowing us to have
alias names. This means you can use 'deadline' on both !mq and on
mq (where it's called mq-deadline) (me). A sketch of this lookup
follows the quoted changelog below.
- blktrace race fix, oopsing on sg load (me).
- blk-mq optimizations (me).
- Obscure waitqueue race fix for kyber (Omar).
- NBD fixes (Josef).
- Disable writeback throttling by default on bfq, like we do on cfq
(Luca Miccio).
- Series from Ming that enable us to treat flush requests on blk-mq
like any other request. This is a really nice cleanup.
- Series from Ming that improves merging on blk-mq with schedulers,
getting us closer to flipping the switch on scsi-mq again.
- BFQ updates (Paolo).
- blk-mq atomic flags memory ordering fixes (Peter Z).
- Loop cgroup support (Shaohua).
- Lots of minor fixes from lots of different folks, both for core and
driver code"
* 'for-4.15/block' of git://git.kernel.dk/linux-block: (294 commits)
nvme: fix visibility of "uuid" ns attribute
blk-mq: fixup some comment typos and lengths
ide: ide-atapi: fix compile error with defining macro DEBUG
blk-mq: improve tag waiting setup for non-shared tags
brd: remove unused brd_mutex
blk-mq: only run the hardware queue if IO is pending
block: avoid null pointer dereference on null disk
fs: guard_bio_eod() needs to consider partitions
xtensa/simdisk: fix compile error
nvme: expose subsys attribute to sysfs
nvme: create 'slaves' and 'holders' entries for hidden controllers
block: create 'slaves' and 'holders' entries for hidden gendisks
nvme: also expose the namespace identification sysfs files for mpath nodes
nvme: implement multipath access to nvme subsystems
nvme: track shared namespaces
nvme: introduce a nvme_ns_ids structure
nvme: track subsystems
block, nvme: Introduce blk_mq_req_flags_t
block, scsi: Make SCSI quiesce and resume work reliably
block: Add the QUEUE_FLAG_PREEMPT_ONLY request queue flag
...
Diffstat (limited to 'block/blk.h')
-rw-r--r--	block/blk.h	46
1 file changed, 7 insertions(+), 39 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index 85be8b232b37..3f1446937aec 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -123,8 +123,15 @@ void blk_account_io_done(struct request *req);
  * Internal atomic flags for request handling
  */
 enum rq_atomic_flags {
+	/*
+	 * Keep these two bits first - not because we depend on the
+	 * value of them, but we do depend on them being in the same
+	 * byte of storage to ensure ordering on writes. Keeping them
+	 * first will achieve that nicely.
+	 */
 	REQ_ATOM_COMPLETE = 0,
 	REQ_ATOM_STARTED,
+
 	REQ_ATOM_POLL_SLEPT,
 };
 
@@ -149,45 +156,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 
 void blk_insert_flush(struct request *rq);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
-	WARN_ON_ONCE(q->mq_ops);
-
-	while (1) {
-		if (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			return rq;
-		}
-
-		/*
-		 * Flush request is running and flush request isn't queueable
-		 * in the drive, we can hold the queue till flush request is
-		 * finished. Even we don't do this, driver can't dispatch next
-		 * requests and will requeue them. And this can improve
-		 * throughput too. For example, we have request flush1, write1,
-		 * flush 2. flush1 is dispatched, then queue is hold, write1
-		 * isn't inserted to queue. After flush1 is finished, flush2
-		 * will be dispatched. Since disk cache is already clean,
-		 * flush2 will be finished very soon, so looks like flush2 is
-		 * folded to flush1.
-		 * Since the queue is hold, a flag is set to indicate the queue
-		 * should be restarted later. Please see flush_end_io() for
-		 * details.
-		 */
-		if (fq->flush_pending_idx != fq->flush_running_idx &&
-		    !queue_flush_queueable(q)) {
-			fq->flush_queue_delayed = 1;
-			return NULL;
-		}
-		if (unlikely(blk_queue_bypass(q)) ||
-		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
 static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
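The comment added in the first hunk above depends on REQ_ATOM_COMPLETE and
REQ_ATOM_STARTED occupying the same byte of the request's flags word. A
minimal compile-time sketch of that invariant follows; this check is an
illustration only, not something present in the kernel tree.

#include <assert.h>
#include <limits.h>

/* Bit numbers mirrored from the hunk above. */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,

	REQ_ATOM_POLL_SLEPT,
};

/*
 * Two bit numbers land in the same byte of storage when they agree
 * after division by CHAR_BIT; keeping both flags in bits 0..7, i.e.
 * first in the enum, guarantees that.
 */
static_assert(REQ_ATOM_COMPLETE / CHAR_BIT == REQ_ATOM_STARTED / CHAR_BIT,
	      "COMPLETE and STARTED must share a byte for write ordering");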