author    Linus Torvalds <torvalds@linux-foundation.org>  2018-01-29 14:51:49 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-01-29 14:51:49 -0500
commit    0a4b6e2f80aad46fb55a5cf7b1664c0aef030ee0 (patch)
tree      cefccd67dc1f27bb45830f6b8065dd4a1c05e83b /block/blk.h
parent    9697e9da84299d0d715d515dd2cc48f1eceb277d (diff)
parent    796baeeef85a40b3495a907fb7425086e7010102 (diff)
Merge branch 'for-4.16/block' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "This is the main pull request for block IO related changes for the 4.16 kernel. Nothing major in this pull request, but a good amount of improvements and fixes all over the map. This contains:

  - BFQ improvements, fixes, and cleanups from Angelo, Chiara, and Paolo.

  - Support for SMR zones for deadline and mq-deadline from Damien and Christoph.

  - Set of fixes for bcache by way of Michael Lyle, including fixes from himself, Kent, Rui, Tang, and Coly.

  - Series from Matias for lightnvm with fixes from Hans Holmberg, Javier, and Matias. Mostly centered around pblk, and removing rrpc 1.2 in preparation for supporting 2.0.

  - A couple of NVMe pull requests from Christoph. Nothing major in here, just fixes and cleanups, and support for command tracing from Johannes.

  - Support for blk-throttle for tracking reads and writes separately. From Joseph Qi. A few cleanups/fixes also for blk-throttle from Weiping.

  - Series from Mike Snitzer that enables dm to register its queue more logically, something that's always been problematic on dm since it's a stacked device.

  - Series from Ming cleaning up some of the bio accessor use, in preparation for supporting multipage bvecs.

  - Various fixes from Ming closing up holes around queue mapping and quiescing.

  - BSD partition fix from Richard Narron, fixing a problem where we can't mount newer (10/11) FreeBSD partitions.

  - Series from Tejun reworking blk-mq timeout handling. The previous scheme relied on atomic bits, but it had races where we would think a request had timed out if it got reused at the wrong time.

  - null_blk now supports faking timeouts, to enable us to better exercise and test that functionality separately. From me.

  - Kill the separate atomic poll bit in the request struct. After this, we don't use the atomic bits on blk-mq anymore at all. From me.

  - sgl_alloc/free helpers from Bart.

  - Heavily contended tag case scalability improvement from me.

  - Various little fixes and cleanups from Arnd, Bart, Corentin, Douglas, Eryu, Goldwyn, and myself"

* 'for-4.16/block' of git://git.kernel.dk/linux-block: (186 commits)
  block: remove smart1,2.h
  nvme: add tracepoint for nvme_complete_rq
  nvme: add tracepoint for nvme_setup_cmd
  nvme-pci: introduce RECONNECTING state to mark initializing procedure
  nvme-rdma: remove redundant boolean for inline_data
  nvme: don't free uuid pointer before printing it
  nvme-pci: Suspend queues after deleting them
  bsg: use pr_debug instead of hand crafted macros
  blk-mq-debugfs: don't allow write on attributes with seq_operations set
  nvme-pci: Fix queue double allocations
  block: Set BIO_TRACE_COMPLETION on new bio during split
  blk-throttle: use queue_is_rq_based
  block: Remove kblockd_schedule_delayed_work{,_on}()
  blk-mq: Avoid that blk_mq_delay_run_hw_queue() introduces unintended delays
  blk-mq: Rename blk_mq_request_direct_issue() into blk_mq_request_issue_directly()
  lib/scatterlist: Fix chaining support in sgl_alloc_order()
  blk-throttle: track read and write request individually
  block: add bdev_read_only() checks to common helpers
  block: fail op_is_write() requests to read-only partitions
  blk-throttle: export io_serviced_recursive, io_service_bytes_recursive
  ...
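As a hedged illustration of the race the timeout rework above addresses (hypothetical names; the actual blk-mq rework tracks generations alongside the request state, this is only the idea): if the timeout path remembers a per-request generation that is bumped on every reuse, a timer armed for an old incarnation refuses to fire on a recycled request.

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

/* Hypothetical stand-in for a request; gen bumps each time it is reused. */
struct req {
	atomic_uint gen;
};

/* Called when the request is (re)allocated and started for a new IO. */
static unsigned int req_start(struct req *rq)
{
	return atomic_fetch_add(&rq->gen, 1) + 1;
}

/* Timeout handler: act only if the request was not reused meanwhile. */
static bool req_timed_out(struct req *rq, unsigned int armed_gen)
{
	return atomic_load(&rq->gen) == armed_gen;
}

int main(void)
{
	struct req rq = { 0 };
	unsigned int armed = req_start(&rq);	/* timer armed for this gen */

	req_start(&rq);				/* request recycled for a new IO */
	printf("stale timeout fires: %s\n",
	       req_timed_out(&rq, armed) ? "yes" : "no");	/* prints "no" */
	return 0;
}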
Diffstat (limited to 'block/blk.h')
-rw-r--r--	block/blk.h	46
1 file changed, 27 insertions, 19 deletions
diff --git a/block/blk.h b/block/blk.h
index 442098aa9463..46db5dc83dcb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -120,33 +120,23 @@ void blk_account_io_completion(struct request *req, unsigned int bytes);
 void blk_account_io_done(struct request *req);
 
 /*
- * Internal atomic flags for request handling
- */
-enum rq_atomic_flags {
-	/*
-	 * Keep these two bits first - not because we depend on the
-	 * value of them, but we do depend on them being in the same
-	 * byte of storage to ensure ordering on writes. Keeping them
-	 * first will achieve that nicely.
-	 */
-	REQ_ATOM_COMPLETE = 0,
-	REQ_ATOM_STARTED,
-
-	REQ_ATOM_POLL_SLEPT,
-};
-
-/*
  * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds
+ * sure that only one of them succeeds. Steal the bottom bit of the
+ * __deadline field for this.
  */
 static inline int blk_mark_rq_complete(struct request *rq)
 {
-	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+	return test_and_set_bit(0, &rq->__deadline);
 }
 
 static inline void blk_clear_rq_complete(struct request *rq)
 {
-	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+	clear_bit(0, &rq->__deadline);
+}
+
+static inline bool blk_rq_is_complete(struct request *rq)
+{
+	return test_bit(0, &rq->__deadline);
 }
 
 /*
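As a hedged aside on the hunk above: a minimal userspace sketch (hypothetical names; test_and_set_bit() modelled with a GCC __atomic builtin, not kernel code) of the 'grab' semantics, where the EH timer and IO completion race to set bit 0 and exactly one of them wins.

#include <stdio.h>
#include <pthread.h>

static unsigned long deadline_word;	/* stands in for rq->__deadline */

/* Models blk_mark_rq_complete(): returns nonzero if already grabbed. */
static int mark_complete(void)
{
	return (__atomic_fetch_or(&deadline_word, 1UL, __ATOMIC_SEQ_CST) & 1UL) != 0;
}

static void *contender(void *name)
{
	if (!mark_complete())
		printf("%s grabbed the request\n", (char *)name);
	return NULL;
}

int main(void)
{
	pthread_t timer, completion;

	pthread_create(&timer, NULL, contender, "EH timer");
	pthread_create(&completion, NULL, contender, "IO completion");
	pthread_join(timer, NULL);
	pthread_join(completion, NULL);
	return 0;	/* exactly one line is printed, whichever path won */
}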
@@ -172,6 +162,9 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
 		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
 }
 
+int elv_register_queue(struct request_queue *q);
+void elv_unregister_queue(struct request_queue *q);
+
 struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
 
 #ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -246,6 +239,21 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 }
 
 /*
+ * Steal a bit from this field for legacy IO path atomic IO marking. Note that
+ * setting the deadline clears the bottom bit, potentially clearing the
+ * completed bit. The user has to be OK with this (current ones are fine).
+ */
+static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
+{
+	rq->__deadline = time & ~0x1UL;
+}
+
+static inline unsigned long blk_rq_deadline(struct request *rq)
+{
+	return rq->__deadline & ~0x1UL;
+}
+
+/*
  * Internal io_context interface
  */
 void get_io_context(struct io_context *ioc);
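And a matching userspace sketch (hypothetical names, a plain number instead of jiffies) of the final hunk's packing: bit 0 of __deadline carries the legacy-path complete flag, the remaining bits carry the deadline, and re-arming the deadline wipes the complete bit, which is exactly the caveat the comment calls out.

#include <assert.h>
#include <stdio.h>

static unsigned long deadline_word;	/* stands in for rq->__deadline */

/* Models blk_rq_set_deadline(): bit 0 is reserved for the complete flag. */
static void set_deadline(unsigned long time)
{
	deadline_word = time & ~0x1UL;
}

/* Models blk_rq_deadline(): reads the deadline with bit 0 masked off. */
static unsigned long get_deadline(void)
{
	return deadline_word & ~0x1UL;
}

int main(void)
{
	deadline_word |= 0x1UL;		/* legacy path marks the rq complete */
	set_deadline(1000);		/* re-arming the deadline...          */
	assert(!(deadline_word & 0x1UL)); /* ...has cleared the complete bit  */
	assert(get_deadline() == 1000);	/* the deadline reads back cleanly    */
	printf("deadline=%lu complete=%lu\n",
	       get_deadline(), deadline_word & 0x1UL);
	return 0;
}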