author		Christoph Hellwig <hch@lst.de>		2018-11-14 11:02:05 -0500
committer	Jens Axboe <axboe@kernel.dk>		2018-11-15 14:13:16 -0500
commit		079076b3416e78ba2bb3ce38e05e320c388c3120
tree		9542a0f94fc2fcf98be8be557148904f8a55e58e	/block/blk.h
parent		8f4236d9008b0973a8281256ccfde6913cdec6cb
block: remove deadline __deadline manipulation helpers
With no users left since the removal of the legacy request interface, we can
remove all the magic bit stealing now and make it a normal field.
But use WRITE_ONCE/READ_ONCE on the new deadline field, given that we
don't seem to have any mechanism to guarantee that a new value actually
gets seen by other threads.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
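[Editor's note: a minimal sketch of the access pattern the commit message describes, not the exact code from this commit. It assumes the field ends up as a plain member named 'deadline' and uses hypothetical helper names; only the WRITE_ONCE()/READ_ONCE() pairing is taken from the message above.]

/*
 * Illustrative only: with the low-bit trickery gone, the deadline is an
 * ordinary field.  The annotated store/load pair documents that the
 * timeout path may read it concurrently with the path that arms it.
 */
static inline void rq_set_deadline_sketch(struct request *rq, unsigned long time)
{
	WRITE_ONCE(rq->deadline, time);		/* assumed field name */
}

static inline unsigned long rq_deadline_sketch(struct request *rq)
{
	return READ_ONCE(rq->deadline);		/* paired read from timeout/EH path */
}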
Diffstat (limited to 'block/blk.h')
-rw-r--r--	block/blk.h	35
1 file changed, 0 insertions(+), 35 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index 41b64e6e101b..08a5845b03ba 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -239,26 +239,6 @@ void blk_account_io_completion(struct request *req, unsigned int bytes);
 void blk_account_io_done(struct request *req, u64 now);
 
 /*
- * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds. Steal the bottom bit of the
- * __deadline field for this.
- */
-static inline int blk_mark_rq_complete(struct request *rq)
-{
-	return test_and_set_bit(0, &rq->__deadline);
-}
-
-static inline void blk_clear_rq_complete(struct request *rq)
-{
-	clear_bit(0, &rq->__deadline);
-}
-
-static inline bool blk_rq_is_complete(struct request *rq)
-{
-	return test_bit(0, &rq->__deadline);
-}
-
-/*
  * Internal elevator interface
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
@@ -323,21 +303,6 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 }
 
 /*
- * Steal a bit from this field for legacy IO path atomic IO marking. Note that
- * setting the deadline clears the bottom bit, potentially clearing the
- * completed bit. The user has to be OK with this (current ones are fine).
- */
-static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
-{
-	rq->__deadline = time & ~0x1UL;
-}
-
-static inline unsigned long blk_rq_deadline(struct request *rq)
-{
-	return rq->__deadline & ~0x1UL;
-}
-
-/*
  * Internal io_context interface
  */
 void get_io_context(struct io_context *ioc);
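[Editor's note: for readers unfamiliar with the "magic bit stealing" removed above, here is a standalone user-space approximation of the trick the deleted helpers implemented: since a jiffies deadline never needs bit 0, that bit doubles as an atomic "request completed" flag, and setting a new deadline silently clears it. All names below are illustrative, and C11 atomics stand in for the kernel's test_and_set_bit(); this is not kernel code.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
	atomic_ulong __deadline;	/* bit 0 stolen as the completion flag */
};

static bool mark_complete(struct fake_request *rq)
{
	/* true if already marked, mirroring test_and_set_bit()'s return */
	return atomic_fetch_or(&rq->__deadline, 1UL) & 1UL;
}

static void set_deadline(struct fake_request *rq, unsigned long time)
{
	/* setting the deadline clobbers bit 0, i.e. the completed marker */
	atomic_store(&rq->__deadline, time & ~1UL);
}

static unsigned long get_deadline(struct fake_request *rq)
{
	return atomic_load(&rq->__deadline) & ~1UL;
}

int main(void)
{
	struct fake_request rq = { 0 };

	set_deadline(&rq, 1000);
	printf("first grab won:  %d\n", !mark_complete(&rq));	/* 1: we set the bit */
	printf("second grab won: %d\n", !mark_complete(&rq));	/* 0: already grabbed */
	printf("deadline: %lu\n", get_deadline(&rq));
	return 0;
}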