author    Jens Axboe <axboe@kernel.dk>    2018-10-29 12:23:51 -0400
committer Jens Axboe <axboe@kernel.dk>    2018-11-07 15:42:32 -0500
commit    a1ce35fa49852db60fc6e268038530be533c5b15
tree      7a0bb9576a3f0e31e22f849463eee6cdda26bad5    /include/linux/blkdev.h
parent    f382fb0bcef4c37dc049e9f6963e3baf204d815c
block: remove dead elevator code
This removes a bunch of core and elevator related code. On the core front, we remove anything related to queue running, draining, initialization, plugging, and congestion. We also kill anything related to request allocation, merging, retrieval, and completion. Remove any checking for single queue IO schedulers, as they no longer exist. This means we can also delete a bunch of code related to request issue, adding, completion, etc. - and all the SQ related ops and helpers. Also kill load_default_modules(), as all that did was provide a way to load the default single queue elevator.

Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
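Context for review, not part of the patch: with request_fn_proc and the prep/unprep hooks removed, the only dispatch interface left for drivers is struct blk_mq_ops. A minimal sketch of the surviving path (hypothetical names; the request is completed synchronously only to keep the fragment short):

#include <linux/blk-mq.h>

/*
 * Hypothetical driver fragment: dispatch now goes through
 * blk_mq_ops->queue_rq() instead of a request_fn() registered
 * via the removed blk_init_queue()/blk_init_queue_node().
 */
static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);
        /* hand rq to the hardware here; the sketch completes it immediately */
        blk_mq_end_request(rq, BLK_STS_OK);
        return BLK_STS_OK;
}

static const struct blk_mq_ops sketch_mq_ops = {
        .queue_rq = sketch_queue_rq,
};

A queue built on such ops comes from blk_mq_alloc_tag_set() and blk_mq_init_queue(), not from the legacy helpers this patch deletes.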
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--    include/linux/blkdev.h    | 93
1 file changed, 2 insertions(+), 91 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8afe3331777e..a9f6db8abcda 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -58,9 +58,6 @@ struct blk_stat_callback;
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
-#define BLK_RL_SYNCFULL		(1U << 0)
-#define BLK_RL_ASYNCFULL	(1U << 1)
-
 struct request_list {
 	struct request_queue	*q;	/* the queue this rl belongs to */
 #ifdef CONFIG_BLK_CGROUP
@@ -309,11 +306,8 @@ static inline unsigned short req_get_ioprio(struct request *req)
 
 struct blk_queue_ctx;
 
-typedef void (request_fn_proc) (struct request_queue *q);
 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
-typedef int (prep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 typedef void (softirq_done_fn)(struct request *);
@@ -432,8 +426,6 @@ struct request_queue {
 	struct list_head	queue_head;
 	struct request		*last_merge;
 	struct elevator_queue	*elevator;
-	int			nr_rqs[2];	/* # allocated [a]sync rqs */
-	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
 	struct blk_queue_stats	*stats;
 	struct rq_qos		*rq_qos;
@@ -446,11 +438,8 @@ struct request_queue {
 	 */
 	struct request_list	root_rl;
 
-	request_fn_proc		*request_fn;
 	make_request_fn		*make_request_fn;
 	poll_q_fn		*poll_fn;
-	prep_rq_fn		*prep_rq_fn;
-	unprep_rq_fn		*unprep_rq_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
@@ -458,8 +447,6 @@ struct request_queue {
 	init_rq_fn		*init_rq_fn;
 	/* Called just before a request is freed */
 	exit_rq_fn		*exit_rq_fn;
-	/* Called from inside blk_get_request() */
-	void (*initialize_rq_fn)(struct request *rq);
 
 	const struct blk_mq_ops	*mq_ops;
 
@@ -475,17 +462,6 @@ struct request_queue {
 	struct blk_mq_hw_ctx	**queue_hw_ctx;
 	unsigned int		nr_hw_queues;
 
-	/*
-	 * Dispatch queue sorting
-	 */
-	sector_t		end_sector;
-	struct request		*boundary_rq;
-
-	/*
-	 * Delayed queue handling
-	 */
-	struct delayed_work	delay_work;
-
 	struct backing_dev_info	*backing_dev_info;
 
 	/*
@@ -548,9 +524,6 @@ struct request_queue {
 	 * queue settings
 	 */
 	unsigned long		nr_requests;	/* Max # of requests */
-	unsigned int		nr_congestion_on;
-	unsigned int		nr_congestion_off;
-	unsigned int		nr_batching;
 
 	unsigned int		dma_drain_size;
 	void			*dma_drain_buffer;
@@ -560,13 +533,6 @@ struct request_queue {
 	unsigned int		nr_sorted;
 	unsigned int		in_flight[2];
 
-	/*
-	 * Number of active block driver functions for which blk_drain_queue()
-	 * must wait. Must be incremented around functions that unlock the
-	 * queue_lock internally, e.g. scsi_request_fn().
-	 */
-	unsigned int		request_fn_active;
-
 	unsigned int		rq_timeout;
 	int			poll_nsec;
 
@@ -740,11 +706,6 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
740extern void blk_set_pm_only(struct request_queue *q); 706extern void blk_set_pm_only(struct request_queue *q);
741extern void blk_clear_pm_only(struct request_queue *q); 707extern void blk_clear_pm_only(struct request_queue *q);
742 708
743static inline int queue_in_flight(struct request_queue *q)
744{
745 return q->in_flight[0] + q->in_flight[1];
746}
747
748static inline bool blk_account_rq(struct request *rq) 709static inline bool blk_account_rq(struct request *rq)
749{ 710{
750 return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); 711 return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
@@ -765,7 +726,7 @@ static inline bool blk_account_rq(struct request *rq)
  */
 static inline bool queue_is_rq_based(struct request_queue *q)
 {
-	return q->request_fn || q->mq_ops;
+	return q->mq_ops;
 }
 
 static inline unsigned int blk_queue_cluster(struct request_queue *q)
@@ -828,27 +789,6 @@ static inline bool rq_is_sync(struct request *rq)
 	return op_is_sync(rq->cmd_flags);
 }
 
-static inline bool blk_rl_full(struct request_list *rl, bool sync)
-{
-	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
-	return rl->flags & flag;
-}
-
-static inline void blk_set_rl_full(struct request_list *rl, bool sync)
-{
-	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
-	rl->flags |= flag;
-}
-
-static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
-{
-	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
-	rl->flags &= ~flag;
-}
-
 static inline bool rq_mergeable(struct request *rq)
 {
 	if (blk_rq_is_passthrough(rq))
@@ -969,7 +909,6 @@ extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
 				       blk_mq_req_flags_t flags);
-extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 			     struct bio_set *bs, gfp_t gfp_mask,
@@ -979,7 +918,6 @@ extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 					      struct request *rq);
 extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
@@ -992,15 +930,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 
 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
 extern void blk_queue_exit(struct request_queue *q);
-extern void blk_start_queue(struct request_queue *q);
-extern void blk_start_queue_async(struct request_queue *q);
-extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q);
-extern void __blk_run_queue_uncond(struct request_queue *q);
-extern void blk_run_queue(struct request_queue *);
-extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
@@ -1155,13 +1085,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
 	return nr_bios;
 }
 
-/*
- * Request issue related functions.
- */
-extern struct request *blk_peek_request(struct request_queue *q);
-extern void blk_start_request(struct request *rq);
-extern struct request *blk_fetch_request(struct request_queue *q);
-
 void blk_steal_bios(struct bio_list *list, struct request *rq);
 
 /*
@@ -1179,9 +1102,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
  */
 extern bool blk_update_request(struct request *rq, blk_status_t error,
 			       unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, blk_status_t error);
-extern bool blk_end_request(struct request *rq, blk_status_t error,
-			    unsigned int nr_bytes);
 extern void blk_end_request_all(struct request *rq, blk_status_t error);
 extern bool __blk_end_request(struct request *rq, blk_status_t error,
 			      unsigned int nr_bytes);
@@ -1190,15 +1110,10 @@ extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
 
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_unprep_request(struct request *);
 
 /*
  * Access functions for manipulating queue properties
 */
-extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
-						 spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern int blk_init_allocated_queue(struct request_queue *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
@@ -1239,8 +1154,6 @@ extern int blk_queue_dma_drain(struct request_queue *q,
 			       void *buf, unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
-extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
@@ -1298,7 +1211,6 @@ extern void blk_set_queue_dying(struct request_queue *);
  * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	struct list_head list; /* requests */
 	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */
 };
@@ -1339,8 +1251,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 	struct blk_plug *plug = tsk->plug;
 
 	return plug &&
-		(!list_empty(&plug->list) ||
-		 !list_empty(&plug->mq_list) ||
+		(!list_empty(&plug->mq_list) ||
 		 !list_empty(&plug->cb_list));
 }
 
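For contrast, the request allocation interface the patch keeps - blk_get_request()/blk_put_request(), declared above - is unchanged for callers and is now always serviced by blk-mq. A minimal passthrough-style sketch (hypothetical caller fragment, assuming an existing blk-mq queue q; not part of this patch):

        struct request *rq;

        rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* fill in the passthrough payload, then issue and wait for completion */
        blk_execute_rq(q, NULL, rq, 0);

        blk_put_request(rq);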