path: root/include/linux/blkdev.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 16:19:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 16:19:59 -0500
commit	0e9da3fbf7d81f0f913b491c8de1ba7883d4f217 (patch)
tree	2b3d25e3be60bf4ee40b4690c7bb9d6fa499ae69 /include/linux/blkdev.h
parent	b12a9124eeb71d766a3e3eb594ebbb3fefc66902 (diff)
parent	00203ba40d40d7f33857416adfb18adaf0e40123 (diff)
Merge tag 'for-4.21/block-20181221' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "This is the main pull request for block/storage for 4.21. Larger than
  usual, it was a busy round with lots of goodies queued up. Most
  notable is the removal of the old IO stack, which has been a long
  time coming. No new features for a while, everything coming in this
  week has all been fixes for things that were previously merged.

  This contains:

   - Use atomic counters instead of semaphores for mtip32xx (Arnd)

   - Cleanup of the mtip32xx request setup (Christoph)

   - Fix for circular locking dependency in loop (Jan, Tetsuo)

   - bcache (Coly, Guoju, Shenghui)
      * Optimizations for writeback caching
      * Various fixes and improvements

   - nvme (Chaitanya, Christoph, Sagi, Jay, me, Keith)
      * host and target support for NVMe over TCP
      * Error log page support
      * Support for separate read/write/poll queues
      * Much improved polling
      * discard OOM fallback
      * Tracepoint improvements

   - lightnvm (Hans, Hua, Igor, Matias, Javier)
      * Igor added packed metadata to pblk. Now drives without metadata
        per LBA can be used as well.
      * Fix from Geert on uninitialized value on chunk metadata reads.
      * Fixes from Hans and Javier to pblk recovery and write path.
      * Fix from Hua Su to fix a race condition in the pblk recovery
        code.
      * Scan optimization added to pblk recovery from Zhoujie.
      * Small geometry cleanup from me.

   - Conversion of the last few drivers that used the legacy path to
     blk-mq (me)

   - Removal of legacy IO path in SCSI (me, Christoph)

   - Removal of legacy IO stack and schedulers (me)

   - Support for much better polling, now without interrupts at all.
     blk-mq adds support for multiple queue maps, which enables us to
     have a map per type. This in turn enables nvme to have separate
     completion queues for polling, which can then be interrupt-less.
     Also means we're ready for async polled IO, which is hopefully
     coming in the next release.

   - Killing of (now) unused block exports (Christoph)

   - Unification of the blk-rq-qos and blk-wbt wait handling (Josef)

   - Support for zoned testing with null_blk (Masato)

   - sx8 conversion to per-host tag sets (Christoph)

   - IO priority improvements (Damien)

   - mq-deadline zoned fix (Damien)

   - Ref count blkcg series (Dennis)

   - Lots of blk-mq improvements and speedups (me)

   - sbitmap scalability improvements (me)

   - Make core inflight IO accounting per-cpu (Mikulas)

   - Export timeout setting in sysfs (Weiping)

   - Cleanup the direct issue path (Jianchao)

   - Export blk-wbt internals in block debugfs for easier debugging
     (Ming)

   - Lots of other fixes and improvements"

* tag 'for-4.21/block-20181221' of git://git.kernel.dk/linux-block: (364 commits)
  kyber: use sbitmap add_wait_queue/list_del wait helpers
  sbitmap: add helpers for add/del wait queue handling
  block: save irq state in blkg_lookup_create()
  dm: don't reuse bio for flushes
  nvme-pci: trace SQ status on completions
  nvme-rdma: implement polling queue map
  nvme-fabrics: allow user to pass in nr_poll_queues
  nvme-fabrics: allow nvmf_connect_io_queue to poll
  nvme-core: optionally poll sync commands
  block: make request_to_qc_t public
  nvme-tcp: fix spelling mistake "attepmpt" -> "attempt"
  nvme-tcp: fix endianess annotations
  nvmet-tcp: fix endianess annotations
  nvme-pci: refactor nvme_poll_irqdisable to make sparse happy
  nvme-pci: only set nr_maps to 2 if poll queues are supported
  nvmet: use a macro for default error location
  nvmet: fix comparison of a u16 with -1
  blk-mq: enable IO poll if .nr_queues of type poll > 0
  blk-mq: change blk_mq_queue_busy() to blk_mq_queue_inflight()
  blk-mq: skip zero-queue maps in blk_mq_map_swqueue
  ...
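As a rough illustration of the reworked polling interface mentioned above (and visible further down in this diff, where blk_poll() gains a "spin" argument and now returns an int instead of a bool), a submitter of a polled HIPRI bio might wait for completion along these lines. This is a sketch only, not code from this merge; the bio/cookie handling and the assumption that the completion handler clears bio.bi_private are illustrative:

	/* Sketch only: busy-poll for a previously submitted HIPRI bio.
	 * 'qc' is the blk_qc_t cookie returned by submit_bio(), and the
	 * completion handler is assumed to clear bio.bi_private.
	 */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		/* spin == true: poll the hardware queue directly, no interrupt */
		if (blk_poll(bdev_get_queue(bdev), qc, true) <= 0)
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);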
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	250
1 file changed, 26 insertions(+), 224 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4293dc1cd160..45552e6eae1e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -58,25 +58,6 @@ struct blk_stat_callback;
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
-#define BLK_RL_SYNCFULL (1U << 0)
-#define BLK_RL_ASYNCFULL (1U << 1)
-
-struct request_list {
-	struct request_queue *q; /* the queue this rl belongs to */
-#ifdef CONFIG_BLK_CGROUP
-	struct blkcg_gq *blkg; /* blkg this request pool belongs to */
-#endif
-	/*
-	 * count[], starved[], and wait[] are indexed by
-	 * BLK_RW_SYNC/BLK_RW_ASYNC
-	 */
-	int count[2];
-	int starved[2];
-	mempool_t *rq_pool;
-	wait_queue_head_t wait[2];
-	unsigned int flags;
-};
-
 /*
  * request flags */
 typedef __u32 __bitwise req_flags_t;
@@ -85,8 +66,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_SORTED ((__force req_flags_t)(1 << 0))
 /* drive already may have started this one */
 #define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* uses tagged queueing */
-#define RQF_QUEUED ((__force req_flags_t)(1 << 2))
 /* may not be passed by ioscheduler */
 #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
 /* request for flush sequence */
@@ -150,8 +129,8 @@ enum mq_rq_state {
 struct request {
 	struct request_queue *q;
 	struct blk_mq_ctx *mq_ctx;
+	struct blk_mq_hw_ctx *mq_hctx;
 
-	int cpu;
 	unsigned int cmd_flags; /* op and common flags */
 	req_flags_t rq_flags;
 
@@ -245,11 +224,7 @@ struct request {
 	refcount_t ref;
 
 	unsigned int timeout;
-
-	/* access through blk_rq_set_deadline, blk_rq_deadline */
-	unsigned long __deadline;
-
-	struct list_head timeout_list;
+	unsigned long deadline;
 
 	union {
 		struct __call_single_data csd;
@@ -264,10 +239,6 @@ struct request {
 
 	/* for bidi */
 	struct request *next_rq;
-
-#ifdef CONFIG_BLK_CGROUP
-	struct request_list *rl; /* rl this rq is alloced from */
-#endif
 };
 
 static inline bool blk_op_is_scsi(unsigned int op)
@@ -311,41 +282,21 @@ static inline unsigned short req_get_ioprio(struct request *req)
 
 struct blk_queue_ctx;
 
-typedef void (request_fn_proc) (struct request_queue *q);
 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
-typedef int (prep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
-typedef int (lld_busy_fn) (struct request_queue *q);
-typedef int (bsg_job_fn) (struct bsg_job *);
-typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
-typedef void (exit_rq_fn)(struct request_queue *, struct request *);
 
 enum blk_eh_timer_return {
 	BLK_EH_DONE, /* drivers has completed the command */
 	BLK_EH_RESET_TIMER, /* reset timer and try again */
 };
 
-typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
-
 enum blk_queue_state {
 	Queue_down,
 	Queue_up,
 };
 
-struct blk_queue_tag {
-	struct request **tag_index; /* map of busy tags */
-	unsigned long *tag_map; /* bit map of free/busy tags */
-	int max_depth; /* what we will send to device */
-	int real_max_depth; /* what the array can hold */
-	atomic_t refcnt; /* map can be shared */
-	int alloc_policy; /* tag allocation policy */
-	int next_tag; /* next tag */
-};
 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
 
@@ -444,40 +395,15 @@ struct request_queue {
 	struct list_head queue_head;
 	struct request *last_merge;
 	struct elevator_queue *elevator;
-	int nr_rqs[2]; /* # allocated [a]sync rqs */
-	int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
 
 	struct blk_queue_stats *stats;
 	struct rq_qos *rq_qos;
 
-	/*
-	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
-	 * is used, root blkg allocates from @q->root_rl and all other
-	 * blkgs from their own blkg->rl. Which one to use should be
-	 * determined using bio_request_list().
-	 */
-	struct request_list root_rl;
-
-	request_fn_proc *request_fn;
 	make_request_fn *make_request_fn;
-	poll_q_fn *poll_fn;
-	prep_rq_fn *prep_rq_fn;
-	unprep_rq_fn *unprep_rq_fn;
-	softirq_done_fn *softirq_done_fn;
-	rq_timed_out_fn *rq_timed_out_fn;
 	dma_drain_needed_fn *dma_drain_needed;
-	lld_busy_fn *lld_busy_fn;
-	/* Called just after a request is allocated */
-	init_rq_fn *init_rq_fn;
-	/* Called just before a request is freed */
-	exit_rq_fn *exit_rq_fn;
-	/* Called from inside blk_get_request() */
-	void (*initialize_rq_fn)(struct request *rq);
 
 	const struct blk_mq_ops *mq_ops;
 
-	unsigned int *mq_map;
-
 	/* sw queues */
 	struct blk_mq_ctx __percpu *queue_ctx;
 	unsigned int nr_queues;
@@ -488,17 +414,6 @@ struct request_queue {
 	struct blk_mq_hw_ctx **queue_hw_ctx;
 	unsigned int nr_hw_queues;
 
-	/*
-	 * Dispatch queue sorting
-	 */
-	sector_t end_sector;
-	struct request *boundary_rq;
-
-	/*
-	 * Delayed queue handling
-	 */
-	struct delayed_work delay_work;
-
 	struct backing_dev_info *backing_dev_info;
 
 	/*
@@ -529,13 +444,7 @@ struct request_queue {
 	 */
 	gfp_t bounce_gfp;
 
-	/*
-	 * protects queue structures from reentrancy. ->__queue_lock should
-	 * _never_ be used directly, it is queue private. always use
-	 * ->queue_lock.
-	 */
-	spinlock_t __queue_lock;
-	spinlock_t *queue_lock;
+	spinlock_t queue_lock;
 
 	/*
 	 * queue kobject
@@ -545,7 +454,7 @@ struct request_queue {
 	/*
 	 * mq queue kobject
 	 */
-	struct kobject mq_kobj;
+	struct kobject *mq_kobj;
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	struct blk_integrity integrity;
@@ -561,27 +470,12 @@ struct request_queue {
 	 * queue settings
 	 */
 	unsigned long nr_requests; /* Max # of requests */
-	unsigned int nr_congestion_on;
-	unsigned int nr_congestion_off;
-	unsigned int nr_batching;
 
 	unsigned int dma_drain_size;
 	void *dma_drain_buffer;
 	unsigned int dma_pad_mask;
 	unsigned int dma_alignment;
 
-	struct blk_queue_tag *queue_tags;
-
-	unsigned int nr_sorted;
-	unsigned int in_flight[2];
-
-	/*
-	 * Number of active block driver functions for which blk_drain_queue()
-	 * must wait. Must be incremented around functions that unlock the
-	 * queue_lock internally, e.g. scsi_request_fn().
-	 */
-	unsigned int request_fn_active;
-
 	unsigned int rq_timeout;
 	int poll_nsec;
 
@@ -590,7 +484,6 @@ struct request_queue {
 
 	struct timer_list timeout;
 	struct work_struct timeout_work;
-	struct list_head timeout_list;
 
 	struct list_head icq_list;
 #ifdef CONFIG_BLK_CGROUP
@@ -645,11 +538,9 @@ struct request_queue {
 
 	struct mutex sysfs_lock;
 
-	int bypass_depth;
 	atomic_t mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
-	bsg_job_fn *bsg_job_fn;
 	struct bsg_class_device bsg_dev;
 #endif
 
@@ -669,12 +560,12 @@ struct request_queue {
 #ifdef CONFIG_BLK_DEBUG_FS
 	struct dentry *debugfs_dir;
 	struct dentry *sched_debugfs_dir;
+	struct dentry *rqos_debugfs_dir;
 #endif
 
 	bool mq_sysfs_init_done;
 
 	size_t cmd_size;
-	void *rq_alloc_data;
 
 	struct work_struct release_work;
 
@@ -682,10 +573,8 @@ struct request_queue {
 	u64 write_hints[BLK_MAX_WRITE_HINTS];
 };
 
-#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED 1 /* queue is stopped */
 #define QUEUE_FLAG_DYING 2 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
@@ -718,19 +607,15 @@ struct request_queue {
 	 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
-	 (1 << QUEUE_FLAG_SAME_COMP) | \
-	 (1 << QUEUE_FLAG_POLL))
+	 (1 << QUEUE_FLAG_SAME_COMP))
 
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 
-#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q) \
@@ -757,32 +642,20 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);
 
-static inline int queue_in_flight(struct request_queue *q)
-{
-	return q->in_flight[0] + q->in_flight[1];
-}
-
 static inline bool blk_account_rq(struct request *rq)
 {
 	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
 }
 
-#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-/* rq->queuelist of dequeued request must be list_empty() */
-#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
 
 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
 
 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
 
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
 {
-	return q->request_fn || q->mq_ops;
+	return q->mq_ops;
 }
 
 static inline unsigned int blk_queue_cluster(struct request_queue *q)
@@ -845,27 +718,6 @@ static inline bool rq_is_sync(struct request *rq)
 	return op_is_sync(rq->cmd_flags);
 }
 
-static inline bool blk_rl_full(struct request_list *rl, bool sync)
-{
-	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
-	return rl->flags & flag;
-}
-
-static inline void blk_set_rl_full(struct request_list *rl, bool sync)
-{
-	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
-	rl->flags |= flag;
-}
-
-static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
-{
-	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
-	rl->flags &= ~flag;
-}
-
 static inline bool rq_mergeable(struct request *rq)
 {
 	if (blk_rq_is_passthrough(rq))
@@ -902,16 +754,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
 	return q->nr_requests;
 }
 
-/*
- * q->prep_rq_fn return values
- */
-enum {
-	BLKPREP_OK, /* serve it */
-	BLKPREP_KILL, /* fatal error, kill, return -EIO */
-	BLKPREP_DEFER, /* leave on queue */
-	BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
-};
-
 extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 /*
@@ -983,10 +825,8 @@ extern blk_qc_t direct_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
 extern void blk_put_request(struct request *);
-extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
 	 blk_mq_req_flags_t flags);
-extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 	 struct bio_set *bs, gfp_t gfp_mask,
@@ -996,7 +836,6 @@ extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 	 struct request *rq);
 extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
@@ -1009,15 +848,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 
 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
 extern void blk_queue_exit(struct request_queue *q);
-extern void blk_start_queue(struct request_queue *q);
-extern void blk_start_queue_async(struct request_queue *q);
-extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q);
-extern void __blk_run_queue_uncond(struct request_queue *q);
-extern void blk_run_queue(struct request_queue *);
-extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 	 struct rq_map_data *, void __user *, unsigned long,
 	 gfp_t);
@@ -1034,7 +865,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 int blk_status_to_errno(blk_status_t status);
 blk_status_t errno_to_blk_status(int errno);
 
-bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
@@ -1172,13 +1003,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
 	return nr_bios;
 }
 
-/*
- * Request issue related functions.
- */
-extern struct request *blk_peek_request(struct request_queue *q);
-extern void blk_start_request(struct request *rq);
-extern struct request *blk_fetch_request(struct request_queue *q);
-
 void blk_steal_bios(struct bio_list *list, struct request *rq);
 
 /*
@@ -1196,27 +1020,18 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
  */
 extern bool blk_update_request(struct request *rq, blk_status_t error,
 	 unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, blk_status_t error);
-extern bool blk_end_request(struct request *rq, blk_status_t error,
-	 unsigned int nr_bytes);
 extern void blk_end_request_all(struct request *rq, blk_status_t error);
 extern bool __blk_end_request(struct request *rq, blk_status_t error,
 	 unsigned int nr_bytes);
 extern void __blk_end_request_all(struct request *rq, blk_status_t error);
 extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
 
-extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_unprep_request(struct request *);
 
 /*
  * Access functions for manipulating queue properties
  */
-extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
-	 spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern int blk_init_allocated_queue(struct request_queue *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
@@ -1255,15 +1070,10 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
 	 dma_drain_needed_fn *dma_drain_needed,
 	 void *buf, unsigned int size);
-extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
-extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
@@ -1299,8 +1109,7 @@ extern long nr_blockdev_pages(void);
 
 bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
-	 spinlock_t *lock);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);
 
@@ -1317,9 +1126,10 @@ extern void blk_set_queue_dying(struct request_queue *);
  * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	struct list_head list; /* requests */
 	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */
+	unsigned short rq_count;
+	bool multiple_queues;
 };
 #define BLK_MAX_REQUEST_COUNT 16
 #define BLK_PLUG_FLUSH_SIZE (128 * 1024)
@@ -1358,31 +1168,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 	struct blk_plug *plug = tsk->plug;
 
 	return plug &&
-		(!list_empty(&plug->list) ||
-		 !list_empty(&plug->mq_list) ||
+		(!list_empty(&plug->mq_list) ||
 		 !list_empty(&plug->cb_list));
 }
 
-/*
- * tag stuff
- */
-extern int blk_queue_start_tag(struct request_queue *, struct request *);
-extern struct request *blk_queue_find_tag(struct request_queue *, int);
-extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
-extern void blk_queue_free_tags(struct request_queue *);
-extern int blk_queue_resize_tags(struct request_queue *, int);
-extern struct blk_queue_tag *blk_init_tags(int, int);
-extern void blk_free_tags(struct blk_queue_tag *);
-
-static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
-	 int tag)
-{
-	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
-		return NULL;
-	return bqt->tag_index[tag];
-}
-
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
@@ -1982,4 +1771,17 @@ static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 
 #endif /* CONFIG_BLOCK */
 
+static inline void blk_wake_io_task(struct task_struct *waiter)
+{
+	/*
+	 * If we're polling, the task itself is doing the completions. For
+	 * that case, we don't need to signal a wakeup, it's enough to just
+	 * mark us as RUNNING.
+	 */
+	if (waiter == current)
+		__set_current_state(TASK_RUNNING);
+	else
+		wake_up_process(waiter);
+}
+
 #endif
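The blk_wake_io_task() helper added at the end of the diff pairs with the polling changes: a completion path can skip the wakeup entirely when the polling task is reaping its own completion. A hypothetical end_io callback for a polled bio might use it as below; the bi_private layout and function name are illustrative, not part of this header:

	/* Sketch only: completion callback for a bio whose submitter may be
	 * busy-polling; bi_private is assumed to hold the submitting task.
	 */
	static void polled_bio_end_io(struct bio *bio)
	{
		struct task_struct *waiter = bio->bi_private;

		/* publish completion before (possibly) waking the submitter */
		WRITE_ONCE(bio->bi_private, NULL);
		blk_wake_io_task(waiter);
	}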