author     Christoph Hellwig <hch@lst.de>     2010-08-07 12:17:56 -0400
committer  Jens Axboe <jaxboe@fusionio.com>   2010-08-07 12:17:56 -0400
commit     33659ebbae262228eef4e0fe990f393d1f0ed941 (patch)
tree       fcb537f09359c8dad3a6f6e16dc4319562dc42cc /include/linux/blkdev.h
parent     7e005f79791dcd58436c88ded4a7f5aed1b82147 (diff)
block: remove wrappers for request type/flags
Remove all the trivial wrappers for the cmd_type and cmd_flags fields in struct request. This allows much easier grepping for different request types instead of unwinding through macros.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
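As a minimal sketch of the call-site effect (the helper example_rq_is_io below is hypothetical and not part of this patch), a check that previously went through the wrappers now tests cmd_type and cmd_flags in the open, so REQ_TYPE_FS and REQ_DISCARD can be grepped for directly at the caller:

/* Hypothetical caller sketch, assuming the blkdev.h from this tree. */
#include <linux/blkdev.h>

static bool example_rq_is_io(struct request *rq)
{
	/*
	 * Before this patch the same test would have been written as
	 * blk_fs_request(rq) || blk_discard_rq(rq); with the wrappers
	 * removed, the fields are checked directly.
	 */
	return rq->cmd_type == REQ_TYPE_FS || (rq->cmd_flags & REQ_DISCARD);
}

The open-coded form is slightly longer but makes it obvious which request types and flags a driver actually cares about.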
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  41
1 file changed, 13 insertions(+), 28 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d7ae241a9e5..3ecd28ef9ba 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -604,33 +604,20 @@ enum {
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 
-#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
-#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
-#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
-#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
-
-#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
-#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
-#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
-#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
-				 blk_failfast_transport(rq) ||	\
-				 blk_failfast_driver(rq))
-#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
-#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
-#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)
-
-#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
-
-#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
-#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
+#define blk_noretry_request(rq) \
+	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+			     REQ_FAILFAST_DRIVER))
+
+#define blk_account_rq(rq) \
+	(((rq)->cmd_flags & REQ_STARTED) && \
+	 ((rq)->cmd_type == REQ_TYPE_FS || \
+	  ((rq)->cmd_flags & REQ_DISCARD)))
+
 #define blk_pm_request(rq)	\
-	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
+	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
+	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
 
 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
-#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
-#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
-#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
@@ -652,9 +639,6 @@ static inline bool rq_is_sync(struct request *rq)
 	return rw_is_sync(rq->cmd_flags);
 }
 
-#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
-#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)
-
 static inline int blk_queue_full(struct request_queue *q, int sync)
 {
 	if (sync)
@@ -687,7 +671,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-	 (blk_discard_rq(rq) || blk_fs_request((rq))))
+	 (((rq)->cmd_flags & REQ_DISCARD) || \
+	  (rq)->cmd_type == REQ_TYPE_FS))
 
 /*
  * q->prep_rq_fn return values