author    David Woodhouse <David.Woodhouse@intel.com>  2008-08-09 11:42:20 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2008-10-09 02:56:02 -0400
commit    e17fc0a1ccf88f6d4dcb363729f3141b0958c325 (patch)
tree      0a7c2dc1c3159c2af14d87c67ca83e158b2c78b5 /include/linux/blkdev.h
parent    d30a2605be9d5132d95944916e8f578fcfe4f976 (diff)
Allow elevators to sort/merge discard requests
But blkdev_issue_discard() still emits requests which are interpreted as soft barriers, because naïve callers might otherwise issue subsequent writes to those same sectors, which might cross on the queue (if they're reallocated quickly enough).

Callers still _can_ issue non-barrier discard requests, but they have to take care of queue ordering for themselves.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
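For context, a minimal sketch of what a caller taking the non-barrier path might look like, assuming the bio API of this kernel era and the DISCARD_NOBARRIER rw flag from this patch series; example_issue_discard() and its completion handler are hypothetical names, not code from this patch:

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/fs.h>

	static void example_discard_end_io(struct bio *bio, int err)
	{
		bio_put(bio);
	}

	static int example_issue_discard(struct block_device *bdev,
					 sector_t sector, unsigned nr_sects)
	{
		struct bio *bio = bio_alloc(GFP_KERNEL, 0);

		if (!bio)
			return -ENOMEM;

		bio->bi_sector = sector;
		bio->bi_size = nr_sects << 9;
		bio->bi_bdev = bdev;
		bio->bi_end_io = example_discard_end_io;

		/*
		 * No BIO_RW_BARRIER bit: the elevator is free to sort/merge
		 * this request past others, so the caller must not write to
		 * these sectors until the discard has completed.
		 */
		submit_bio(DISCARD_NOBARRIER, bio);
		return 0;
	}

blkdev_issue_discard() itself keeps issuing the barrier variant, so naïve callers stay safe by default.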
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 727886d25c4e..e9eb35c9bf26 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -541,7 +541,7 @@ enum {
 #define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
 
-#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))
+#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
 
 #define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
 #define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
@@ -598,7 +598,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS	\
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
+	 (blk_discard_rq(rq) || blk_fs_request((rq))))
 
 /*
  * q->prep_rq_fn return values
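To make the effect of the rq_mergeable() change above concrete, here is a condensed, standalone model of the merge gate in plain C, compilable in userspace. The flag values and the is_fs/is_discard fields are placeholders standing in for the real cmd_flags bits and for blk_fs_request()/blk_discard_rq(), not the kernel's actual layout:

	#include <stdio.h>

	#define REQ_NOMERGE      (1 << 0)   /* placeholder bit values */
	#define REQ_STARTED      (1 << 1)
	#define REQ_HARDBARRIER  (1 << 2)
	#define REQ_SOFTBARRIER  (1 << 3)

	#define RQ_NOMERGE_FLAGS \
		(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)

	struct request {
		unsigned int cmd_flags;
		int is_fs;      /* stands in for blk_fs_request(rq) */
		int is_discard; /* stands in for blk_discard_rq(rq) */
	};

	/* The post-patch predicate: discards join fs requests as candidates. */
	static int rq_mergeable(const struct request *rq)
	{
		return !(rq->cmd_flags & RQ_NOMERGE_FLAGS) &&
		       (rq->is_discard || rq->is_fs);
	}

	int main(void)
	{
		/* A plain discard is now a merge/sort candidate... */
		struct request discard = { .cmd_flags = 0, .is_discard = 1 };

		/* ...but a soft-barrier discard, as blkdev_issue_discard()
		 * emits, is still excluded by RQ_NOMERGE_FLAGS. */
		struct request barrier = { .cmd_flags = REQ_SOFTBARRIER,
					   .is_discard = 1 };

		printf("plain discard mergeable:   %d\n", rq_mergeable(&discard));
		printf("barrier discard mergeable: %d\n", rq_mergeable(&barrier));
		return 0;
	}

This also shows why blk_account_rq() had to grow the blk_discard_rq() term: once discards can sit on the queue like fs requests, they have to be counted in the same in-flight accounting.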