author		Adrian Hunter <adrian.hunter@intel.com>	2017-11-29 08:41:04 -0500
committer	Ulf Hansson <ulf.hansson@linaro.org>	2017-12-11 06:44:34 -0500
commit		1e8e55b67030c6a2fef893d428bdcd611f73705c
tree		3b6b1d8a5dc6a336ffb1058c02bdb4e50fe90c84
parent		81196976ed946cbf36bb41ddda402853c7df7cfa
mmc: block: Add CQE support
Add CQE support to the block driver, including:

 - optionally using DCMD for flush requests
 - "manually" issuing discard requests
 - issuing read / write requests to the CQE
 - supporting block-layer timeouts
 - handling recovery
 - supporting re-tuning

CQE offers 25% - 50% better random multi-threaded I/O. There is a slight
(e.g. 2%) drop in sequential read speed but no observable change to
sequential write.

CQE automatically sends the commands needed to complete requests. However,
it supports only reads / writes and so-called "direct commands" (DCMD).
Furthermore, DCMD is limited to one command at a time, but discards require
3 commands. That makes issuing discards through the CQE very awkward, and
some CQEs don't support DCMD anyway. So for discards the existing non-CQE
approach is kept: the mmc core code issues the 3 commands one at a time,
i.e. mmc_erase(). DCMD is used only for issuing flushes.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
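The per-request classification the message describes lives in the queue code rather than in this header, but a minimal sketch of what such a dispatch could look like is below. It is an illustration only: mmc_cqe_can_dcmd() is a hypothetical helper ("does this CQE support direct commands?"), and the exact op mapping is an assumption, not taken from this page.

#include <linux/blkdev.h>
#include <linux/mmc/host.h>

/* Sketch only: classify a block request into the issue types this patch adds */
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		/* Discards need 3 commands, so take the synchronous
		 * non-CQE path (mmc_erase()) */
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		/* A flush may go as a DCMD when the CQE supports it */
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		/* Reads / writes are sent to the CQE asynchronously */
		return MMC_ISSUE_ASYNC;
	}
}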
Diffstat (limited to 'drivers/mmc/core/queue.h')
 drivers/mmc/core/queue.h | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index ce9249852f26..1d7d3b0afff8 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -17,6 +17,7 @@ enum mmc_issued {
 
 enum mmc_issue_type {
 	MMC_ISSUE_SYNC,
+	MMC_ISSUE_DCMD,
 	MMC_ISSUE_ASYNC,
 	MMC_ISSUE_MAX,
 };
@@ -92,8 +93,15 @@ struct mmc_queue {
 	int			qcnt;
 
 	int			in_flight[MMC_ISSUE_MAX];
+	unsigned int		cqe_busy;
+#define MMC_CQE_DCMD_BUSY	BIT(0)
+#define MMC_CQE_QUEUE_FULL	BIT(1)
+	bool			use_cqe;
+	bool			recovery_needed;
+	bool			in_recovery;
 	bool			rw_wait;
 	bool			waiting;
+	struct work_struct	recovery_work;
 	wait_queue_head_t	wait;
 	struct request		*complete_req;
 	struct mutex		complete_lock;
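A note on the new cqe_busy flags above: since DCMD is limited to one command at a time, the issue path needs a way to hold off further DCMDs while one is in flight. The sketch below is plausible and consistent with the declarations in this header, but the real bodies live in queue.c, which is not part of this page.

/* Sketch: only one DCMD may be in flight at a time */
static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	return mq->in_flight[MMC_ISSUE_DCMD];
}

/* Re-evaluate the busy flags, e.g. after a request completes */
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

While a busy flag is set, the dispatch path would return BLK_STS_RESOURCE so the block layer re-queues the request and retries later.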
@@ -108,11 +116,21 @@ extern void mmc_queue_resume(struct mmc_queue *);
 extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 				     struct mmc_queue_req *);
 
+void mmc_cqe_check_busy(struct mmc_queue *mq);
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq);
+
 enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
 
 static inline int mmc_tot_in_flight(struct mmc_queue *mq)
 {
 	return mq->in_flight[MMC_ISSUE_SYNC] +
+	       mq->in_flight[MMC_ISSUE_DCMD] +
+	       mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
+static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
+{
+	return mq->in_flight[MMC_ISSUE_DCMD] +
 	       mq->in_flight[MMC_ISSUE_ASYNC];
 }
 
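Finally, a usage sketch for the two new inline helpers: mmc_tot_in_flight() counts every issue type, while mmc_cqe_qcnt() counts only requests that occupy CQE slots (DCMD plus async reads / writes). The function below is purely illustrative; its name, call site, and locking are assumptions, not code from this commit.

/* Sketch: bookkeeping a completion path might do with the new helpers */
static void mmc_example_request_done(struct mmc_queue *mq, struct request *req)
{
	/* Drop the completed request from the per-type counters */
	mq->in_flight[mmc_issue_type(mq, req)] -= 1;

	/* If a busy flag was set, see whether it can be cleared now */
	if (mq->cqe_busy && !mmc_cqe_qcnt(mq))
		mmc_cqe_check_busy(mq);

	/* Wake anyone waiting for the queue to drain completely */
	if (!mmc_tot_in_flight(mq))
		wake_up(&mq->wait);
}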