author	Ezequiel Garcia <ezequiel@collabora.com>	2018-11-20 13:21:21 -0500
committer	Ulf Hansson <ulf.hansson@linaro.org>	2018-12-17 02:26:24 -0500
commit	96e03fffa306f3a02e34c1dbc271ea040b8705d0 (patch)
tree	b6d905b5de6fe6d137227134e099007454da3297 /drivers/mmc
parent	09b4f706736fb9f9af763d8af41bf6f99944dc0c (diff)
mmc: jz4740: rework pre_req/post_req implementation
As reported by Aaro, the JZ4740 MMC driver throws a warning when the
kernel is built without preemption (CONFIG_PREEMPT_NONE=y):

[   16.461094] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 567 host->next_data.cookie 568
[   16.473120] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 568 host->next_data.cookie 569
[   16.485144] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 569 host->next_data.cookie 570
[   16.497170] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 570 host->next_data.cookie 571

The problem is rooted in how pre_req/post_req is implemented: the driver
expects jz4740_mmc_prepare_dma_data() to be called with monotonically
increasing host_cookie values, which is wrong.

Moreover, the implementation is overly complicated, keeping track of
unneeded "next cookie" state.

So, instead of attempting to fix the current pre_req/post_req
implementation, this commit refactors the driver to drop that state,
following other drivers such as dw_mmc and sdhci.

Cc: Paul Cercueil <paul@crapouillou.net>
Cc: Mathieu Malaterre <malat@debian.org>
Reported-by: Aaro Koskinen <aaro.koskinen@iki.fi>
Signed-off-by: Ezequiel Garcia <ezequiel@collabora.com>
Tested-by: Aaro Koskinen <aaro.koskinen@iki.fi>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
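[Editor's note] The refactored driver tracks each request's DMA mapping with a
small three-state cookie instead of a monotonically increasing counter. Below
is a minimal standalone sketch of that lifecycle; sketch_data, sketch_map()
and sketch_unmap() are hypothetical stand-ins for struct mmc_data,
dma_map_sg() and dma_unmap_sg(), and only the state transitions mirror the
driver.

#include <stdio.h>

enum cookie { COOKIE_UNMAPPED = 0, COOKIE_PREMAPPED, COOKIE_MAPPED };

/* Hypothetical stand-in for struct mmc_data. */
struct sketch_data {
	enum cookie host_cookie;
	int sg_count;
};

/* Stand-in for jz4740_mmc_prepare_dma_data(): maps the buffer unless
 * pre_req already did, and records who mapped it via the cookie. */
static int sketch_map(struct sketch_data *d, enum cookie c)
{
	if (d->host_cookie == COOKIE_PREMAPPED)
		return d->sg_count;	/* already mapped by pre_req */
	d->sg_count = 1;		/* pretend dma_map_sg() mapped one segment */
	d->host_cookie = c;
	return d->sg_count;
}

/* Stand-in for jz4740_mmc_dma_unmap(). */
static void sketch_unmap(struct sketch_data *d)
{
	d->host_cookie = COOKIE_UNMAPPED;
}

int main(void)
{
	struct sketch_data d = { COOKIE_UNMAPPED, 0 };

	sketch_map(&d, COOKIE_PREMAPPED);	/* pre_req: map ahead of time */
	sketch_map(&d, COOKIE_MAPPED);		/* submit path: no second mapping */
	printf("after submit: %d (still PREMAPPED)\n", d.host_cookie);
	sketch_unmap(&d);			/* post_req: unmap */
	printf("after post_req: %d (UNMAPPED)\n", d.host_cookie);
	return 0;
}

The key property the sketch shows: a buffer premapped in pre_req is reused by
the submit path without a second mapping, so no cookie counter bookkeeping is
needed, which is exactly what makes the "invalid cookie" warning impossible.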
Diffstat (limited to 'drivers/mmc')
 drivers/mmc/host/jz4740_mmc.c | 118 +++++++++++++++++-------------------
 1 file changed, 54 insertions(+), 64 deletions(-)
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 6f7a99e54af0..e82b0e14822a 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -126,9 +126,23 @@ enum jz4740_mmc_state {
 	JZ4740_MMC_STATE_DONE,
 };
 
-struct jz4740_mmc_host_next {
-	int sg_len;
-	s32 cookie;
+/*
+ * The MMC core allows to prepare a mmc_request while another mmc_request
+ * is in-flight. This is used via the pre_req/post_req hooks.
+ * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
+ * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
+ * flags to keep track of the mmc_request mapping state.
+ *
+ * COOKIE_UNMAPPED: the request is not mapped.
+ * COOKIE_PREMAPPED: the request was mapped in pre_req,
+ * and should be unmapped in post_req.
+ * COOKIE_MAPPED: the request was mapped in the irq handler,
+ * and should be unmapped before mmc_request_done is called.
+ */
+enum jz4780_cookie {
+	COOKIE_UNMAPPED = 0,
+	COOKIE_PREMAPPED,
+	COOKIE_MAPPED,
 };
 
 struct jz4740_mmc_host {
@@ -163,9 +177,7 @@ struct jz4740_mmc_host {
 	/* DMA support */
 	struct dma_chan *dma_rx;
 	struct dma_chan *dma_tx;
-	struct jz4740_mmc_host_next next_data;
 	bool use_dma;
-	int sg_len;
 
 /* The DMA trigger level is 8 words, that is to say, the DMA read
  * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
@@ -227,9 +239,6 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
 		return PTR_ERR(host->dma_rx);
 	}
 
-	/* Initialize DMA pre request cookie */
-	host->next_data.cookie = 1;
-
 	return 0;
 }
 
@@ -246,60 +255,44 @@ static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
 	enum dma_data_direction dir = mmc_get_dma_dir(data);
 
 	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+	data->host_cookie = COOKIE_UNMAPPED;
 }
 
-/* Prepares DMA data for current/next transfer, returns non-zero on failure */
+/* Prepares DMA data for current or next transfer.
+ * A request can be in-flight when this is called.
+ */
 static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
 					struct mmc_data *data,
-					struct jz4740_mmc_host_next *next,
-					struct dma_chan *chan)
+					int cookie)
 {
-	struct jz4740_mmc_host_next *next_data = &host->next_data;
+	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 	enum dma_data_direction dir = mmc_get_dma_dir(data);
-	int sg_len;
-
-	if (!next && data->host_cookie &&
-	    data->host_cookie != host->next_data.cookie) {
-		dev_warn(mmc_dev(host->mmc),
-			 "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
-			 __func__,
-			 data->host_cookie,
-			 host->next_data.cookie);
-		data->host_cookie = 0;
-	}
+	int sg_count;
 
-	/* Check if next job is already prepared */
-	if (next || data->host_cookie != host->next_data.cookie) {
-		sg_len = dma_map_sg(chan->device->dev,
-				    data->sg,
-				    data->sg_len,
-				    dir);
+	if (data->host_cookie == COOKIE_PREMAPPED)
+		return data->sg_count;
 
-	} else {
-		sg_len = next_data->sg_len;
-		next_data->sg_len = 0;
-	}
+	sg_count = dma_map_sg(chan->device->dev,
+			      data->sg,
+			      data->sg_len,
+			      dir);
 
-	if (sg_len <= 0) {
+	if (sg_count <= 0) {
 		dev_err(mmc_dev(host->mmc),
 			"Failed to map scatterlist for DMA operation\n");
 		return -EINVAL;
 	}
 
-	if (next) {
-		next->sg_len = sg_len;
-		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
-	} else
-		host->sg_len = sg_len;
+	data->sg_count = sg_count;
+	data->host_cookie = cookie;
 
-	return 0;
+	return data->sg_count;
 }
 
 static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 					 struct mmc_data *data)
 {
-	int ret;
-	struct dma_chan *chan;
+	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 	struct dma_async_tx_descriptor *desc;
 	struct dma_slave_config conf = {
 		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
@@ -307,29 +300,26 @@ static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 	};
+	int sg_count;
 
 	if (data->flags & MMC_DATA_WRITE) {
 		conf.direction = DMA_MEM_TO_DEV;
 		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
 		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
-		chan = host->dma_tx;
 	} else {
 		conf.direction = DMA_DEV_TO_MEM;
 		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
 		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
-		chan = host->dma_rx;
 	}
 
-	ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
-	if (ret)
-		return ret;
+	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
+	if (sg_count < 0)
+		return sg_count;
 
 	dmaengine_slave_config(chan, &conf);
-	desc = dmaengine_prep_slave_sg(chan,
-				       data->sg,
-				       host->sg_len,
-				       conf.direction,
-				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
+				       conf.direction,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dev_err(mmc_dev(host->mmc),
 			"Failed to allocate DMA %s descriptor",
@@ -343,7 +333,8 @@ static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 	return 0;
 
 dma_unmap:
-	jz4740_mmc_dma_unmap(host, data);
+	if (data->host_cookie == COOKIE_MAPPED)
+		jz4740_mmc_dma_unmap(host, data);
 	return -ENOMEM;
 }
 
@@ -352,16 +343,13 @@ static void jz4740_mmc_pre_request(struct mmc_host *mmc,
 {
 	struct jz4740_mmc_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
-	struct jz4740_mmc_host_next *next_data = &host->next_data;
 
-	BUG_ON(data->host_cookie);
-
-	if (host->use_dma) {
-		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+	if (!host->use_dma)
+		return;
 
-		if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
-			data->host_cookie = 0;
-	}
+	data->host_cookie = COOKIE_UNMAPPED;
+	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
+		data->host_cookie = COOKIE_UNMAPPED;
 }
 
 static void jz4740_mmc_post_request(struct mmc_host *mmc,
@@ -371,10 +359,8 @@ static void jz4740_mmc_post_request(struct mmc_host *mmc,
 	struct jz4740_mmc_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
 
-	if (host->use_dma && data->host_cookie) {
+	if (data && data->host_cookie != COOKIE_UNMAPPED)
 		jz4740_mmc_dma_unmap(host, data);
-		data->host_cookie = 0;
-	}
 
 	if (err) {
 		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
@@ -437,10 +423,14 @@ static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
 static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
 {
 	struct mmc_request *req;
+	struct mmc_data *data;
 
 	req = host->req;
+	data = req->data;
 	host->req = NULL;
 
+	if (data && data->host_cookie == COOKIE_MAPPED)
+		jz4740_mmc_dma_unmap(host, data);
 	mmc_request_done(host->mmc, req);
 }
 