summaryrefslogtreecommitdiffstats
path: root/drivers/spi/spi-omap2-mcspi.c
diff options
context:
space:
mode:
authorFranklin S Cooper Jr <fcooper@ti.com>2016-07-07 13:17:50 -0400
committerMark Brown <broonie@kernel.org>2016-07-08 04:48:02 -0400
commit0ba1870f886501beca0e2c19ec367a85ae201ea8 (patch)
tree7774d58c85024187752dd10adaebd0aedaa54065 /drivers/spi/spi-omap2-mcspi.c
parent2b32e987c48c65a1a40b3b4294435f761e063b6b (diff)
spi: omap2-mcspi: Use the SPI framework to handle DMA mapping
Currently, the driver handles mapping buffers to be used by the DMA. However, there are times that the current mapping implementation will fail for certain buffers. Fortunately, the SPI framework can detect and map buffers so it's usable by the DMA. Update the driver to utilize the SPI framework for buffer mapping instead. Also incorporate hooks that the framework uses to determine if the DMA can or cannot be used. This will result in the original omap2_mcspi_transfer_one function being deleted and omap2_mcspi_work_one being renamed to omap2_mcspi_transfer_one. Previously transfer_one was only responsible for mapping and work_one handled the transfer. But now only transferring needs to be handled by the driver. Signed-off-by: Franklin S Cooper Jr <fcooper@ti.com> Signed-off-by: Mark Brown <broonie@kernel.org>
Diffstat (limited to 'drivers/spi/spi-omap2-mcspi.c')
-rw-r--r--drivers/spi/spi-omap2-mcspi.c132
1 files changed, 56 insertions, 76 deletions
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index c47f95879833..d5157b2222ce 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -419,16 +419,13 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
419 419
420 if (mcspi_dma->dma_tx) { 420 if (mcspi_dma->dma_tx) {
421 struct dma_async_tx_descriptor *tx; 421 struct dma_async_tx_descriptor *tx;
422 struct scatterlist sg;
423 422
424 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); 423 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
425 424
426 sg_init_table(&sg, 1); 425 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
427 sg_dma_address(&sg) = xfer->tx_dma; 426 xfer->tx_sg.nents,
428 sg_dma_len(&sg) = xfer->len; 427 DMA_MEM_TO_DEV,
429 428 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
430 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
431 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
432 if (tx) { 429 if (tx) {
433 tx->callback = omap2_mcspi_tx_callback; 430 tx->callback = omap2_mcspi_tx_callback;
434 tx->callback_param = spi; 431 tx->callback_param = spi;
@@ -449,7 +446,10 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
449{ 446{
450 struct omap2_mcspi *mcspi; 447 struct omap2_mcspi *mcspi;
451 struct omap2_mcspi_dma *mcspi_dma; 448 struct omap2_mcspi_dma *mcspi_dma;
452 unsigned int count, dma_count; 449 unsigned int count, transfer_reduction = 0;
450 struct scatterlist *sg_out[2];
451 int nb_sizes = 0, out_mapped_nents[2], ret, x;
452 size_t sizes[2];
453 u32 l; 453 u32 l;
454 int elements = 0; 454 int elements = 0;
455 int word_len, element_count; 455 int word_len, element_count;
@@ -457,7 +457,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
457 mcspi = spi_master_get_devdata(spi->master); 457 mcspi = spi_master_get_devdata(spi->master);
458 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 458 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
459 count = xfer->len; 459 count = xfer->len;
460 dma_count = xfer->len;
461 460
462 /* 461 /*
463 * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM 462 * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
@@ -465,7 +464,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
465 * normal mode. 464 * normal mode.
466 */ 465 */
467 if (mcspi->fifo_depth == 0) 466 if (mcspi->fifo_depth == 0)
468 dma_count -= es; 467 transfer_reduction = es;
469 468
470 word_len = cs->word_len; 469 word_len = cs->word_len;
471 l = mcspi_cached_chconf0(spi); 470 l = mcspi_cached_chconf0(spi);
@@ -479,7 +478,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
479 478
480 if (mcspi_dma->dma_rx) { 479 if (mcspi_dma->dma_rx) {
481 struct dma_async_tx_descriptor *tx; 480 struct dma_async_tx_descriptor *tx;
482 struct scatterlist sg;
483 481
484 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); 482 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
485 483
@@ -488,15 +486,38 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
488 * configured in turbo mode. 486 * configured in turbo mode.
489 */ 487 */
490 if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0) 488 if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
491 dma_count -= es; 489 transfer_reduction += es;
492 490
493 sg_init_table(&sg, 1); 491 if (transfer_reduction) {
494 sg_dma_address(&sg) = xfer->rx_dma; 492 /* Split sgl into two. The second sgl won't be used. */
495 sg_dma_len(&sg) = dma_count; 493 sizes[0] = count - transfer_reduction;
494 sizes[1] = transfer_reduction;
495 nb_sizes = 2;
496 } else {
497 /*
498 * Don't bother splitting the sgl. This essentially
499 * clones the original sgl.
500 */
501 sizes[0] = count;
502 nb_sizes = 1;
503 }
504
505 ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents,
506 0, nb_sizes,
507 sizes,
508 sg_out, out_mapped_nents,
509 GFP_KERNEL);
496 510
497 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1, 511 if (ret < 0) {
498 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | 512 dev_err(&spi->dev, "sg_split failed\n");
499 DMA_CTRL_ACK); 513 return 0;
514 }
515
516 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx,
517 sg_out[0],
518 out_mapped_nents[0],
519 DMA_DEV_TO_MEM,
520 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
500 if (tx) { 521 if (tx) {
501 tx->callback = omap2_mcspi_rx_callback; 522 tx->callback = omap2_mcspi_rx_callback;
502 tx->callback_param = spi; 523 tx->callback_param = spi;
@@ -510,8 +531,9 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
510 omap2_mcspi_set_dma_req(spi, 1, 1); 531 omap2_mcspi_set_dma_req(spi, 1, 1);
511 532
512 wait_for_completion(&mcspi_dma->dma_rx_completion); 533 wait_for_completion(&mcspi_dma->dma_rx_completion);
513 dma_unmap_single(mcspi->dev, xfer->rx_dma, count, 534
514 DMA_FROM_DEVICE); 535 for (x = 0; x < nb_sizes; x++)
536 kfree(sg_out[x]);
515 537
516 if (mcspi->fifo_depth > 0) 538 if (mcspi->fifo_depth > 0)
517 return count; 539 return count;
@@ -628,8 +650,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
628 650
629 if (tx != NULL) { 651 if (tx != NULL) {
630 wait_for_completion(&mcspi_dma->dma_tx_completion); 652 wait_for_completion(&mcspi_dma->dma_tx_completion);
631 dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
632 DMA_TO_DEVICE);
633 653
634 if (mcspi->fifo_depth > 0) { 654 if (mcspi->fifo_depth > 0) {
635 irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS; 655 irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1087,8 +1107,9 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
1087 gpio_free(spi->cs_gpio); 1107 gpio_free(spi->cs_gpio);
1088} 1108}
1089 1109
1090static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi, 1110static int omap2_mcspi_transfer_one(struct spi_master *master,
1091 struct spi_device *spi, struct spi_transfer *t) 1111 struct spi_device *spi,
1112 struct spi_transfer *t)
1092{ 1113{
1093 1114
1094 /* We only enable one channel at a time -- the one whose message is 1115 /* We only enable one channel at a time -- the one whose message is
@@ -1098,7 +1119,7 @@ static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1098 * chipselect with the FORCE bit ... CS != channel enable. 1119 * chipselect with the FORCE bit ... CS != channel enable.
1099 */ 1120 */
1100 1121
1101 struct spi_master *master; 1122 struct omap2_mcspi *mcspi;
1102 struct omap2_mcspi_dma *mcspi_dma; 1123 struct omap2_mcspi_dma *mcspi_dma;
1103 struct omap2_mcspi_cs *cs; 1124 struct omap2_mcspi_cs *cs;
1104 struct omap2_mcspi_device_config *cd; 1125 struct omap2_mcspi_device_config *cd;
@@ -1106,7 +1127,7 @@ static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1106 int status = 0; 1127 int status = 0;
1107 u32 chconf; 1128 u32 chconf;
1108 1129
1109 master = spi->master; 1130 mcspi = spi_master_get_devdata(master);
1110 mcspi_dma = mcspi->dma_channels + spi->chip_select; 1131 mcspi_dma = mcspi->dma_channels + spi->chip_select;
1111 cs = spi->controller_state; 1132 cs = spi->controller_state;
1112 cd = spi->controller_data; 1133 cd = spi->controller_data;
@@ -1166,7 +1187,8 @@ static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1166 unsigned count; 1187 unsigned count;
1167 1188
1168 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1189 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1169 (t->len >= DMA_MIN_BYTES)) 1190 master->cur_msg_mapped &&
1191 master->can_dma(master, spi, t))
1170 omap2_mcspi_set_fifo(spi, t, 1); 1192 omap2_mcspi_set_fifo(spi, t, 1);
1171 1193
1172 omap2_mcspi_set_enable(spi, 1); 1194 omap2_mcspi_set_enable(spi, 1);
@@ -1177,7 +1199,8 @@ static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1177 + OMAP2_MCSPI_TX0); 1199 + OMAP2_MCSPI_TX0);
1178 1200
1179 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1201 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1180 (t->len >= DMA_MIN_BYTES)) 1202 master->cur_msg_mapped &&
1203 master->can_dma(master, spi, t))
1181 count = omap2_mcspi_txrx_dma(spi, t); 1204 count = omap2_mcspi_txrx_dma(spi, t);
1182 else 1205 else
1183 count = omap2_mcspi_txrx_pio(spi, t); 1206 count = omap2_mcspi_txrx_pio(spi, t);
@@ -1246,55 +1269,11 @@ static int omap2_mcspi_prepare_message(struct spi_master *master,
1246 return 0; 1269 return 0;
1247} 1270}
1248 1271
1249static int omap2_mcspi_transfer_one(struct spi_master *master, 1272static bool omap2_mcspi_can_dma(struct spi_master *master,
1250 struct spi_device *spi, struct spi_transfer *t) 1273 struct spi_device *spi,
1274 struct spi_transfer *xfer)
1251{ 1275{
1252 struct omap2_mcspi *mcspi; 1276 return (xfer->len >= DMA_MIN_BYTES);
1253 struct omap2_mcspi_dma *mcspi_dma;
1254 const void *tx_buf = t->tx_buf;
1255 void *rx_buf = t->rx_buf;
1256 unsigned len = t->len;
1257
1258 mcspi = spi_master_get_devdata(master);
1259 mcspi_dma = mcspi->dma_channels + spi->chip_select;
1260
1261 if ((len && !(rx_buf || tx_buf))) {
1262 dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1263 t->speed_hz,
1264 len,
1265 tx_buf ? "tx" : "",
1266 rx_buf ? "rx" : "",
1267 t->bits_per_word);
1268 return -EINVAL;
1269 }
1270
1271 if (len < DMA_MIN_BYTES)
1272 goto skip_dma_map;
1273
1274 if (mcspi_dma->dma_tx && tx_buf != NULL) {
1275 t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1276 len, DMA_TO_DEVICE);
1277 if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1278 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1279 'T', len);
1280 return -EINVAL;
1281 }
1282 }
1283 if (mcspi_dma->dma_rx && rx_buf != NULL) {
1284 t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1285 DMA_FROM_DEVICE);
1286 if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1287 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1288 'R', len);
1289 if (tx_buf != NULL)
1290 dma_unmap_single(mcspi->dev, t->tx_dma,
1291 len, DMA_TO_DEVICE);
1292 return -EINVAL;
1293 }
1294 }
1295
1296skip_dma_map:
1297 return omap2_mcspi_work_one(mcspi, spi, t);
1298} 1277}
1299 1278
1300static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) 1279static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
@@ -1374,6 +1353,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1374 master->setup = omap2_mcspi_setup; 1353 master->setup = omap2_mcspi_setup;
1375 master->auto_runtime_pm = true; 1354 master->auto_runtime_pm = true;
1376 master->prepare_message = omap2_mcspi_prepare_message; 1355 master->prepare_message = omap2_mcspi_prepare_message;
1356 master->can_dma = omap2_mcspi_can_dma;
1377 master->transfer_one = omap2_mcspi_transfer_one; 1357 master->transfer_one = omap2_mcspi_transfer_one;
1378 master->set_cs = omap2_mcspi_set_cs; 1358 master->set_cs = omap2_mcspi_set_cs;
1379 master->cleanup = omap2_mcspi_cleanup; 1359 master->cleanup = omap2_mcspi_cleanup;