about summary refs log tree commit diff stats
path: root/drivers/spi/spi-sirf.c
diff options
context:
space:
mode:
authorQipan Li <Qipan.Li@csr.com>2014-04-14 02:30:00 -0400
committerMark Brown <broonie@linaro.org>2014-04-14 16:04:14 -0400
commitd77ec5df47696300b9498e6973dcc34b40de8d27 (patch)
tree370e5ed9f71f7cc6d688d372f8d5adc48e4710be /drivers/spi/spi-sirf.c
parent6ee8a2f7d5e78700b6e64799b5e9976b21cfad79 (diff)
spi: sirf: fix line over 80 characters style issue
Fix a large number of "line over 80 characters" checkpatch issues, which users of the driver — key customers — care about very much. Signed-off-by: Qipan Li <Qipan.Li@csr.com> Signed-off-by: Barry Song <Baohua.Song@csr.com> Signed-off-by: Mark Brown <broonie@linaro.org>
Diffstat (limited to 'drivers/spi/spi-sirf.c')
-rw-r--r--drivers/spi/spi-sirf.c51
1 file changed, 32 insertions, 19 deletions
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 67d8909dcf39..3c12f396d96a 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -382,14 +382,16 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
382 if (IS_DMA_VALID(t)) { 382 if (IS_DMA_VALID(t)) {
383 struct dma_async_tx_descriptor *rx_desc, *tx_desc; 383 struct dma_async_tx_descriptor *rx_desc, *tx_desc;
384 384
385 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE); 385 sspi->dst_start = dma_map_single(&spi->dev,
386 sspi->rx, t->len, DMA_FROM_DEVICE);
386 rx_desc = dmaengine_prep_slave_single(sspi->rx_chan, 387 rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
387 sspi->dst_start, t->len, DMA_DEV_TO_MEM, 388 sspi->dst_start, t->len, DMA_DEV_TO_MEM,
388 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 389 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
389 rx_desc->callback = spi_sirfsoc_dma_fini_callback; 390 rx_desc->callback = spi_sirfsoc_dma_fini_callback;
390 rx_desc->callback_param = &sspi->rx_done; 391 rx_desc->callback_param = &sspi->rx_done;
391 392
392 sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE); 393 sspi->src_start = dma_map_single(&spi->dev,
394 (void *)sspi->tx, t->len, DMA_TO_DEVICE);
393 tx_desc = dmaengine_prep_slave_single(sspi->tx_chan, 395 tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
394 sspi->src_start, t->len, DMA_MEM_TO_DEV, 396 sspi->src_start, t->len, DMA_MEM_TO_DEV,
395 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 397 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -404,13 +406,18 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
404 /* Send the first word to trigger the whole tx/rx process */ 406 /* Send the first word to trigger the whole tx/rx process */
405 sspi->tx_word(sspi); 407 sspi->tx_word(sspi);
406 408
407 writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN | 409 writel(SIRFSOC_SPI_RX_OFLOW_INT_EN |
408 SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN | 410 SIRFSOC_SPI_TX_UFLOW_INT_EN |
409 SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN | 411 SIRFSOC_SPI_RXFIFO_THD_INT_EN |
410 SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN); 412 SIRFSOC_SPI_TXFIFO_THD_INT_EN |
413 SIRFSOC_SPI_FRM_END_INT_EN |
414 SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
415 SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN,
416 sspi->base + SIRFSOC_SPI_INT_EN);
411 } 417 }
412 418
413 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN); 419 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
420 sspi->base + SIRFSOC_SPI_TX_RX_EN);
414 421
415 if (!IS_DMA_VALID(t)) { /* for PIO */ 422 if (!IS_DMA_VALID(t)) { /* for PIO */
416 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) 423 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0)
@@ -434,8 +441,10 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
434 } 441 }
435 442
436 if (IS_DMA_VALID(t)) { 443 if (IS_DMA_VALID(t)) {
437 dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE); 444 dma_unmap_single(&spi->dev,
438 dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE); 445 sspi->src_start, t->len, DMA_TO_DEVICE);
446 dma_unmap_single(&spi->dev,
447 sspi->dst_start, t->len, DMA_FROM_DEVICE);
439 } 448 }
440 449
441 /* TX, RX FIFO stop */ 450 /* TX, RX FIFO stop */
@@ -512,7 +521,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
512 break; 521 break;
513 case 12: 522 case 12:
514 case 16: 523 case 16:
515 regval |= (bits_per_word == 12) ? SIRFSOC_SPI_TRAN_DAT_FORMAT_12 : 524 regval |= (bits_per_word == 12) ?
525 SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
516 SIRFSOC_SPI_TRAN_DAT_FORMAT_16; 526 SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
517 sspi->rx_word = spi_sirfsoc_rx_word_u16; 527 sspi->rx_word = spi_sirfsoc_rx_word_u16;
518 sspi->tx_word = spi_sirfsoc_tx_word_u16; 528 sspi->tx_word = spi_sirfsoc_tx_word_u16;
@@ -540,8 +550,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
540 regval |= SIRFSOC_SPI_CLK_IDLE_STAT; 550 regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
541 551
542 /* 552 /*
543 * Data should be driven at least 1/2 cycle before the fetch edge to make 553 * Data should be driven at least 1/2 cycle before the fetch edge
544 * sure that data gets stable at the fetch edge. 554 * to make sure that data gets stable at the fetch edge.
545 */ 555 */
546 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) || 556 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
547 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) 557 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
@@ -578,11 +588,14 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
578 if (IS_DMA_VALID(t)) { 588 if (IS_DMA_VALID(t)) {
579 /* Enable DMA mode for RX, TX */ 589 /* Enable DMA mode for RX, TX */
580 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); 590 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
581 writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); 591 writel(SIRFSOC_SPI_RX_DMA_FLUSH,
592 sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
582 } else { 593 } else {
583 /* Enable IO mode for RX, TX */ 594 /* Enable IO mode for RX, TX */
584 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); 595 writel(SIRFSOC_SPI_IO_MODE_SEL,
585 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); 596 sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
597 writel(SIRFSOC_SPI_IO_MODE_SEL,
598 sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
586 } 599 }
587 600
588 return 0; 601 return 0;
@@ -612,7 +625,8 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
612 goto err_cs; 625 goto err_cs;
613 } 626 }
614 627
615 master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs); 628 master = spi_alloc_master(&pdev->dev,
629 sizeof(*sspi) + sizeof(int) * num_cs);
616 if (!master) { 630 if (!master) {
617 dev_err(&pdev->dev, "Unable to allocate SPI master\n"); 631 dev_err(&pdev->dev, "Unable to allocate SPI master\n");
618 return -ENOMEM; 632 return -ENOMEM;
@@ -808,8 +822,7 @@ static struct platform_driver spi_sirfsoc_driver = {
808 .remove = spi_sirfsoc_remove, 822 .remove = spi_sirfsoc_remove,
809}; 823};
810module_platform_driver(spi_sirfsoc_driver); 824module_platform_driver(spi_sirfsoc_driver);
811
812MODULE_DESCRIPTION("SiRF SoC SPI master driver"); 825MODULE_DESCRIPTION("SiRF SoC SPI master driver");
813MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, " 826MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
814 "Barry Song <Baohua.Song@csr.com>"); 827MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
815MODULE_LICENSE("GPL v2"); 828MODULE_LICENSE("GPL v2");