aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/spi
diff options
context:
space:
mode:
authorVignesh R <vigneshr@ti.com>2015-10-13 06:21:05 -0400
committerMark Brown <broonie@kernel.org>2015-10-16 14:08:06 -0400
commit57c2ecd9bf971946ea0c6ae90a79c90a22159c73 (patch)
treea27454ff77f3d5c5e8747d54a5f132ccb7191b47 /drivers/spi
parent6ff33f3902c3b1c5d0db6b1e2c70b6d76fba357f (diff)
spi: spi-ti-qspi: switch to polling mode for better r/w performance
Currently the word completion interrupt is fired for the transfer of every word (8-bit to 128-bit in size). This adds a lot of overhead and decreases r/w throughput. It hardly takes 3us (@48MHz) for a 128-bit r/w to complete, hence it's better to poll on the word complete bit to be set in QSPI_SPI_STATUS_REG instead of using interrupts. This increases the throughput by 30% in both the read and write case. So, switch to polling mode instead of interrupts to determine completion of word transfer. Signed-off-by: Vignesh R <vigneshr@ti.com> Signed-off-by: Mark Brown <broonie@kernel.org>
Diffstat (limited to 'drivers/spi')
-rw-r--r--drivers/spi/spi-ti-qspi.c74
1 files changed, 20 insertions, 54 deletions
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index aa6d284131e0..89cf0c821524 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -39,8 +39,6 @@ struct ti_qspi_regs {
39}; 39};
40 40
41struct ti_qspi { 41struct ti_qspi {
42 struct completion transfer_complete;
43
44 /* list synchronization */ 42 /* list synchronization */
45 struct mutex list_lock; 43 struct mutex list_lock;
46 44
@@ -62,10 +60,6 @@ struct ti_qspi {
62 60
63#define QSPI_PID (0x0) 61#define QSPI_PID (0x0)
64#define QSPI_SYSCONFIG (0x10) 62#define QSPI_SYSCONFIG (0x10)
65#define QSPI_INTR_STATUS_RAW_SET (0x20)
66#define QSPI_INTR_STATUS_ENABLED_CLEAR (0x24)
67#define QSPI_INTR_ENABLE_SET_REG (0x28)
68#define QSPI_INTR_ENABLE_CLEAR_REG (0x2c)
69#define QSPI_SPI_CLOCK_CNTRL_REG (0x40) 63#define QSPI_SPI_CLOCK_CNTRL_REG (0x40)
70#define QSPI_SPI_DC_REG (0x44) 64#define QSPI_SPI_DC_REG (0x44)
71#define QSPI_SPI_CMD_REG (0x48) 65#define QSPI_SPI_CMD_REG (0x48)
@@ -97,7 +91,6 @@ struct ti_qspi {
97#define QSPI_RD_DUAL (3 << 16) 91#define QSPI_RD_DUAL (3 << 16)
98#define QSPI_RD_QUAD (7 << 16) 92#define QSPI_RD_QUAD (7 << 16)
99#define QSPI_INVAL (4 << 16) 93#define QSPI_INVAL (4 << 16)
100#define QSPI_WC_CMD_INT_EN (1 << 14)
101#define QSPI_FLEN(n) ((n - 1) << 0) 94#define QSPI_FLEN(n) ((n - 1) << 0)
102#define QSPI_WLEN_MAX_BITS 128 95#define QSPI_WLEN_MAX_BITS 128
103#define QSPI_WLEN_MAX_BYTES 16 96#define QSPI_WLEN_MAX_BYTES 16
@@ -106,10 +99,6 @@ struct ti_qspi {
106#define BUSY 0x01 99#define BUSY 0x01
107#define WC 0x02 100#define WC 0x02
108 101
109/* INTERRUPT REGISTER */
110#define QSPI_WC_INT_EN (1 << 1)
111#define QSPI_WC_INT_DISABLE (1 << 1)
112
113/* Device Control */ 102/* Device Control */
114#define QSPI_DD(m, n) (m << (3 + n * 8)) 103#define QSPI_DD(m, n) (m << (3 + n * 8))
115#define QSPI_CKPHA(n) (1 << (2 + n * 8)) 104#define QSPI_CKPHA(n) (1 << (2 + n * 8))
@@ -217,6 +206,24 @@ static inline u32 qspi_is_busy(struct ti_qspi *qspi)
217 return stat & BUSY; 206 return stat & BUSY;
218} 207}
219 208
209static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
210{
211 u32 stat;
212 unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
213
214 do {
215 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
216 if (stat & WC)
217 return 0;
218 cpu_relax();
219 } while (time_after(timeout, jiffies));
220
221 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
222 if (stat & WC)
223 return 0;
224 return -ETIMEDOUT;
225}
226
220static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) 227static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
221{ 228{
222 int wlen, count, xfer_len; 229 int wlen, count, xfer_len;
@@ -275,8 +282,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
275 } 282 }
276 283
277 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 284 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
278 if (!wait_for_completion_timeout(&qspi->transfer_complete, 285 if (ti_qspi_poll_wc(qspi)) {
279 QSPI_COMPLETION_TIMEOUT)) {
280 dev_err(qspi->dev, "write timed out\n"); 286 dev_err(qspi->dev, "write timed out\n");
281 return -ETIMEDOUT; 287 return -ETIMEDOUT;
282 } 288 }
@@ -315,8 +321,7 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
315 return -EBUSY; 321 return -EBUSY;
316 322
317 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 323 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
318 if (!wait_for_completion_timeout(&qspi->transfer_complete, 324 if (ti_qspi_poll_wc(qspi)) {
319 QSPI_COMPLETION_TIMEOUT)) {
320 dev_err(qspi->dev, "read timed out\n"); 325 dev_err(qspi->dev, "read timed out\n");
321 return -ETIMEDOUT; 326 return -ETIMEDOUT;
322 } 327 }
@@ -388,9 +393,7 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
388 qspi->cmd = 0; 393 qspi->cmd = 0;
389 qspi->cmd |= QSPI_EN_CS(spi->chip_select); 394 qspi->cmd |= QSPI_EN_CS(spi->chip_select);
390 qspi->cmd |= QSPI_FLEN(frame_length); 395 qspi->cmd |= QSPI_FLEN(frame_length);
391 qspi->cmd |= QSPI_WC_CMD_INT_EN;
392 396
393 ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
394 ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG); 397 ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
395 398
396 mutex_lock(&qspi->list_lock); 399 mutex_lock(&qspi->list_lock);
@@ -418,31 +421,6 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
418 return status; 421 return status;
419} 422}
420 423
421static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
422{
423 struct ti_qspi *qspi = dev_id;
424 u16 int_stat;
425 u32 stat;
426
427 irqreturn_t ret = IRQ_HANDLED;
428
429 int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
430 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
431
432 if (!int_stat) {
433 dev_dbg(qspi->dev, "No IRQ triggered\n");
434 ret = IRQ_NONE;
435 goto out;
436 }
437
438 ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
439 QSPI_INTR_STATUS_ENABLED_CLEAR);
440 if (stat & WC)
441 complete(&qspi->transfer_complete);
442out:
443 return ret;
444}
445
446static int ti_qspi_runtime_resume(struct device *dev) 424static int ti_qspi_runtime_resume(struct device *dev)
447{ 425{
448 struct ti_qspi *qspi; 426 struct ti_qspi *qspi;
@@ -551,22 +529,12 @@ static int ti_qspi_probe(struct platform_device *pdev)
551 } 529 }
552 } 530 }
553 531
554 ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
555 dev_name(&pdev->dev), qspi);
556 if (ret < 0) {
557 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
558 irq);
559 goto free_master;
560 }
561
562 qspi->fclk = devm_clk_get(&pdev->dev, "fck"); 532 qspi->fclk = devm_clk_get(&pdev->dev, "fck");
563 if (IS_ERR(qspi->fclk)) { 533 if (IS_ERR(qspi->fclk)) {
564 ret = PTR_ERR(qspi->fclk); 534 ret = PTR_ERR(qspi->fclk);
565 dev_err(&pdev->dev, "could not get clk: %d\n", ret); 535 dev_err(&pdev->dev, "could not get clk: %d\n", ret);
566 } 536 }
567 537
568 init_completion(&qspi->transfer_complete);
569
570 pm_runtime_use_autosuspend(&pdev->dev); 538 pm_runtime_use_autosuspend(&pdev->dev);
571 pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT); 539 pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
572 pm_runtime_enable(&pdev->dev); 540 pm_runtime_enable(&pdev->dev);
@@ -596,8 +564,6 @@ static int ti_qspi_remove(struct platform_device *pdev)
596 return ret; 564 return ret;
597 } 565 }
598 566
599 ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
600
601 pm_runtime_put(qspi->dev); 567 pm_runtime_put(qspi->dev);
602 pm_runtime_disable(&pdev->dev); 568 pm_runtime_disable(&pdev->dev);
603 569