Diffstat (limited to 'drivers/spi/spi-bcm2835.c')
-rw-r--r--	drivers/spi/spi-bcm2835.c	328
1 file changed, 204 insertions(+), 124 deletions(-)
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 402c1efcd762..6f243a90c844 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -13,6 +13,7 @@
 
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
@@ -64,14 +65,18 @@
 
 #define BCM2835_SPI_FIFO_SIZE		64
 #define BCM2835_SPI_FIFO_SIZE_3_4	48
-#define BCM2835_SPI_POLLING_LIMIT_US	30
-#define BCM2835_SPI_POLLING_JIFFIES	2
 #define BCM2835_SPI_DMA_MIN_LENGTH	96
 #define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
 				| SPI_NO_CS | SPI_3WIRE)
 
 #define DRV_NAME	"spi-bcm2835"
 
+/* define polling limits */
+unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+		 "time in us to run a transfer in polling mode\n");
+
 /**
  * struct bcm2835_spi - BCM2835 SPI controller
  * @regs: base address of register map
@@ -88,6 +93,15 @@
  * length is not a multiple of 4 (to overcome hardware limitation)
  * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
  * @dma_pending: whether a DMA transfer is in progress
+ * @debugfs_dir: the debugfs directory - needed to remove debugfs when
+ *	unloading the module
+ * @count_transfer_polling: count of how often polling mode is used
+ * @count_transfer_irq: count of how often interrupt mode is used
+ * @count_transfer_irq_after_polling: count of how often we fall back to
+ *	interrupt mode after starting in polling mode.
+ *	These are counted as well in @count_transfer_polling and
+ *	@count_transfer_irq
+ * @count_transfer_dma: count how often dma mode is used
  */
 struct bcm2835_spi {
 	void __iomem *regs;
@@ -102,8 +116,55 @@ struct bcm2835_spi {
 	int rx_prologue;
 	unsigned int tx_spillover;
 	unsigned int dma_pending;
+
+	struct dentry *debugfs_dir;
+	u64 count_transfer_polling;
+	u64 count_transfer_irq;
+	u64 count_transfer_irq_after_polling;
+	u64 count_transfer_dma;
 };
 
+#if defined(CONFIG_DEBUG_FS)
+static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
+				   const char *dname)
+{
+	char name[64];
+	struct dentry *dir;
+
+	/* get full name */
+	snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
+
+	/* the base directory */
+	dir = debugfs_create_dir(name, NULL);
+	bs->debugfs_dir = dir;
+
+	/* the counters */
+	debugfs_create_u64("count_transfer_polling", 0444, dir,
+			   &bs->count_transfer_polling);
+	debugfs_create_u64("count_transfer_irq", 0444, dir,
+			   &bs->count_transfer_irq);
+	debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
+			   &bs->count_transfer_irq_after_polling);
+	debugfs_create_u64("count_transfer_dma", 0444, dir,
+			   &bs->count_transfer_dma);
+}
+
+static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
+{
+	debugfs_remove_recursive(bs->debugfs_dir);
+	bs->debugfs_dir = NULL;
+}
+#else
+static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
+				   const char *dname)
+{
+}
+
+static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
 static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
 {
 	return readl(bs->regs + reg);
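Illustrative note, not part of the patch: with CONFIG_DEBUG_FS enabled, the four counters above show up as read-only files under /sys/kernel/debug/spi-bcm2835-<device>/, where the suffix follows from the snprintf() pattern, i.e. the platform device name. A minimal userspace sketch for reading one of them; the device name "3f204000.spi" is an assumption and differs per board, and debugfs must be mounted:

#include <stdio.h>

int main(void)
{
	unsigned long long count;
	/* path assumed from the snprintf() pattern above; usually needs root */
	FILE *f = fopen("/sys/kernel/debug/spi-bcm2835-3f204000.spi/count_transfer_dma", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &count) == 1)
		printf("DMA transfers so far: %llu\n", count);
	fclose(f);
	return 0;
}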
@@ -248,9 +309,9 @@ static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
 	}
 }
 
-static void bcm2835_spi_reset_hw(struct spi_master *master)
+static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
 {
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 
 	/* Disable SPI interrupts and transfer */
@@ -269,8 +330,8 @@ static void bcm2835_spi_reset_hw(struct spi_master *master)
 
 static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
 {
-	struct spi_master *master = dev_id;
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct spi_controller *ctlr = dev_id;
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 
 	/*
@@ -292,20 +353,23 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
 
 	if (!bs->rx_len) {
 		/* Transfer complete - reset SPI HW */
-		bcm2835_spi_reset_hw(master);
+		bcm2835_spi_reset_hw(ctlr);
 		/* wake up the framework */
-		complete(&master->xfer_completion);
+		complete(&ctlr->xfer_completion);
 	}
 
 	return IRQ_HANDLED;
 }
 
-static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
+static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
 					struct spi_device *spi,
 					struct spi_transfer *tfr,
 					u32 cs, bool fifo_empty)
 {
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+	/* update usage statistics */
+	bs->count_transfer_irq++;
 
 	/*
 	 * Enable HW block, but with interrupts still disabled.
@@ -328,7 +392,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
 
 /**
  * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
- * @master: SPI master
+ * @ctlr: SPI master controller
  * @tfr: SPI transfer
  * @bs: BCM2835 SPI controller
  * @cs: CS register
@@ -372,7 +436,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
  * be transmitted in 32-bit width to ensure that the following DMA transfer can
  * pick up the residue in the RX FIFO in ungarbled form.
  */
-static void bcm2835_spi_transfer_prologue(struct spi_master *master,
+static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
 					  struct spi_transfer *tfr,
 					  struct bcm2835_spi *bs,
 					  u32 cs)
@@ -413,9 +477,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_master *master,
 		bcm2835_wr_fifo_count(bs, bs->rx_prologue);
 		bcm2835_wait_tx_fifo_empty(bs);
 		bcm2835_rd_fifo_count(bs, bs->rx_prologue);
-		bcm2835_spi_reset_hw(master);
+		bcm2835_spi_reset_hw(ctlr);
 
-		dma_sync_single_for_device(master->dma_rx->device->dev,
+		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
 					   sg_dma_address(&tfr->rx_sg.sgl[0]),
 					   bs->rx_prologue, DMA_FROM_DEVICE);
 
@@ -479,11 +543,11 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
 
 static void bcm2835_spi_dma_done(void *data)
 {
-	struct spi_master *master = data;
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct spi_controller *ctlr = data;
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 
 	/* reset fifo and HW */
-	bcm2835_spi_reset_hw(master);
+	bcm2835_spi_reset_hw(ctlr);
 
 	/* and terminate tx-dma as we do not have an irq for it
 	 * because when the rx dma will terminate and this callback
@@ -491,15 +555,15 @@ static void bcm2835_spi_dma_done(void *data)
 	 * situation otherwise...
 	 */
 	if (cmpxchg(&bs->dma_pending, true, false)) {
-		dmaengine_terminate_async(master->dma_tx);
+		dmaengine_terminate_async(ctlr->dma_tx);
 		bcm2835_spi_undo_prologue(bs);
 	}
 
 	/* and mark as completed */;
-	complete(&master->xfer_completion);
+	complete(&ctlr->xfer_completion);
 }
 
-static int bcm2835_spi_prepare_sg(struct spi_master *master,
+static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
 				  struct spi_transfer *tfr,
 				  bool is_tx)
 {
@@ -514,14 +578,14 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
 
 	if (is_tx) {
 		dir = DMA_MEM_TO_DEV;
-		chan = master->dma_tx;
+		chan = ctlr->dma_tx;
 		nents = tfr->tx_sg.nents;
 		sgl = tfr->tx_sg.sgl;
 		flags = 0 /* no tx interrupt */;
 
 	} else {
 		dir = DMA_DEV_TO_MEM;
-		chan = master->dma_rx;
+		chan = ctlr->dma_rx;
 		nents = tfr->rx_sg.nents;
 		sgl = tfr->rx_sg.sgl;
 		flags = DMA_PREP_INTERRUPT;
@@ -534,7 +598,7 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
 	/* set callback for rx */
 	if (!is_tx) {
 		desc->callback = bcm2835_spi_dma_done;
-		desc->callback_param = master;
+		desc->callback_param = ctlr;
 	}
 
 	/* submit it to DMA-engine */
@@ -543,27 +607,30 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
 	return dma_submit_error(cookie);
 }
 
-static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
+static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
 					struct spi_device *spi,
 					struct spi_transfer *tfr,
 					u32 cs)
 {
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 	int ret;
 
+	/* update usage statistics */
+	bs->count_transfer_dma++;
+
 	/*
 	 * Transfer first few bytes without DMA if length of first TX or RX
 	 * sglist entry is not a multiple of 4 bytes (hardware limitation).
 	 */
-	bcm2835_spi_transfer_prologue(master, tfr, bs, cs);
+	bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
 
 	/* setup tx-DMA */
-	ret = bcm2835_spi_prepare_sg(master, tfr, true);
+	ret = bcm2835_spi_prepare_sg(ctlr, tfr, true);
 	if (ret)
 		goto err_reset_hw;
 
 	/* start TX early */
-	dma_async_issue_pending(master->dma_tx);
+	dma_async_issue_pending(ctlr->dma_tx);
 
 	/* mark as dma pending */
 	bs->dma_pending = 1;
@@ -579,27 +646,27 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
 	 * mapping of the rx buffers still takes place
 	 * this saves 10us or more.
 	 */
-	ret = bcm2835_spi_prepare_sg(master, tfr, false);
+	ret = bcm2835_spi_prepare_sg(ctlr, tfr, false);
 	if (ret) {
 		/* need to reset on errors */
-		dmaengine_terminate_sync(master->dma_tx);
+		dmaengine_terminate_sync(ctlr->dma_tx);
 		bs->dma_pending = false;
 		goto err_reset_hw;
 	}
 
 	/* start rx dma late */
-	dma_async_issue_pending(master->dma_rx);
+	dma_async_issue_pending(ctlr->dma_rx);
 
 	/* wait for wakeup in framework */
 	return 1;
 
 err_reset_hw:
-	bcm2835_spi_reset_hw(master);
+	bcm2835_spi_reset_hw(ctlr);
 	bcm2835_spi_undo_prologue(bs);
 	return ret;
 }
 
-static bool bcm2835_spi_can_dma(struct spi_master *master,
+static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
 				struct spi_device *spi,
 				struct spi_transfer *tfr)
 {
@@ -611,21 +678,21 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
 	return true;
 }
 
-static void bcm2835_dma_release(struct spi_master *master)
+static void bcm2835_dma_release(struct spi_controller *ctlr)
 {
-	if (master->dma_tx) {
-		dmaengine_terminate_sync(master->dma_tx);
-		dma_release_channel(master->dma_tx);
-		master->dma_tx = NULL;
+	if (ctlr->dma_tx) {
+		dmaengine_terminate_sync(ctlr->dma_tx);
+		dma_release_channel(ctlr->dma_tx);
+		ctlr->dma_tx = NULL;
 	}
-	if (master->dma_rx) {
-		dmaengine_terminate_sync(master->dma_rx);
-		dma_release_channel(master->dma_rx);
-		master->dma_rx = NULL;
+	if (ctlr->dma_rx) {
+		dmaengine_terminate_sync(ctlr->dma_rx);
+		dma_release_channel(ctlr->dma_rx);
+		ctlr->dma_rx = NULL;
 	}
 }
 
-static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
+static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
 {
 	struct dma_slave_config slave_config;
 	const __be32 *addr;
@@ -633,7 +700,7 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
 	int ret;
 
 	/* base address in dma-space */
-	addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
+	addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
 	if (!addr) {
 		dev_err(dev, "could not get DMA-register address - not using dma mode\n");
 		goto err;
@@ -641,38 +708,36 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
 	dma_reg_base = be32_to_cpup(addr);
 
 	/* get tx/rx dma */
-	master->dma_tx = dma_request_slave_channel(dev, "tx");
-	if (!master->dma_tx) {
+	ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
+	if (!ctlr->dma_tx) {
 		dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
 		goto err;
 	}
-	master->dma_rx = dma_request_slave_channel(dev, "rx");
-	if (!master->dma_rx) {
+	ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
+	if (!ctlr->dma_rx) {
 		dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
 		goto err_release;
 	}
 
 	/* configure DMAs */
-	slave_config.direction = DMA_MEM_TO_DEV;
 	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
 	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
+	ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
 	if (ret)
 		goto err_config;
 
-	slave_config.direction = DMA_DEV_TO_MEM;
 	slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
 	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
+	ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
 	if (ret)
 		goto err_config;
 
 	/* all went well, so set can_dma */
-	master->can_dma = bcm2835_spi_can_dma;
+	ctlr->can_dma = bcm2835_spi_can_dma;
 	/* need to do TX AND RX DMA, so we need dummy buffers */
-	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+	ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
 
 	return;
 
@@ -680,20 +745,22 @@ err_config:
 	dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
 		ret);
 err_release:
-	bcm2835_dma_release(master);
+	bcm2835_dma_release(ctlr);
 err:
 	return;
 }
 
-static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
+static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
 					 struct spi_device *spi,
 					 struct spi_transfer *tfr,
-					 u32 cs,
-					 unsigned long long xfer_time_us)
+					 u32 cs)
 {
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 	unsigned long timeout;
 
+	/* update usage statistics */
+	bs->count_transfer_polling++;
+
 	/* enable HW block without interrupts */
 	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
 
@@ -703,8 +770,8 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
 	 */
 	bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
 
-	/* set the timeout */
-	timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES;
+	/* set the timeout to at least 2 jiffies */
+	timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
 
 	/* loop until finished the transfer */
 	while (bs->rx_len) {
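Illustrative note, not part of the patch: with integer arithmetic the new HZ * polling_limit_us / 1000000 term evaluates to zero for the default 30 us limit on any common CONFIG_HZ, so the deadline is effectively jiffies + 2; it only adds jiffies once the limit is raised towards the millisecond range. A small sketch of the arithmetic, assuming CONFIG_HZ = 250:

#include <stdio.h>

#define HZ 250	/* assumption - depends on the kernel's CONFIG_HZ */

int main(void)
{
	unsigned int limits[] = { 30, 200, 4000, 20000 };

	for (unsigned int i = 0; i < sizeof(limits) / sizeof(limits[0]); i++) {
		unsigned long extra = (unsigned long)HZ * limits[i] / 1000000;

		/* 2 jiffies of slack plus the scaled polling limit */
		printf("polling_limit_us=%u -> timeout = jiffies + %lu\n",
		       limits[i], 2 + extra);
	}
	return 0;
}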
@@ -723,25 +790,28 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
 				    jiffies - timeout,
 				    bs->tx_len, bs->rx_len);
 			/* fall back to interrupt mode */
-			return bcm2835_spi_transfer_one_irq(master, spi,
+
+			/* update usage statistics */
+			bs->count_transfer_irq_after_polling++;
+
+			return bcm2835_spi_transfer_one_irq(ctlr, spi,
 							    tfr, cs, false);
 		}
 	}
 
 	/* Transfer complete - reset SPI HW */
-	bcm2835_spi_reset_hw(master);
+	bcm2835_spi_reset_hw(ctlr);
 	/* and return without waiting for completion */
 	return 0;
 }
 
-static int bcm2835_spi_transfer_one(struct spi_master *master,
+static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
 				    struct spi_device *spi,
 				    struct spi_transfer *tfr)
 {
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
-	unsigned long spi_hz, clk_hz, cdiv;
-	unsigned long spi_used_hz;
-	unsigned long long xfer_time_us;
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+	unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
+	unsigned long hz_per_byte, byte_limit;
 	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 
 	/* set clock */
@@ -782,42 +852,49 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
 	bs->tx_len = tfr->len;
 	bs->rx_len = tfr->len;
 
-	/* calculate the estimated time in us the transfer runs */
-	xfer_time_us = (unsigned long long)tfr->len
-		* 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */
-		* 1000000;
-	do_div(xfer_time_us, spi_used_hz);
+	/* Calculate the estimated time in us the transfer runs.  Note that
+	 * there is one idle clock cycle after each byte getting transferred
+	 * so we have 9 cycles/byte.  This is used to find the number of Hz
+	 * per byte per polling limit.  E.g., we can transfer 1 byte in 30 us
+	 * per 300,000 Hz of bus clock.
+	 */
+	hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
+	byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
 
-	/* for short requests run polling*/
-	if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US)
-		return bcm2835_spi_transfer_one_poll(master, spi, tfr,
-						     cs, xfer_time_us);
+	/* run in polling mode for short transfers */
+	if (tfr->len < byte_limit)
+		return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);
 
-	/* run in dma mode if conditions are right */
-	if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
-		return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
+	/* run in dma mode if conditions are right
+	 * Note that unlike poll or interrupt mode DMA mode does not have
+	 * this one idle clock cycle pattern but runs the spi clock without gaps
+	 */
+	if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
+		return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
 
 	/* run in interrupt-mode */
-	return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs, true);
+	return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
 }
 
-static int bcm2835_spi_prepare_message(struct spi_master *master,
+static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
 				       struct spi_message *msg)
 {
 	struct spi_device *spi = msg->spi;
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 	int ret;
 
-	/*
-	 * DMA transfers are limited to 16 bit (0 to 65535 bytes) by the SPI HW
-	 * due to DLEN. Split up transfers (32-bit FIFO aligned) if the limit is
-	 * exceeded.
-	 */
-	ret = spi_split_transfers_maxsize(master, msg, 65532,
-					  GFP_KERNEL | GFP_DMA);
-	if (ret)
-		return ret;
+	if (ctlr->can_dma) {
+		/*
+		 * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
+		 * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
+		 * aligned) if the limit is exceeded.
+		 */
+		ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
+						  GFP_KERNEL | GFP_DMA);
+		if (ret)
+			return ret;
+	}
 
 	cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
 
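Illustrative note, not part of the patch: the new heuristic replaces the per-transfer do_div() with two integer divisions done once. With the default polling_limit_us = 30, hz_per_byte is 9,000,000 / 30 = 300,000, so at a 10 MHz effective bus clock byte_limit is 33 and any transfer shorter than 33 bytes is polled. A small sketch of the same math:

#include <stdio.h>

int main(void)
{
	unsigned long polling_limit_us = 30;	/* module parameter default */
	unsigned long clocks[] = { 500000, 2000000, 10000000, 25000000 };
	unsigned long hz_per_byte = (9 * 1000000) / polling_limit_us;	/* 300000 */

	for (unsigned long i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		unsigned long byte_limit = clocks[i] / hz_per_byte;

		/* transfers with tfr->len below this run in polling mode */
		printf("%8lu Hz -> byte_limit = %lu\n", clocks[i], byte_limit);
	}
	return 0;
}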
@@ -831,19 +908,19 @@ static int bcm2835_spi_prepare_message(struct spi_master *master,
 	return 0;
 }
 
-static void bcm2835_spi_handle_err(struct spi_master *master,
+static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
 				   struct spi_message *msg)
 {
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 
 	/* if an error occurred and we have an active dma, then terminate */
 	if (cmpxchg(&bs->dma_pending, true, false)) {
-		dmaengine_terminate_sync(master->dma_tx);
-		dmaengine_terminate_sync(master->dma_rx);
+		dmaengine_terminate_sync(ctlr->dma_tx);
+		dmaengine_terminate_sync(ctlr->dma_rx);
 		bcm2835_spi_undo_prologue(bs);
 	}
 	/* and reset */
-	bcm2835_spi_reset_hw(master);
+	bcm2835_spi_reset_hw(ctlr);
 }
 
 static int chip_match_name(struct gpio_chip *chip, void *data)
@@ -900,85 +977,88 @@ static int bcm2835_spi_setup(struct spi_device *spi)
 
 static int bcm2835_spi_probe(struct platform_device *pdev)
 {
-	struct spi_master *master;
+	struct spi_controller *ctlr;
 	struct bcm2835_spi *bs;
 	struct resource *res;
 	int err;
 
-	master = spi_alloc_master(&pdev->dev, sizeof(*bs));
-	if (!master) {
-		dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+	ctlr = spi_alloc_master(&pdev->dev, sizeof(*bs));
+	if (!ctlr)
 		return -ENOMEM;
-	}
 
-	platform_set_drvdata(pdev, master);
+	platform_set_drvdata(pdev, ctlr);
 
-	master->mode_bits = BCM2835_SPI_MODE_BITS;
-	master->bits_per_word_mask = SPI_BPW_MASK(8);
-	master->num_chipselect = 3;
-	master->setup = bcm2835_spi_setup;
-	master->transfer_one = bcm2835_spi_transfer_one;
-	master->handle_err = bcm2835_spi_handle_err;
-	master->prepare_message = bcm2835_spi_prepare_message;
-	master->dev.of_node = pdev->dev.of_node;
+	ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
+	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+	ctlr->num_chipselect = 3;
+	ctlr->setup = bcm2835_spi_setup;
+	ctlr->transfer_one = bcm2835_spi_transfer_one;
+	ctlr->handle_err = bcm2835_spi_handle_err;
+	ctlr->prepare_message = bcm2835_spi_prepare_message;
+	ctlr->dev.of_node = pdev->dev.of_node;
 
-	bs = spi_master_get_devdata(master);
+	bs = spi_controller_get_devdata(ctlr);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	bs->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(bs->regs)) {
 		err = PTR_ERR(bs->regs);
-		goto out_master_put;
+		goto out_controller_put;
 	}
 
 	bs->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(bs->clk)) {
 		err = PTR_ERR(bs->clk);
 		dev_err(&pdev->dev, "could not get clk: %d\n", err);
-		goto out_master_put;
+		goto out_controller_put;
 	}
 
 	bs->irq = platform_get_irq(pdev, 0);
 	if (bs->irq <= 0) {
 		dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
 		err = bs->irq ? bs->irq : -ENODEV;
-		goto out_master_put;
+		goto out_controller_put;
 	}
 
 	clk_prepare_enable(bs->clk);
 
-	bcm2835_dma_init(master, &pdev->dev);
+	bcm2835_dma_init(ctlr, &pdev->dev);
 
 	/* initialise the hardware with the default polarities */
 	bcm2835_wr(bs, BCM2835_SPI_CS,
 		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
 
 	err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
-			       dev_name(&pdev->dev), master);
+			       dev_name(&pdev->dev), ctlr);
 	if (err) {
 		dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
 		goto out_clk_disable;
 	}
 
-	err = devm_spi_register_master(&pdev->dev, master);
+	err = devm_spi_register_controller(&pdev->dev, ctlr);
 	if (err) {
-		dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+		dev_err(&pdev->dev, "could not register SPI controller: %d\n",
+			err);
 		goto out_clk_disable;
 	}
 
+	bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
+
 	return 0;
 
 out_clk_disable:
 	clk_disable_unprepare(bs->clk);
-out_master_put:
-	spi_master_put(master);
+out_controller_put:
+	spi_controller_put(ctlr);
 	return err;
 }
 
 static int bcm2835_spi_remove(struct platform_device *pdev)
 {
-	struct spi_master *master = platform_get_drvdata(pdev);
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	struct spi_controller *ctlr = platform_get_drvdata(pdev);
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+	bcm2835_debugfs_remove(bs);
 
 	/* Clear FIFOs, and disable the HW block */
 	bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -986,7 +1066,7 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
 
 	clk_disable_unprepare(bs->clk);
 
-	bcm2835_dma_release(master);
+	bcm2835_dma_release(ctlr);
 
 	return 0;
 }