author		Mark Brown <broonie@linaro.org>	2013-06-26 11:21:04 -0400
committer	Mark Brown <broonie@linaro.org>	2013-06-26 11:21:04 -0400
commit		f4e975814ed34b34e61a51683cf6b7034263689a (patch)
tree		99cf2d1606b7a3f11d3c06ccf11060646dbb3974 /drivers/spi
parent		c87e5d97c9971f4345b221c7f5de9b2e0ea4235d (diff)
parent		cd469106c3aa2303d928c20a35cac5d669f816f7 (diff)
Merge remote-tracking branch 'spi/topic/s3c64xx' into spi-next
Diffstat (limited to 'drivers/spi')
-rw-r--r--	drivers/spi/spi-s3c64xx.c	202
1 file changed, 143 insertions(+), 59 deletions(-)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 32ba3555f8b9..eb53df27e7ea 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -39,6 +39,7 @@
 #endif
 
 #define MAX_SPI_PORTS		3
+#define S3C64XX_SPI_QUIRK_POLL	(1 << 0)
 
 /* Registers and bit-fields */
 
@@ -130,6 +131,7 @@
 #define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT
 
 #define msecs_to_loops(t)	(loops_per_jiffy / 1000 * HZ * t)
+#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
 
 #define RXBUSY    (1<<2)
 #define TXBUSY    (1<<3)
@@ -158,6 +160,7 @@ struct s3c64xx_spi_port_config {
 	int	fifo_lvl_mask[MAX_SPI_PORTS];
 	int	rx_lvl_offset;
 	int	tx_st_done;
+	int	quirks;
 	bool	high_speed;
 	bool	clk_from_cmu;
 };
@@ -205,6 +208,7 @@ struct s3c64xx_spi_driver_data {
 	struct s3c64xx_spi_port_config	*port_conf;
 	unsigned int			port_id;
 	unsigned long			gpios[4];
+	bool				cs_gpio;
 };
 
 static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
@@ -344,8 +348,12 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 {
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 
-	/* Acquire DMA channels */
-	while (!acquire_dma(sdd))
+	/*
+	 * If DMA resource was not available during
+	 * probe, no need to continue with dma requests
+	 * else Acquire DMA channels
+	 */
+	while (!is_polling(sdd) && !acquire_dma(sdd))
 		usleep_range(10000, 11000);
 
 	pm_runtime_get_sync(&sdd->pdev->dev);
@@ -358,9 +366,12 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 
 	/* Free DMA channels */
-	sdd->ops->release((enum dma_ch)sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
-	sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
-
+	if (!is_polling(sdd)) {
+		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
+					&s3c64xx_spi_dma_client);
+		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
+					&s3c64xx_spi_dma_client);
+	}
 	pm_runtime_put(&sdd->pdev->dev);
 
 	return 0;
@@ -464,8 +475,10 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 
 	/* Free DMA channels */
-	dma_release_channel(sdd->rx_dma.ch);
-	dma_release_channel(sdd->tx_dma.ch);
+	if (!is_polling(sdd)) {
+		dma_release_channel(sdd->rx_dma.ch);
+		dma_release_channel(sdd->tx_dma.ch);
+	}
 
 	pm_runtime_put(&sdd->pdev->dev);
 	return 0;
@@ -558,14 +571,40 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
 		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
 			/* Deselect the last toggled device */
 			cs = sdd->tgl_spi->controller_data;
-			gpio_set_value(cs->line,
-				spi->mode & SPI_CS_HIGH ? 0 : 1);
+			if (sdd->cs_gpio)
+				gpio_set_value(cs->line,
+					spi->mode & SPI_CS_HIGH ? 0 : 1);
 		}
 		sdd->tgl_spi = NULL;
 	}
 
 	cs = spi->controller_data;
-	gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
+	if (sdd->cs_gpio)
+		gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
+
+	/* Start the signals */
+	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+}
+
+static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+					int timeout_ms)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val = 1;
+	u32 status;
+
+	/* max fifo depth available */
+	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+
+	if (timeout_ms)
+		val = msecs_to_loops(timeout_ms);
+
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
+
+	/* return the actual received data length */
+	return RX_FIFO_LVL(status, sdd);
 }
 
 static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
@@ -590,20 +629,19 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
 	}
 
-	if (!val)
-		return -EIO;
-
 	if (dma_mode) {
 		u32 status;
 
 		/*
+		 * If the previous xfer was completed within timeout, then
+		 * proceed further else return -EIO.
 		 * DmaTx returns after simply writing data in the FIFO,
 		 * w/o waiting for real transmission on the bus to finish.
 		 * DmaRx returns only after Dma read data from FIFO which
 		 * needs bus transmission to finish, so we don't worry if
 		 * Xfer involved Rx(with or without Tx).
 		 */
-		if (xfer->rx_buf == NULL) {
+		if (val && !xfer->rx_buf) {
 			val = msecs_to_loops(10);
 			status = readl(regs + S3C64XX_SPI_STATUS);
 			while ((TX_FIFO_LVL(status, sdd)
@@ -613,30 +651,54 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 				status = readl(regs + S3C64XX_SPI_STATUS);
 			}
 
-			if (!val)
-				return -EIO;
 		}
+
+		/* If timed out while checking rx/tx status return error */
+		if (!val)
+			return -EIO;
 	} else {
+		int loops;
+		u32 cpy_len;
+		u8 *buf;
+
 		/* If it was only Tx */
-		if (xfer->rx_buf == NULL) {
+		if (!xfer->rx_buf) {
 			sdd->state &= ~TXBUSY;
 			return 0;
 		}
 
-		switch (sdd->cur_bpw) {
-		case 32:
-			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-				xfer->rx_buf, xfer->len / 4);
-			break;
-		case 16:
-			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-				xfer->rx_buf, xfer->len / 2);
-			break;
-		default:
-			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-				xfer->rx_buf, xfer->len);
-			break;
-		}
+		/*
+		 * If the receive length is bigger than the controller fifo
+		 * size, calculate the loops and read the fifo as many times.
+		 * loops = length / max fifo size (calculated by using the
+		 * fifo mask).
+		 * For any size less than the fifo size the below code is
+		 * executed atleast once.
+		 */
+		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+		buf = xfer->rx_buf;
+		do {
+			/* wait for data to be received in the fifo */
+			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+						(loops ? ms : 0));
+
+			switch (sdd->cur_bpw) {
+			case 32:
+				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+					buf, cpy_len / 4);
+				break;
+			case 16:
+				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+					buf, cpy_len / 2);
+				break;
+			default:
+				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+					buf, cpy_len);
+				break;
+			}
+
+			buf = buf + cpy_len;
+		} while (loops--);
 		sdd->state &= ~RXBUSY;
 	}
 
@@ -651,7 +713,11 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
 	if (sdd->tgl_spi == spi)
 		sdd->tgl_spi = NULL;
 
-	gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
+	if (sdd->cs_gpio)
+		gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
+
+	/* Quiese the signals */
+	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
 }
 
 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -733,7 +799,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
 	struct device *dev = &sdd->pdev->dev;
 	struct spi_transfer *xfer;
 
-	if (msg->is_dma_mapped)
+	if (is_polling(sdd) || msg->is_dma_mapped)
 		return 0;
 
 	/* First mark all xfer unmapped */
@@ -782,7 +848,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
 	struct device *dev = &sdd->pdev->dev;
 	struct spi_transfer *xfer;
 
-	if (msg->is_dma_mapped)
+	if (is_polling(sdd) || msg->is_dma_mapped)
 		return;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
@@ -861,8 +927,9 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
 
 		/* Polling method for xfers not bigger than FIFO capacity */
 		use_dma = 0;
-		if (sdd->rx_dma.ch && sdd->tx_dma.ch &&
-		    (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))
+		if (!is_polling(sdd) &&
+		    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
+		    (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
 			use_dma = 1;
 
 		spin_lock_irqsave(&sdd->lock, flags);
@@ -876,17 +943,10 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
 		/* Slave Select */
 		enable_cs(sdd, spi);
 
-		/* Start the signals */
-		writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-
 		spin_unlock_irqrestore(&sdd->lock, flags);
 
 		status = wait_for_xfer(sdd, xfer, use_dma);
 
-		/* Quiese the signals */
-		writel(S3C64XX_SPI_SLAVE_SIG_INACT,
-			sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-
 		if (status) {
 			dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
 				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
@@ -942,8 +1002,10 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
 {
 	struct s3c64xx_spi_csinfo *cs;
 	struct device_node *slave_np, *data_np = NULL;
+	struct s3c64xx_spi_driver_data *sdd;
 	u32 fb_delay = 0;
 
+	sdd = spi_master_get_devdata(spi->master);
 	slave_np = spi->dev.of_node;
 	if (!slave_np) {
 		dev_err(&spi->dev, "device node not found\n");
@@ -963,7 +1025,10 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
 		return ERR_PTR(-ENOMEM);
 	}
 
-	cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
+	/* The CS line is asserted/deasserted by the gpio pin */
+	if (sdd->cs_gpio)
+		cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
+
 	if (!gpio_is_valid(cs->line)) {
 		dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
 		kfree(cs);
@@ -1003,7 +1068,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 		return -ENODEV;
 	}
 
-	if (!spi_get_ctldata(spi)) {
+	/* Request gpio only if cs line is asserted by gpio pins */
+	if (sdd->cs_gpio) {
 		err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
 					dev_name(&spi->dev));
 		if (err) {
@@ -1012,9 +1078,11 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 				cs->line, err);
 			goto err_gpio_req;
 		}
-		spi_set_ctldata(spi, cs);
 	}
 
+	if (!spi_get_ctldata(spi))
+		spi_set_ctldata(spi, cs);
+
 	sci = sdd->cntrlr_info;
 
 	spin_lock_irqsave(&sdd->lock, flags);
@@ -1092,8 +1160,10 @@ err_gpio_req:
 static void s3c64xx_spi_cleanup(struct spi_device *spi)
 {
 	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
+	struct s3c64xx_spi_driver_data *sdd;
 
-	if (cs) {
+	sdd = spi_master_get_devdata(spi->master);
+	if (cs && sdd->cs_gpio) {
 		gpio_free(cs->line);
 		if (spi->dev.of_node)
 			kfree(cs);
@@ -1270,7 +1340,11 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	sdd->cntrlr_info = sci;
 	sdd->pdev = pdev;
 	sdd->sfr_start = mem_res->start;
+	sdd->cs_gpio = true;
 	if (pdev->dev.of_node) {
+		if (!of_find_property(pdev->dev.of_node, "cs-gpio", NULL))
+			sdd->cs_gpio = false;
+
 		ret = of_alias_get_id(pdev->dev.of_node, "spi");
 		if (ret < 0) {
 			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
@@ -1287,19 +1361,19 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	if (!sdd->pdev->dev.of_node) {
 		res = platform_get_resource(pdev, IORESOURCE_DMA,  0);
 		if (!res) {
-			dev_err(&pdev->dev, "Unable to get SPI tx dma "
-					"resource\n");
-			return -ENXIO;
-		}
-		sdd->tx_dma.dmach = res->start;
+			dev_warn(&pdev->dev, "Unable to get SPI tx dma "
+					"resource. Switching to poll mode\n");
+			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
+		} else
+			sdd->tx_dma.dmach = res->start;
 
 		res = platform_get_resource(pdev, IORESOURCE_DMA,  1);
 		if (!res) {
-			dev_err(&pdev->dev, "Unable to get SPI rx dma "
-					"resource\n");
-			return -ENXIO;
-		}
-		sdd->rx_dma.dmach = res->start;
+			dev_warn(&pdev->dev, "Unable to get SPI rx dma "
+					"resource. Switching to poll mode\n");
+			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
+		} else
+			sdd->rx_dma.dmach = res->start;
 	}
 
 	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
@@ -1534,6 +1608,15 @@ static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
 	.clk_from_cmu	= true,
 };
 
+static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
+	.fifo_lvl_mask	= { 0x1ff },
+	.rx_lvl_offset	= 15,
+	.tx_st_done	= 25,
+	.high_speed	= true,
+	.clk_from_cmu	= true,
+	.quirks		= S3C64XX_SPI_QUIRK_POLL,
+};
+
 static struct platform_device_id s3c64xx_spi_driver_ids[] = {
 	{
 		.name		= "s3c2443-spi",
@@ -1557,15 +1640,16 @@ static struct platform_device_id s3c64xx_spi_driver_ids[] = {
 	{ },
 };
 
-#ifdef CONFIG_OF
 static const struct of_device_id s3c64xx_spi_dt_match[] = {
 	{ .compatible = "samsung,exynos4210-spi",
 			.data = (void *)&exynos4_spi_port_config,
 	},
+	{ .compatible = "samsung,exynos5440-spi",
+			.data = (void *)&exynos5440_spi_port_config,
+	},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
-#endif /* CONFIG_OF */
 
 static struct platform_driver s3c64xx_spi_driver = {
 	.driver = {