author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-21 17:37:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-21 17:37:00 -0400
commit		70ada77920723fbc2b35e9b301022fb1e166b41b (patch)
tree		f30f24135eff89020d8ae21d6c7a83cf5c812585 /drivers
parent		b22793f7fdc38d73c4bb4299a313deef56dcfe66 (diff)
parent		2764c500be0c1f057349ee6c81557239de060f87 (diff)
Merge branch 'next-spi' of git://git.secretlab.ca/git/linux-2.6
* 'next-spi' of git://git.secretlab.ca/git/linux-2.6: (53 commits)
  spi/omap2_mcspi: Verify TX reg is empty after TX only xfer with DMA
  spi/omap2_mcspi: disable channel after TX_ONLY transfer in PIO mode
  spi/bfin_spi: namespace local structs
  spi/bfin_spi: init early
  spi/bfin_spi: check per-transfer bits_per_word
  spi/bfin_spi: warn when CS is driven by hardware (CPHA=0)
  spi/bfin_spi: cs should be always low when a new transfer begins
  spi/bfin_spi: fix typo in comment
  spi/bfin_spi: reject unsupported SPI modes
  spi/bfin_spi: use dma_disable_irq_nosync() in irq handler
  spi/bfin_spi: combine duplicate SPI_CTL read/write logic
  spi/bfin_spi: reset ctl_reg bits when setup is run again on a device
  spi/bfin_spi: push all size checks into the transfer function
  spi/bfin_spi: use nosync when disabling the IRQ from the IRQ handler
  spi/bfin_spi: sync hardware state before reprogramming everything
  spi/bfin_spi: save/restore state when suspending/resuming
  spi/bfin_spi: redo GPIO CS handling
  Blackfin: SPI: expand SPI bitmasks
  spi/bfin_spi: use the SPI namespaced bit names
  spi/bfin_spi: drop extra memory we don't need
  ...
Diffstat (limited to 'drivers')
 drivers/mfd/ab8500-spi.c                                            |    5
 drivers/spi/Kconfig                                                 |   30
 drivers/spi/Makefile                                                |    9
 drivers/spi/amba-pl022.c                                            |  751
 drivers/spi/atmel_spi.c                                             |   14
 drivers/spi/omap2_mcspi.c                                           |   81
 drivers/spi/orion_spi.c                                             |    4
 drivers/spi/spi_bfin5xx.c                                           |  844
 drivers/spi/spi_fsl_espi.c                                          |  748
 drivers/spi/spi_fsl_lib.c                                           |  237
 drivers/spi/spi_fsl_lib.h                                           |  124
 drivers/spi/spi_fsl_spi.c (renamed from drivers/spi/spi_mpc8xxx.c)  |  552
 drivers/spi/spi_s3c64xx.c                                           |  158
 drivers/spi/spi_topcliff_pch.c                                      | 1303
 14 files changed, 3741 insertions, 1119 deletions
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
index e1c8b62b086d..01b6d584442c 100644
--- a/drivers/mfd/ab8500-spi.c
+++ b/drivers/mfd/ab8500-spi.c
@@ -83,6 +83,11 @@ static int __devinit ab8500_spi_probe(struct spi_device *spi)
 	struct ab8500 *ab8500;
 	int ret;
 
+	spi->bits_per_word = 24;
+	ret = spi_setup(spi);
+	if (ret < 0)
+		return ret;
+
 	ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
 	if (!ab8500)
 		return -ENOMEM;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 91c2f4f3af10..9949c252c23d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -182,12 +182,27 @@ config SPI_MPC512x_PSC
 	  This enables using the Freescale MPC5121 Programmable Serial
 	  Controller in SPI master mode.
 
-config SPI_MPC8xxx
-	tristate "Freescale MPC8xxx SPI controller"
+config SPI_FSL_LIB
+	tristate
 	depends on FSL_SOC
+
+config SPI_FSL_SPI
+	tristate "Freescale SPI controller"
+	depends on FSL_SOC
+	select SPI_FSL_LIB
 	help
-	  This enables using the Freescale MPC8xxx SPI controllers in master
-	  mode.
+	  This enables using the Freescale SPI controllers in master mode.
+	  MPC83xx platform uses the controller in cpu mode or CPM/QE mode.
+	  MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
+
+config SPI_FSL_ESPI
+	tristate "Freescale eSPI controller"
+	depends on FSL_SOC
+	select SPI_FSL_LIB
+	help
+	  This enables using the Freescale eSPI controllers in master mode.
+	  From MPC8536, 85xx platform uses the controller, and all P10xx,
+	  P20xx, P30xx,P40xx, P50xx uses this controller.
 
 config SPI_OMAP_UWIRE
 	tristate "OMAP1 MicroWire"
@@ -298,6 +313,13 @@ config SPI_STMP3XXX
 	help
 	  SPI driver for Freescale STMP37xx/378x SoC SSP interface
 
+config SPI_TOPCLIFF_PCH
+	tristate "Topcliff PCH SPI Controller"
+	depends on PCI
+	help
+	  SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
+	  used in some x86 embedded processors.
+
 config SPI_TXX9
 	tristate "Toshiba TXx9 SPI controller"
 	depends on GENERIC_GPIO && CPU_TX49XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index e9cbd18217a0..557aaadf56b2 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -2,9 +2,7 @@
 # Makefile for kernel SPI drivers.
 #
 
-ifeq ($(CONFIG_SPI_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
 
 # small core, mostly translating board-specific
 # config declarations into driver model code
@@ -34,11 +32,14 @@ obj-$(CONFIG_SPI_PL022) += amba-pl022.o
 obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o
 obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
 obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
-obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
+obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o
+obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o
+obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o
 obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
 obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
 obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
 obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
+obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
 obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
 obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
 obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 4c37c4e28647..fb3d1b31772d 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -27,7 +27,6 @@
 /*
  * TODO:
  * - add timeout on polled transfers
- * - add generic DMA framework support
  */
 
 #include <linux/init.h>
@@ -45,6 +44,9 @@
 #include <linux/amba/pl022.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
 
 /*
  * This macro is used to define some register default values.
@@ -381,6 +383,14 @@ struct pl022 {
 	enum ssp_reading read;
 	enum ssp_writing write;
 	u32 exp_fifo_level;
+	/* DMA settings */
+#ifdef CONFIG_DMA_ENGINE
+	struct dma_chan *dma_rx_channel;
+	struct dma_chan *dma_tx_channel;
+	struct sg_table sgt_rx;
+	struct sg_table sgt_tx;
+	char *dummypage;
+#endif
 };
 
 /**
@@ -406,7 +416,7 @@ struct chip_data {
 	u16 dmacr;
 	u16 cpsr;
 	u8 n_bytes;
-	u8 enable_dma:1;
+	bool enable_dma;
 	enum ssp_reading read;
 	enum ssp_writing write;
 	void (*cs_control) (u32 command);
@@ -763,6 +773,371 @@ static void *next_transfer(struct pl022 *pl022)
763 } 773 }
764 return STATE_DONE; 774 return STATE_DONE;
765} 775}
776
777/*
778 * This DMA functionality is only compiled in if we have
779 * access to the generic DMA devices/DMA engine.
780 */
781#ifdef CONFIG_DMA_ENGINE
782static void unmap_free_dma_scatter(struct pl022 *pl022)
783{
784 /* Unmap and free the SG tables */
785 dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
786 pl022->sgt_tx.nents, DMA_TO_DEVICE);
787 dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
788 pl022->sgt_rx.nents, DMA_FROM_DEVICE);
789 sg_free_table(&pl022->sgt_rx);
790 sg_free_table(&pl022->sgt_tx);
791}
792
793static void dma_callback(void *data)
794{
795 struct pl022 *pl022 = data;
796 struct spi_message *msg = pl022->cur_msg;
797
798 BUG_ON(!pl022->sgt_rx.sgl);
799
800#ifdef VERBOSE_DEBUG
801 /*
802 * Optionally dump out buffers to inspect contents, this is
803 * good if you want to convince yourself that the loopback
804 * read/write contents are the same, when adopting to a new
805 * DMA engine.
806 */
807 {
808 struct scatterlist *sg;
809 unsigned int i;
810
811 dma_sync_sg_for_cpu(&pl022->adev->dev,
812 pl022->sgt_rx.sgl,
813 pl022->sgt_rx.nents,
814 DMA_FROM_DEVICE);
815
816 for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
817 dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
818 print_hex_dump(KERN_ERR, "SPI RX: ",
819 DUMP_PREFIX_OFFSET,
820 16,
821 1,
822 sg_virt(sg),
823 sg_dma_len(sg),
824 1);
825 }
826 for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
827 dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
828 print_hex_dump(KERN_ERR, "SPI TX: ",
829 DUMP_PREFIX_OFFSET,
830 16,
831 1,
832 sg_virt(sg),
833 sg_dma_len(sg),
834 1);
835 }
836 }
837#endif
838
839 unmap_free_dma_scatter(pl022);
840
841 /* Update total bytes transfered */
842 msg->actual_length += pl022->cur_transfer->len;
843 if (pl022->cur_transfer->cs_change)
844 pl022->cur_chip->
845 cs_control(SSP_CHIP_DESELECT);
846
847 /* Move to next transfer */
848 msg->state = next_transfer(pl022);
849 tasklet_schedule(&pl022->pump_transfers);
850}
851
852static void setup_dma_scatter(struct pl022 *pl022,
853 void *buffer,
854 unsigned int length,
855 struct sg_table *sgtab)
856{
857 struct scatterlist *sg;
858 int bytesleft = length;
859 void *bufp = buffer;
860 int mapbytes;
861 int i;
862
863 if (buffer) {
864 for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
865 /*
866 * If there are less bytes left than what fits
867 * in the current page (plus page alignment offset)
868 * we just feed in this, else we stuff in as much
869 * as we can.
870 */
871 if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
872 mapbytes = bytesleft;
873 else
874 mapbytes = PAGE_SIZE - offset_in_page(bufp);
875 sg_set_page(sg, virt_to_page(bufp),
876 mapbytes, offset_in_page(bufp));
877 bufp += mapbytes;
878 bytesleft -= mapbytes;
879 dev_dbg(&pl022->adev->dev,
880 "set RX/TX target page @ %p, %d bytes, %d left\n",
881 bufp, mapbytes, bytesleft);
882 }
883 } else {
884 /* Map the dummy buffer on every page */
885 for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
886 if (bytesleft < PAGE_SIZE)
887 mapbytes = bytesleft;
888 else
889 mapbytes = PAGE_SIZE;
890 sg_set_page(sg, virt_to_page(pl022->dummypage),
891 mapbytes, 0);
892 bytesleft -= mapbytes;
893 dev_dbg(&pl022->adev->dev,
894 "set RX/TX to dummy page %d bytes, %d left\n",
895 mapbytes, bytesleft);
896
897 }
898 }
899 BUG_ON(bytesleft);
900}
901
902/**
903 * configure_dma - configures the channels for the next transfer
904 * @pl022: SSP driver's private data structure
905 */
906static int configure_dma(struct pl022 *pl022)
907{
908 struct dma_slave_config rx_conf = {
909 .src_addr = SSP_DR(pl022->phybase),
910 .direction = DMA_FROM_DEVICE,
911 .src_maxburst = pl022->vendor->fifodepth >> 1,
912 };
913 struct dma_slave_config tx_conf = {
914 .dst_addr = SSP_DR(pl022->phybase),
915 .direction = DMA_TO_DEVICE,
916 .dst_maxburst = pl022->vendor->fifodepth >> 1,
917 };
918 unsigned int pages;
919 int ret;
920 int sglen;
921 struct dma_chan *rxchan = pl022->dma_rx_channel;
922 struct dma_chan *txchan = pl022->dma_tx_channel;
923 struct dma_async_tx_descriptor *rxdesc;
924 struct dma_async_tx_descriptor *txdesc;
925 dma_cookie_t cookie;
926
927 /* Check that the channels are available */
928 if (!rxchan || !txchan)
929 return -ENODEV;
930
931 switch (pl022->read) {
932 case READING_NULL:
933 /* Use the same as for writing */
934 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
935 break;
936 case READING_U8:
937 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
938 break;
939 case READING_U16:
940 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
941 break;
942 case READING_U32:
943 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
944 break;
945 }
946
947 switch (pl022->write) {
948 case WRITING_NULL:
949 /* Use the same as for reading */
950 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
951 break;
952 case WRITING_U8:
953 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
954 break;
955 case WRITING_U16:
956 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
957 break;
958 case WRITING_U32:
959 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;;
960 break;
961 }
962
963 /* SPI pecularity: we need to read and write the same width */
964 if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
965 rx_conf.src_addr_width = tx_conf.dst_addr_width;
966 if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
967 tx_conf.dst_addr_width = rx_conf.src_addr_width;
968 BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
969
970 rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
971 (unsigned long) &rx_conf);
972 txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
973 (unsigned long) &tx_conf);
974
975 /* Create sglists for the transfers */
976 pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1;
977 dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);
978
979 ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL);
980 if (ret)
981 goto err_alloc_rx_sg;
982
983 ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL);
984 if (ret)
985 goto err_alloc_tx_sg;
986
987 /* Fill in the scatterlists for the RX+TX buffers */
988 setup_dma_scatter(pl022, pl022->rx,
989 pl022->cur_transfer->len, &pl022->sgt_rx);
990 setup_dma_scatter(pl022, pl022->tx,
991 pl022->cur_transfer->len, &pl022->sgt_tx);
992
993 /* Map DMA buffers */
994 sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
995 pl022->sgt_rx.nents, DMA_FROM_DEVICE);
996 if (!sglen)
997 goto err_rx_sgmap;
998
999 sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
1000 pl022->sgt_tx.nents, DMA_TO_DEVICE);
1001 if (!sglen)
1002 goto err_tx_sgmap;
1003
1004 /* Send both scatterlists */
1005 rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
1006 pl022->sgt_rx.sgl,
1007 pl022->sgt_rx.nents,
1008 DMA_FROM_DEVICE,
1009 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1010 if (!rxdesc)
1011 goto err_rxdesc;
1012
1013 txdesc = txchan->device->device_prep_slave_sg(txchan,
1014 pl022->sgt_tx.sgl,
1015 pl022->sgt_tx.nents,
1016 DMA_TO_DEVICE,
1017 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1018 if (!txdesc)
1019 goto err_txdesc;
1020
1021 /* Put the callback on the RX transfer only, that should finish last */
1022 rxdesc->callback = dma_callback;
1023 rxdesc->callback_param = pl022;
1024
1025 /* Submit and fire RX and TX with TX last so we're ready to read! */
1026 cookie = rxdesc->tx_submit(rxdesc);
1027 if (dma_submit_error(cookie))
1028 goto err_submit_rx;
1029 cookie = txdesc->tx_submit(txdesc);
1030 if (dma_submit_error(cookie))
1031 goto err_submit_tx;
1032 rxchan->device->device_issue_pending(rxchan);
1033 txchan->device->device_issue_pending(txchan);
1034
1035 return 0;
1036
1037err_submit_tx:
1038err_submit_rx:
1039err_txdesc:
1040 txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
1041err_rxdesc:
1042 rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
1043 dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
1044 pl022->sgt_tx.nents, DMA_TO_DEVICE);
1045err_tx_sgmap:
1046 dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
1047 pl022->sgt_tx.nents, DMA_FROM_DEVICE);
1048err_rx_sgmap:
1049 sg_free_table(&pl022->sgt_tx);
1050err_alloc_tx_sg:
1051 sg_free_table(&pl022->sgt_rx);
1052err_alloc_rx_sg:
1053 return -ENOMEM;
1054}
1055
1056static int __init pl022_dma_probe(struct pl022 *pl022)
1057{
1058 dma_cap_mask_t mask;
1059
1060 /* Try to acquire a generic DMA engine slave channel */
1061 dma_cap_zero(mask);
1062 dma_cap_set(DMA_SLAVE, mask);
1063 /*
1064 * We need both RX and TX channels to do DMA, else do none
1065 * of them.
1066 */
1067 pl022->dma_rx_channel = dma_request_channel(mask,
1068 pl022->master_info->dma_filter,
1069 pl022->master_info->dma_rx_param);
1070 if (!pl022->dma_rx_channel) {
1071 dev_err(&pl022->adev->dev, "no RX DMA channel!\n");
1072 goto err_no_rxchan;
1073 }
1074
1075 pl022->dma_tx_channel = dma_request_channel(mask,
1076 pl022->master_info->dma_filter,
1077 pl022->master_info->dma_tx_param);
1078 if (!pl022->dma_tx_channel) {
1079 dev_err(&pl022->adev->dev, "no TX DMA channel!\n");
1080 goto err_no_txchan;
1081 }
1082
1083 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1084 if (!pl022->dummypage) {
1085 dev_err(&pl022->adev->dev, "no DMA dummypage!\n");
1086 goto err_no_dummypage;
1087 }
1088
1089 dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
1090 dma_chan_name(pl022->dma_rx_channel),
1091 dma_chan_name(pl022->dma_tx_channel));
1092
1093 return 0;
1094
1095err_no_dummypage:
1096 dma_release_channel(pl022->dma_tx_channel);
1097err_no_txchan:
1098 dma_release_channel(pl022->dma_rx_channel);
1099 pl022->dma_rx_channel = NULL;
1100err_no_rxchan:
1101 return -ENODEV;
1102}
1103
1104static void terminate_dma(struct pl022 *pl022)
1105{
1106 struct dma_chan *rxchan = pl022->dma_rx_channel;
1107 struct dma_chan *txchan = pl022->dma_tx_channel;
1108
1109 rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
1110 txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
1111 unmap_free_dma_scatter(pl022);
1112}
1113
1114static void pl022_dma_remove(struct pl022 *pl022)
1115{
1116 if (pl022->busy)
1117 terminate_dma(pl022);
1118 if (pl022->dma_tx_channel)
1119 dma_release_channel(pl022->dma_tx_channel);
1120 if (pl022->dma_rx_channel)
1121 dma_release_channel(pl022->dma_rx_channel);
1122 kfree(pl022->dummypage);
1123}
1124
1125#else
1126static inline int configure_dma(struct pl022 *pl022)
1127{
1128 return -ENODEV;
1129}
1130
1131static inline int pl022_dma_probe(struct pl022 *pl022)
1132{
1133 return 0;
1134}
1135
1136static inline void pl022_dma_remove(struct pl022 *pl022)
1137{
1138}
1139#endif
1140
766/** 1141/**
767 * pl022_interrupt_handler - Interrupt handler for SSP controller 1142 * pl022_interrupt_handler - Interrupt handler for SSP controller
768 * 1143 *
@@ -794,14 +1169,17 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
 	if (unlikely(!irq_status))
 		return IRQ_NONE;
 
-	/* This handles the error code interrupts */
+	/*
+	 * This handles the FIFO interrupts, the timeout
+	 * interrupts are flatly ignored, they cannot be
+	 * trusted.
+	 */
 	if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
 		/*
 		 * Overrun interrupt - bail out since our Data has been
 		 * corrupted
 		 */
-		dev_err(&pl022->adev->dev,
-			"FIFO overrun\n");
+		dev_err(&pl022->adev->dev, "FIFO overrun\n");
 		if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
 			dev_err(&pl022->adev->dev,
 				"RXFIFO is full\n");
@@ -896,8 +1274,8 @@ static int set_up_next_transfer(struct pl022 *pl022,
 }
 
 /**
- * pump_transfers - Tasklet function which schedules next interrupt transfer
- * when running in interrupt transfer mode.
+ * pump_transfers - Tasklet function which schedules next transfer
+ * when running in interrupt or DMA transfer mode.
  * @data: SSP driver private data structure
  *
  */
@@ -954,65 +1332,23 @@ static void pump_transfers(unsigned long data)
954 } 1332 }
955 /* Flush the FIFOs and let's go! */ 1333 /* Flush the FIFOs and let's go! */
956 flush(pl022); 1334 flush(pl022);
957 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
958}
959
960/**
961 * NOT IMPLEMENTED
962 * configure_dma - It configures the DMA pipes for DMA transfers
963 * @data: SSP driver's private data structure
964 *
965 */
966static int configure_dma(void *data)
967{
968 struct pl022 *pl022 = data;
969 dev_dbg(&pl022->adev->dev, "configure DMA\n");
970 return -ENOTSUPP;
971}
972
973/**
974 * do_dma_transfer - It handles transfers of the current message
975 * if it is DMA xfer.
976 * NOT FULLY IMPLEMENTED
977 * @data: SSP driver's private data structure
978 */
979static void do_dma_transfer(void *data)
980{
981 struct pl022 *pl022 = data;
982
983 if (configure_dma(data)) {
984 dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
985 goto err_config_dma;
986 }
987 1335
988 /* TODO: Implememt DMA setup of pipes here */ 1336 if (pl022->cur_chip->enable_dma) {
989 1337 if (configure_dma(pl022)) {
990 /* Enable target chip, set up transfer */ 1338 dev_dbg(&pl022->adev->dev,
991 pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1339 "configuration of DMA failed, fall back to interrupt mode\n");
992 if (set_up_next_transfer(pl022, pl022->cur_transfer)) { 1340 goto err_config_dma;
993 /* Error path */ 1341 }
994 pl022->cur_msg->state = STATE_ERROR;
995 pl022->cur_msg->status = -EIO;
996 giveback(pl022);
997 return; 1342 return;
998 } 1343 }
999 /* Enable SSP */
1000 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1001 SSP_CR1(pl022->virtbase));
1002
1003 /* TODO: Enable the DMA transfer here */
1004 return;
1005 1344
1006 err_config_dma: 1345err_config_dma:
1007 pl022->cur_msg->state = STATE_ERROR; 1346 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
1008 pl022->cur_msg->status = -EIO;
1009 giveback(pl022);
1010 return;
1011} 1347}
1012 1348
1013static void do_interrupt_transfer(void *data) 1349static void do_interrupt_dma_transfer(struct pl022 *pl022)
1014{ 1350{
1015 struct pl022 *pl022 = data; 1351 u32 irqflags = ENABLE_ALL_INTERRUPTS;
1016 1352
1017 /* Enable target chip */ 1353 /* Enable target chip */
1018 pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1354 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
@@ -1023,15 +1359,26 @@ static void do_interrupt_transfer(void *data)
1023 giveback(pl022); 1359 giveback(pl022);
1024 return; 1360 return;
1025 } 1361 }
1362 /* If we're using DMA, set up DMA here */
1363 if (pl022->cur_chip->enable_dma) {
1364 /* Configure DMA transfer */
1365 if (configure_dma(pl022)) {
1366 dev_dbg(&pl022->adev->dev,
1367 "configuration of DMA failed, fall back to interrupt mode\n");
1368 goto err_config_dma;
1369 }
1370 /* Disable interrupts in DMA mode, IRQ from DMA controller */
1371 irqflags = DISABLE_ALL_INTERRUPTS;
1372 }
1373err_config_dma:
1026 /* Enable SSP, turn on interrupts */ 1374 /* Enable SSP, turn on interrupts */
1027 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), 1375 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1028 SSP_CR1(pl022->virtbase)); 1376 SSP_CR1(pl022->virtbase));
1029 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); 1377 writew(irqflags, SSP_IMSC(pl022->virtbase));
1030} 1378}
1031 1379
1032static void do_polling_transfer(void *data) 1380static void do_polling_transfer(struct pl022 *pl022)
1033{ 1381{
1034 struct pl022 *pl022 = data;
1035 struct spi_message *message = NULL; 1382 struct spi_message *message = NULL;
1036 struct spi_transfer *transfer = NULL; 1383 struct spi_transfer *transfer = NULL;
1037 struct spi_transfer *previous = NULL; 1384 struct spi_transfer *previous = NULL;
@@ -1101,7 +1448,7 @@ static void do_polling_transfer(void *data)
1101 * 1448 *
1102 * This function checks if there is any spi message in the queue that 1449 * This function checks if there is any spi message in the queue that
1103 * needs processing and delegate control to appropriate function 1450 * needs processing and delegate control to appropriate function
1104 * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer() 1451 * do_polling_transfer()/do_interrupt_dma_transfer()
1105 * based on the kind of the transfer 1452 * based on the kind of the transfer
1106 * 1453 *
1107 */ 1454 */
@@ -1150,10 +1497,8 @@ static void pump_messages(struct work_struct *work)
1150 1497
1151 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) 1498 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
1152 do_polling_transfer(pl022); 1499 do_polling_transfer(pl022);
1153 else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
1154 do_interrupt_transfer(pl022);
1155 else 1500 else
1156 do_dma_transfer(pl022); 1501 do_interrupt_dma_transfer(pl022);
1157} 1502}
1158 1503
1159 1504
@@ -1248,100 +1593,56 @@ static int destroy_queue(struct pl022 *pl022)
1248} 1593}
1249 1594
1250static int verify_controller_parameters(struct pl022 *pl022, 1595static int verify_controller_parameters(struct pl022 *pl022,
1251 struct pl022_config_chip *chip_info) 1596 struct pl022_config_chip const *chip_info)
1252{ 1597{
1253 if ((chip_info->lbm != LOOPBACK_ENABLED)
1254 && (chip_info->lbm != LOOPBACK_DISABLED)) {
1255 dev_err(chip_info->dev,
1256 "loopback Mode is configured incorrectly\n");
1257 return -EINVAL;
1258 }
1259 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) 1598 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
1260 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { 1599 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
1261 dev_err(chip_info->dev, 1600 dev_err(&pl022->adev->dev,
1262 "interface is configured incorrectly\n"); 1601 "interface is configured incorrectly\n");
1263 return -EINVAL; 1602 return -EINVAL;
1264 } 1603 }
1265 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && 1604 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
1266 (!pl022->vendor->unidir)) { 1605 (!pl022->vendor->unidir)) {
1267 dev_err(chip_info->dev, 1606 dev_err(&pl022->adev->dev,
1268 "unidirectional mode not supported in this " 1607 "unidirectional mode not supported in this "
1269 "hardware version\n"); 1608 "hardware version\n");
1270 return -EINVAL; 1609 return -EINVAL;
1271 } 1610 }
1272 if ((chip_info->hierarchy != SSP_MASTER) 1611 if ((chip_info->hierarchy != SSP_MASTER)
1273 && (chip_info->hierarchy != SSP_SLAVE)) { 1612 && (chip_info->hierarchy != SSP_SLAVE)) {
1274 dev_err(chip_info->dev, 1613 dev_err(&pl022->adev->dev,
1275 "hierarchy is configured incorrectly\n"); 1614 "hierarchy is configured incorrectly\n");
1276 return -EINVAL; 1615 return -EINVAL;
1277 } 1616 }
1278 if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
1279 || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
1280 dev_err(chip_info->dev,
1281 "cpsdvsr is configured incorrectly\n");
1282 return -EINVAL;
1283 }
1284 if ((chip_info->endian_rx != SSP_RX_MSB)
1285 && (chip_info->endian_rx != SSP_RX_LSB)) {
1286 dev_err(chip_info->dev,
1287 "RX FIFO endianess is configured incorrectly\n");
1288 return -EINVAL;
1289 }
1290 if ((chip_info->endian_tx != SSP_TX_MSB)
1291 && (chip_info->endian_tx != SSP_TX_LSB)) {
1292 dev_err(chip_info->dev,
1293 "TX FIFO endianess is configured incorrectly\n");
1294 return -EINVAL;
1295 }
1296 if ((chip_info->data_size < SSP_DATA_BITS_4)
1297 || (chip_info->data_size > SSP_DATA_BITS_32)) {
1298 dev_err(chip_info->dev,
1299 "DATA Size is configured incorrectly\n");
1300 return -EINVAL;
1301 }
1302 if ((chip_info->com_mode != INTERRUPT_TRANSFER) 1617 if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1303 && (chip_info->com_mode != DMA_TRANSFER) 1618 && (chip_info->com_mode != DMA_TRANSFER)
1304 && (chip_info->com_mode != POLLING_TRANSFER)) { 1619 && (chip_info->com_mode != POLLING_TRANSFER)) {
1305 dev_err(chip_info->dev, 1620 dev_err(&pl022->adev->dev,
1306 "Communication mode is configured incorrectly\n"); 1621 "Communication mode is configured incorrectly\n");
1307 return -EINVAL; 1622 return -EINVAL;
1308 } 1623 }
1309 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) 1624 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
1310 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { 1625 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
1311 dev_err(chip_info->dev, 1626 dev_err(&pl022->adev->dev,
1312 "RX FIFO Trigger Level is configured incorrectly\n"); 1627 "RX FIFO Trigger Level is configured incorrectly\n");
1313 return -EINVAL; 1628 return -EINVAL;
1314 } 1629 }
1315 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) 1630 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
1316 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { 1631 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
1317 dev_err(chip_info->dev, 1632 dev_err(&pl022->adev->dev,
1318 "TX FIFO Trigger Level is configured incorrectly\n"); 1633 "TX FIFO Trigger Level is configured incorrectly\n");
1319 return -EINVAL; 1634 return -EINVAL;
1320 } 1635 }
1321 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
1322 if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
1323 && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
1324 dev_err(chip_info->dev,
1325 "Clock Phase is configured incorrectly\n");
1326 return -EINVAL;
1327 }
1328 if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
1329 && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
1330 dev_err(chip_info->dev,
1331 "Clock Polarity is configured incorrectly\n");
1332 return -EINVAL;
1333 }
1334 }
1335 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { 1636 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1336 if ((chip_info->ctrl_len < SSP_BITS_4) 1637 if ((chip_info->ctrl_len < SSP_BITS_4)
1337 || (chip_info->ctrl_len > SSP_BITS_32)) { 1638 || (chip_info->ctrl_len > SSP_BITS_32)) {
1338 dev_err(chip_info->dev, 1639 dev_err(&pl022->adev->dev,
1339 "CTRL LEN is configured incorrectly\n"); 1640 "CTRL LEN is configured incorrectly\n");
1340 return -EINVAL; 1641 return -EINVAL;
1341 } 1642 }
1342 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) 1643 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
1343 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { 1644 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
1344 dev_err(chip_info->dev, 1645 dev_err(&pl022->adev->dev,
1345 "Wait State is configured incorrectly\n"); 1646 "Wait State is configured incorrectly\n");
1346 return -EINVAL; 1647 return -EINVAL;
1347 } 1648 }
@@ -1350,24 +1651,20 @@ static int verify_controller_parameters(struct pl022 *pl022,
1350 if ((chip_info->duplex != 1651 if ((chip_info->duplex !=
1351 SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) 1652 SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1352 && (chip_info->duplex != 1653 && (chip_info->duplex !=
1353 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) 1654 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
1354 dev_err(chip_info->dev, 1655 dev_err(&pl022->adev->dev,
1355 "Microwire duplex mode is configured incorrectly\n"); 1656 "Microwire duplex mode is configured incorrectly\n");
1356 return -EINVAL; 1657 return -EINVAL;
1658 }
1357 } else { 1659 } else {
1358 if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) 1660 if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1359 dev_err(chip_info->dev, 1661 dev_err(&pl022->adev->dev,
1360 "Microwire half duplex mode requested," 1662 "Microwire half duplex mode requested,"
1361 " but this is only available in the" 1663 " but this is only available in the"
1362 " ST version of PL022\n"); 1664 " ST version of PL022\n");
1363 return -EINVAL; 1665 return -EINVAL;
1364 } 1666 }
1365 } 1667 }
1366 if (chip_info->cs_control == NULL) {
1367 dev_warn(chip_info->dev,
1368 "Chip Select Function is NULL for this chip\n");
1369 chip_info->cs_control = null_cs_control;
1370 }
1371 return 0; 1668 return 0;
1372} 1669}
1373 1670
@@ -1467,22 +1764,24 @@ static int calculate_effective_freq(struct pl022 *pl022,
1467 return 0; 1764 return 0;
1468} 1765}
1469 1766
1470/** 1767
1471 * NOT IMPLEMENTED 1768/*
1472 * process_dma_info - Processes the DMA info provided by client drivers 1769 * A piece of default chip info unless the platform
1473 * @chip_info: chip info provided by client device 1770 * supplies it.
1474 * @chip: Runtime state maintained by the SSP controller for each spi device
1475 *
1476 * This function processes and stores DMA config provided by client driver
1477 * into the runtime state maintained by the SSP controller driver
1478 */ 1771 */
1479static int process_dma_info(struct pl022_config_chip *chip_info, 1772static const struct pl022_config_chip pl022_default_chip_info = {
1480 struct chip_data *chip) 1773 .com_mode = POLLING_TRANSFER,
1481{ 1774 .iface = SSP_INTERFACE_MOTOROLA_SPI,
1482 dev_err(chip_info->dev, 1775 .hierarchy = SSP_SLAVE,
1483 "cannot process DMA info, DMA not implemented!\n"); 1776 .slave_tx_disable = DO_NOT_DRIVE_TX,
1484 return -ENOTSUPP; 1777 .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
1485} 1778 .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
1779 .ctrl_len = SSP_BITS_8,
1780 .wait_state = SSP_MWIRE_WAIT_ZERO,
1781 .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
1782 .cs_control = null_cs_control,
1783};
1784
1486 1785
1487/** 1786/**
1488 * pl022_setup - setup function registered to SPI master framework 1787 * pl022_setup - setup function registered to SPI master framework
@@ -1496,23 +1795,15 @@ static int process_dma_info(struct pl022_config_chip *chip_info,
1496 * controller hardware here, that is not done until the actual transfer 1795 * controller hardware here, that is not done until the actual transfer
1497 * commence. 1796 * commence.
1498 */ 1797 */
1499
1500/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
1501#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1502 | SPI_LSB_FIRST | SPI_LOOP)
1503
1504static int pl022_setup(struct spi_device *spi) 1798static int pl022_setup(struct spi_device *spi)
1505{ 1799{
1506 struct pl022_config_chip *chip_info; 1800 struct pl022_config_chip const *chip_info;
1507 struct chip_data *chip; 1801 struct chip_data *chip;
1802 struct ssp_clock_params clk_freq;
1508 int status = 0; 1803 int status = 0;
1509 struct pl022 *pl022 = spi_master_get_devdata(spi->master); 1804 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1510 1805 unsigned int bits = spi->bits_per_word;
1511 if (spi->mode & ~MODEBITS) { 1806 u32 tmp;
1512 dev_dbg(&spi->dev, "unsupported mode bits %x\n",
1513 spi->mode & ~MODEBITS);
1514 return -EINVAL;
1515 }
1516 1807
1517 if (!spi->max_speed_hz) 1808 if (!spi->max_speed_hz)
1518 return -EINVAL; 1809 return -EINVAL;
@@ -1535,48 +1826,13 @@ static int pl022_setup(struct spi_device *spi)
1535 chip_info = spi->controller_data; 1826 chip_info = spi->controller_data;
1536 1827
1537 if (chip_info == NULL) { 1828 if (chip_info == NULL) {
1829 chip_info = &pl022_default_chip_info;
1538 /* spi_board_info.controller_data not is supplied */ 1830 /* spi_board_info.controller_data not is supplied */
1539 dev_dbg(&spi->dev, 1831 dev_dbg(&spi->dev,
1540 "using default controller_data settings\n"); 1832 "using default controller_data settings\n");
1541 1833 } else
1542 chip_info =
1543 kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
1544
1545 if (!chip_info) {
1546 dev_err(&spi->dev,
1547 "cannot allocate controller data\n");
1548 status = -ENOMEM;
1549 goto err_first_setup;
1550 }
1551
1552 dev_dbg(&spi->dev, "allocated memory for controller data\n");
1553
1554 /* Pointer back to the SPI device */
1555 chip_info->dev = &spi->dev;
1556 /*
1557 * Set controller data default values:
1558 * Polling is supported by default
1559 */
1560 chip_info->lbm = LOOPBACK_DISABLED;
1561 chip_info->com_mode = POLLING_TRANSFER;
1562 chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
1563 chip_info->hierarchy = SSP_SLAVE;
1564 chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
1565 chip_info->endian_tx = SSP_TX_LSB;
1566 chip_info->endian_rx = SSP_RX_LSB;
1567 chip_info->data_size = SSP_DATA_BITS_12;
1568 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
1569 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
1570 chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
1571 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
1572 chip_info->ctrl_len = SSP_BITS_8;
1573 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
1574 chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
1575 chip_info->cs_control = null_cs_control;
1576 } else {
1577 dev_dbg(&spi->dev, 1834 dev_dbg(&spi->dev,
1578 "using user supplied controller_data settings\n"); 1835 "using user supplied controller_data settings\n");
1579 }
1580 1836
1581 /* 1837 /*
1582 * We can override with custom divisors, else we use the board 1838 * We can override with custom divisors, else we use the board
@@ -1586,29 +1842,48 @@ static int pl022_setup(struct spi_device *spi)
1586 && (0 == chip_info->clk_freq.scr)) { 1842 && (0 == chip_info->clk_freq.scr)) {
1587 status = calculate_effective_freq(pl022, 1843 status = calculate_effective_freq(pl022,
1588 spi->max_speed_hz, 1844 spi->max_speed_hz,
1589 &chip_info->clk_freq); 1845 &clk_freq);
1590 if (status < 0) 1846 if (status < 0)
1591 goto err_config_params; 1847 goto err_config_params;
1592 } else { 1848 } else {
1593 if ((chip_info->clk_freq.cpsdvsr % 2) != 0) 1849 memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
1594 chip_info->clk_freq.cpsdvsr = 1850 if ((clk_freq.cpsdvsr % 2) != 0)
1595 chip_info->clk_freq.cpsdvsr - 1; 1851 clk_freq.cpsdvsr =
1852 clk_freq.cpsdvsr - 1;
1853 }
1854 if ((clk_freq.cpsdvsr < CPSDVR_MIN)
1855 || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
1856 dev_err(&spi->dev,
1857 "cpsdvsr is configured incorrectly\n");
1858 goto err_config_params;
1596 } 1859 }
1860
1861
1597 status = verify_controller_parameters(pl022, chip_info); 1862 status = verify_controller_parameters(pl022, chip_info);
1598 if (status) { 1863 if (status) {
1599 dev_err(&spi->dev, "controller data is incorrect"); 1864 dev_err(&spi->dev, "controller data is incorrect");
1600 goto err_config_params; 1865 goto err_config_params;
1601 } 1866 }
1867
1602 /* Now set controller state based on controller data */ 1868 /* Now set controller state based on controller data */
1603 chip->xfer_type = chip_info->com_mode; 1869 chip->xfer_type = chip_info->com_mode;
1604 chip->cs_control = chip_info->cs_control; 1870 if (!chip_info->cs_control) {
1605 1871 chip->cs_control = null_cs_control;
1606 if (chip_info->data_size <= 8) { 1872 dev_warn(&spi->dev,
1607 dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n"); 1873 "chip select function is NULL for this chip\n");
1874 } else
1875 chip->cs_control = chip_info->cs_control;
1876
1877 if (bits <= 3) {
1878 /* PL022 doesn't support less than 4-bits */
1879 status = -ENOTSUPP;
1880 goto err_config_params;
1881 } else if (bits <= 8) {
1882 dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
1608 chip->n_bytes = 1; 1883 chip->n_bytes = 1;
1609 chip->read = READING_U8; 1884 chip->read = READING_U8;
1610 chip->write = WRITING_U8; 1885 chip->write = WRITING_U8;
1611 } else if (chip_info->data_size <= 16) { 1886 } else if (bits <= 16) {
1612 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); 1887 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
1613 chip->n_bytes = 2; 1888 chip->n_bytes = 2;
1614 chip->read = READING_U16; 1889 chip->read = READING_U16;
@@ -1625,6 +1900,7 @@ static int pl022_setup(struct spi_device *spi)
1625 dev_err(&spi->dev, 1900 dev_err(&spi->dev,
1626 "a standard pl022 can only handle " 1901 "a standard pl022 can only handle "
1627 "1 <= n <= 16 bit words\n"); 1902 "1 <= n <= 16 bit words\n");
1903 status = -ENOTSUPP;
1628 goto err_config_params; 1904 goto err_config_params;
1629 } 1905 }
1630 } 1906 }
@@ -1636,9 +1912,8 @@ static int pl022_setup(struct spi_device *spi)
1636 chip->cpsr = 0; 1912 chip->cpsr = 0;
1637 if ((chip_info->com_mode == DMA_TRANSFER) 1913 if ((chip_info->com_mode == DMA_TRANSFER)
1638 && ((pl022->master_info)->enable_dma)) { 1914 && ((pl022->master_info)->enable_dma)) {
1639 chip->enable_dma = 1; 1915 chip->enable_dma = true;
1640 dev_dbg(&spi->dev, "DMA mode set in controller state\n"); 1916 dev_dbg(&spi->dev, "DMA mode set in controller state\n");
1641 status = process_dma_info(chip_info, chip);
1642 if (status < 0) 1917 if (status < 0)
1643 goto err_config_params; 1918 goto err_config_params;
1644 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, 1919 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
@@ -1646,7 +1921,7 @@ static int pl022_setup(struct spi_device *spi)
1646 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, 1921 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1647 SSP_DMACR_MASK_TXDMAE, 1); 1922 SSP_DMACR_MASK_TXDMAE, 1);
1648 } else { 1923 } else {
1649 chip->enable_dma = 0; 1924 chip->enable_dma = false;
1650 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); 1925 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
1651 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, 1926 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1652 SSP_DMACR_MASK_RXDMAE, 0); 1927 SSP_DMACR_MASK_RXDMAE, 0);
@@ -1654,10 +1929,12 @@ static int pl022_setup(struct spi_device *spi)
1654 SSP_DMACR_MASK_TXDMAE, 1); 1929 SSP_DMACR_MASK_TXDMAE, 1);
1655 } 1930 }
1656 1931
1657 chip->cpsr = chip_info->clk_freq.cpsdvsr; 1932 chip->cpsr = clk_freq.cpsdvsr;
1658 1933
1659 /* Special setup for the ST micro extended control registers */ 1934 /* Special setup for the ST micro extended control registers */
1660 if (pl022->vendor->extended_cr) { 1935 if (pl022->vendor->extended_cr) {
1936 u32 etx;
1937
1661 if (pl022->vendor->pl023) { 1938 if (pl022->vendor->pl023) {
1662 /* These bits are only in the PL023 */ 1939 /* These bits are only in the PL023 */
1663 SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, 1940 SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
@@ -1673,29 +1950,51 @@ static int pl022_setup(struct spi_device *spi)
1673 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, 1950 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
1674 SSP_CR1_MASK_MWAIT_ST, 6); 1951 SSP_CR1_MASK_MWAIT_ST, 6);
1675 } 1952 }
1676 SSP_WRITE_BITS(chip->cr0, chip_info->data_size, 1953 SSP_WRITE_BITS(chip->cr0, bits - 1,
1677 SSP_CR0_MASK_DSS_ST, 0); 1954 SSP_CR0_MASK_DSS_ST, 0);
1678 SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, 1955
1679 SSP_CR1_MASK_RENDN_ST, 4); 1956 if (spi->mode & SPI_LSB_FIRST) {
1680 SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, 1957 tmp = SSP_RX_LSB;
1681 SSP_CR1_MASK_TENDN_ST, 5); 1958 etx = SSP_TX_LSB;
1959 } else {
1960 tmp = SSP_RX_MSB;
1961 etx = SSP_TX_MSB;
1962 }
1963 SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
1964 SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
1682 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, 1965 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
1683 SSP_CR1_MASK_RXIFLSEL_ST, 7); 1966 SSP_CR1_MASK_RXIFLSEL_ST, 7);
1684 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, 1967 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
1685 SSP_CR1_MASK_TXIFLSEL_ST, 10); 1968 SSP_CR1_MASK_TXIFLSEL_ST, 10);
1686 } else { 1969 } else {
1687 SSP_WRITE_BITS(chip->cr0, chip_info->data_size, 1970 SSP_WRITE_BITS(chip->cr0, bits - 1,
1688 SSP_CR0_MASK_DSS, 0); 1971 SSP_CR0_MASK_DSS, 0);
1689 SSP_WRITE_BITS(chip->cr0, chip_info->iface, 1972 SSP_WRITE_BITS(chip->cr0, chip_info->iface,
1690 SSP_CR0_MASK_FRF, 4); 1973 SSP_CR0_MASK_FRF, 4);
1691 } 1974 }
1975
1692 /* Stuff that is common for all versions */ 1976 /* Stuff that is common for all versions */
1693 SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6); 1977 if (spi->mode & SPI_CPOL)
1694 SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7); 1978 tmp = SSP_CLK_POL_IDLE_HIGH;
1695 SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8); 1979 else
1980 tmp = SSP_CLK_POL_IDLE_LOW;
1981 SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);
1982
1983 if (spi->mode & SPI_CPHA)
1984 tmp = SSP_CLK_SECOND_EDGE;
1985 else
1986 tmp = SSP_CLK_FIRST_EDGE;
1987 SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);
1988
1989 SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
1696 /* Loopback is available on all versions except PL023 */ 1990 /* Loopback is available on all versions except PL023 */
1697 if (!pl022->vendor->pl023) 1991 if (!pl022->vendor->pl023) {
1698 SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0); 1992 if (spi->mode & SPI_LOOP)
1993 tmp = LOOPBACK_ENABLED;
1994 else
1995 tmp = LOOPBACK_DISABLED;
1996 SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
1997 }
1699 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); 1998 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
1700 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); 1999 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
1701 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); 2000 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
@@ -1704,7 +2003,7 @@ static int pl022_setup(struct spi_device *spi)
1704 spi_set_ctldata(spi, chip); 2003 spi_set_ctldata(spi, chip);
1705 return status; 2004 return status;
1706 err_config_params: 2005 err_config_params:
1707 err_first_setup: 2006 spi_set_ctldata(spi, NULL);
1708 kfree(chip); 2007 kfree(chip);
1709 return status; 2008 return status;
1710} 2009}
@@ -1766,12 +2065,21 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1766 master->setup = pl022_setup; 2065 master->setup = pl022_setup;
1767 master->transfer = pl022_transfer; 2066 master->transfer = pl022_transfer;
1768 2067
2068 /*
2069 * Supports mode 0-3, loopback, and active low CS. Transfers are
2070 * always MS bit first on the original pl022.
2071 */
2072 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
2073 if (pl022->vendor->extended_cr)
2074 master->mode_bits |= SPI_LSB_FIRST;
2075
1769 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); 2076 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
1770 2077
1771 status = amba_request_regions(adev, NULL); 2078 status = amba_request_regions(adev, NULL);
1772 if (status) 2079 if (status)
1773 goto err_no_ioregion; 2080 goto err_no_ioregion;
1774 2081
2082 pl022->phybase = adev->res.start;
1775 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); 2083 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
1776 if (pl022->virtbase == NULL) { 2084 if (pl022->virtbase == NULL) {
1777 status = -ENOMEM; 2085 status = -ENOMEM;
@@ -1798,6 +2106,14 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1798 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); 2106 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
1799 goto err_no_irq; 2107 goto err_no_irq;
1800 } 2108 }
2109
2110 /* Get DMA channels */
2111 if (platform_info->enable_dma) {
2112 status = pl022_dma_probe(pl022);
2113 if (status != 0)
2114 goto err_no_dma;
2115 }
2116
1801 /* Initialize and start queue */ 2117 /* Initialize and start queue */
1802 status = init_queue(pl022); 2118 status = init_queue(pl022);
1803 if (status != 0) { 2119 if (status != 0) {
@@ -1826,6 +2142,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1826 err_start_queue: 2142 err_start_queue:
1827 err_init_queue: 2143 err_init_queue:
1828 destroy_queue(pl022); 2144 destroy_queue(pl022);
2145 pl022_dma_remove(pl022);
2146 err_no_dma:
1829 free_irq(adev->irq[0], pl022); 2147 free_irq(adev->irq[0], pl022);
1830 err_no_irq: 2148 err_no_irq:
1831 clk_put(pl022->clk); 2149 clk_put(pl022->clk);
@@ -1856,6 +2174,7 @@ pl022_remove(struct amba_device *adev)
1856 return status; 2174 return status;
1857 } 2175 }
1858 load_ssp_default_config(pl022); 2176 load_ssp_default_config(pl022);
2177 pl022_dma_remove(pl022);
1859 free_irq(adev->irq[0], pl022); 2178 free_irq(adev->irq[0], pl022);
1860 clk_disable(pl022->clk); 2179 clk_disable(pl022->clk);
1861 clk_put(pl022->clk); 2180 clk_put(pl022->clk);
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index c4e04428992d..154529aacc03 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -654,6 +654,8 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
 	struct spi_transfer *xfer;
 	unsigned long flags;
 	struct device *controller = spi->master->dev.parent;
+	u8 bits;
+	struct atmel_spi_device *asd;
 
 	as = spi_master_get_devdata(spi->master);
 
@@ -672,8 +674,18 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
 			return -EINVAL;
 		}
 
+		if (xfer->bits_per_word) {
+			asd = spi->controller_state;
+			bits = (asd->csr >> 4) & 0xf;
+			if (bits != xfer->bits_per_word - 8) {
+				dev_dbg(&spi->dev, "you can't yet change "
+					"bits_per_word in transfers\n");
+				return -ENOPROTOOPT;
+			}
+		}
+
 		/* FIXME implement these protocol options!! */
-		if (xfer->bits_per_word || xfer->speed_hz) {
+		if (xfer->speed_hz) {
 			dev_dbg(&spi->dev, "no protocol options yet\n");
 			return -ENOPROTOOPT;
 		}
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index b3a94ca0a75a..2a651e61bfbf 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -296,6 +296,19 @@ static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
 	return 0;
 }
 
+static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+{
+	unsigned long timeout;
+
+	timeout = jiffies + msecs_to_jiffies(1000);
+	while (!(__raw_readl(reg) & bit)) {
+		if (time_after(jiffies, timeout))
+			return -1;
+		cpu_relax();
+	}
+	return 0;
+}
+
 static unsigned
 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 {
301{ 314{
@@ -309,11 +322,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	u32 l;
 	u8 * rx;
 	const u8 * tx;
+	void __iomem *chstat_reg;
 
 	mcspi = spi_master_get_devdata(spi->master);
 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 	l = mcspi_cached_chconf0(spi);
 
+	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+
 	count = xfer->len;
 	c = count;
 	word_len = cs->word_len;
@@ -382,6 +398,16 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	if (tx != NULL) {
 		wait_for_completion(&mcspi_dma->dma_tx_completion);
 		dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
+
+		/* for TX_ONLY mode, be sure all words have shifted out */
+		if (rx == NULL) {
+			if (mcspi_wait_for_reg_bit(chstat_reg,
+						OMAP2_MCSPI_CHSTAT_TXS) < 0)
+				dev_err(&spi->dev, "TXS timed out\n");
+			else if (mcspi_wait_for_reg_bit(chstat_reg,
+						OMAP2_MCSPI_CHSTAT_EOT) < 0)
+				dev_err(&spi->dev, "EOT timed out\n");
+		}
 	}
 
 	if (rx != NULL) {
@@ -435,19 +461,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	return count;
 }
 
-static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
-{
-	unsigned long timeout;
-
-	timeout = jiffies + msecs_to_jiffies(1000);
-	while (!(__raw_readl(reg) & bit)) {
-		if (time_after(jiffies, timeout))
-			return -1;
-		cpu_relax();
-	}
-	return 0;
-}
-
 static unsigned
 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 {
@@ -489,10 +502,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 				dev_err(&spi->dev, "TXS timed out\n");
 				goto out;
 			}
-#ifdef VERBOSE
-			dev_dbg(&spi->dev, "write-%d %02x\n",
+			dev_vdbg(&spi->dev, "write-%d %02x\n",
 					word_len, *tx);
-#endif
 			__raw_writel(*tx++, tx_reg);
 		}
 		if (rx != NULL) {
@@ -506,10 +517,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 			    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
 				omap2_mcspi_set_enable(spi, 0);
 				*rx++ = __raw_readl(rx_reg);
-#ifdef VERBOSE
-				dev_dbg(&spi->dev, "read-%d %02x\n",
+				dev_vdbg(&spi->dev, "read-%d %02x\n",
 						word_len, *(rx - 1));
-#endif
 				if (mcspi_wait_for_reg_bit(chstat_reg,
 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
 					dev_err(&spi->dev,
@@ -522,10 +531,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 				}
 
 				*rx++ = __raw_readl(rx_reg);
-#ifdef VERBOSE
-				dev_dbg(&spi->dev, "read-%d %02x\n",
+				dev_vdbg(&spi->dev, "read-%d %02x\n",
 						word_len, *(rx - 1));
-#endif
 			}
 		} while (c);
 	} else if (word_len <= 16) {
@@ -542,10 +549,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 				dev_err(&spi->dev, "TXS timed out\n");
 				goto out;
 			}
-#ifdef VERBOSE
-			dev_dbg(&spi->dev, "write-%d %04x\n",
+			dev_vdbg(&spi->dev, "write-%d %04x\n",
 					word_len, *tx);
-#endif
 			__raw_writel(*tx++, tx_reg);
 		}
 		if (rx != NULL) {
@@ -559,10 +564,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 			    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
 				omap2_mcspi_set_enable(spi, 0);
 				*rx++ = __raw_readl(rx_reg);
-#ifdef VERBOSE
-				dev_dbg(&spi->dev, "read-%d %04x\n",
+				dev_vdbg(&spi->dev, "read-%d %04x\n",
 						word_len, *(rx - 1));
-#endif
 				if (mcspi_wait_for_reg_bit(chstat_reg,
 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
 					dev_err(&spi->dev,
@@ -575,10 +578,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 				}
 
 				*rx++ = __raw_readl(rx_reg);
-#ifdef VERBOSE
-				dev_dbg(&spi->dev, "read-%d %04x\n",
+				dev_vdbg(&spi->dev, "read-%d %04x\n",
 						word_len, *(rx - 1));
-#endif
 			}
 		} while (c);
 	} else if (word_len <= 32) {
@@ -595,10 +596,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 				dev_err(&spi->dev, "TXS timed out\n");
 				goto out;
 			}
-#ifdef VERBOSE
-			dev_dbg(&spi->dev, "write-%d %08x\n",
+			dev_vdbg(&spi->dev, "write-%d %08x\n",
 					word_len, *tx);
-#endif
 			__raw_writel(*tx++, tx_reg);
 		}
 		if (rx != NULL) {
@@ -612,10 +611,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 			    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
 				omap2_mcspi_set_enable(spi, 0);
 				*rx++ = __raw_readl(rx_reg);
-#ifdef VERBOSE
-				dev_dbg(&spi->dev, "read-%d %08x\n",
+				dev_vdbg(&spi->dev, "read-%d %08x\n",
 						word_len, *(rx - 1));
-#endif
 				if (mcspi_wait_for_reg_bit(chstat_reg,
 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
 					dev_err(&spi->dev,
@@ -628,10 +625,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
628 } 625 }
629 626
630 *rx++ = __raw_readl(rx_reg); 627 *rx++ = __raw_readl(rx_reg);
631#ifdef VERBOSE 628 dev_vdbg(&spi->dev, "read-%d %08x\n",
632 dev_dbg(&spi->dev, "read-%d %08x\n",
633 word_len, *(rx - 1)); 629 word_len, *(rx - 1));
634#endif
635 } 630 }
636 } while (c); 631 } while (c);
637 } 632 }
@@ -644,6 +639,12 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
644 } else if (mcspi_wait_for_reg_bit(chstat_reg, 639 } else if (mcspi_wait_for_reg_bit(chstat_reg,
645 OMAP2_MCSPI_CHSTAT_EOT) < 0) 640 OMAP2_MCSPI_CHSTAT_EOT) < 0)
646 dev_err(&spi->dev, "EOT timed out\n"); 641 dev_err(&spi->dev, "EOT timed out\n");
642
643 /* disable chan to purge rx data received in TX_ONLY transfer,
644 * otherwise this rx data will affect the directly following
645 * RX_ONLY transfer.
646 */
647 omap2_mcspi_set_enable(spi, 0);
647 } 648 }
648out: 649out:
649 omap2_mcspi_set_enable(spi, 1); 650 omap2_mcspi_set_enable(spi, 1);
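
Note on the omap2_mcspi hunk above: during a TX-only transfer the controller still clocks words into the receive side, and without the added omap2_mcspi_set_enable(spi, 0) those stale words would be handed back at the start of the next RX-only transfer. A minimal sketch of the message shape that exposes the problem, using the core SPI API (the function and buffer names are illustrative, not taken from the driver):

    #include <linux/spi/spi.h>

    /* TX-only transfer immediately followed by an RX-only transfer */
    static int demo_write_then_read(struct spi_device *spi,
                                    const u8 *cmd, size_t cmd_len,
                                    u8 *resp, size_t resp_len)
    {
            struct spi_transfer t[2] = {
                    { .tx_buf = cmd,  .len = cmd_len  },    /* TX_ONLY leg */
                    { .rx_buf = resp, .len = resp_len },    /* RX_ONLY leg */
            };
            struct spi_message m;

            spi_message_init(&m);
            spi_message_add_tail(&t[0], &m);
            spi_message_add_tail(&t[1], &m);
            /* resp should not begin with words left over from the TX leg */
            return spi_sync(spi, &m);
    }
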
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c
index 3aea50da7b29..0b677dc041ad 100644
--- a/drivers/spi/orion_spi.c
+++ b/drivers/spi/orion_spi.c
@@ -404,7 +404,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m)
404 goto msg_rejected; 404 goto msg_rejected;
405 } 405 }
406 406
407 if ((t != NULL) && t->bits_per_word) 407 if (t->bits_per_word)
408 bits_per_word = t->bits_per_word; 408 bits_per_word = t->bits_per_word;
409 409
410 if ((bits_per_word != 8) && (bits_per_word != 16)) { 410 if ((bits_per_word != 8) && (bits_per_word != 16)) {
@@ -415,7 +415,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m)
415 goto msg_rejected; 415 goto msg_rejected;
416 } 416 }
417 /*make sure buffer length is even when working in 16 bit mode*/ 417 /*make sure buffer length is even when working in 16 bit mode*/
418 if ((t != NULL) && (t->bits_per_word == 16) && (t->len & 1)) { 418 if ((t->bits_per_word == 16) && (t->len & 1)) {
419 dev_err(&spi->dev, 419 dev_err(&spi->dev,
420 "message rejected : " 420 "message rejected : "
421 "odd data length (%d) while in 16 bit mode\n", 421 "odd data length (%d) while in 16 bit mode\n",
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 10a6dc3d37ac..ab483a0ec6d0 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Blackfin On-Chip SPI Driver 2 * Blackfin On-Chip SPI Driver
3 * 3 *
4 * Copyright 2004-2007 Analog Devices Inc. 4 * Copyright 2004-2010 Analog Devices Inc.
5 * 5 *
6 * Enter bugs at http://blackfin.uclinux.org/ 6 * Enter bugs at http://blackfin.uclinux.org/
7 * 7 *
@@ -41,13 +41,16 @@ MODULE_LICENSE("GPL");
41#define RUNNING_STATE ((void *)1) 41#define RUNNING_STATE ((void *)1)
42#define DONE_STATE ((void *)2) 42#define DONE_STATE ((void *)2)
43#define ERROR_STATE ((void *)-1) 43#define ERROR_STATE ((void *)-1)
44#define QUEUE_RUNNING 0
45#define QUEUE_STOPPED 1
46 44
47/* Value to send if no TX value is supplied */ 45struct bfin_spi_master_data;
48#define SPI_IDLE_TXVAL 0x0000
49 46
50struct driver_data { 47struct bfin_spi_transfer_ops {
48 void (*write) (struct bfin_spi_master_data *);
49 void (*read) (struct bfin_spi_master_data *);
50 void (*duplex) (struct bfin_spi_master_data *);
51};
52
53struct bfin_spi_master_data {
51 /* Driver model hookup */ 54 /* Driver model hookup */
52 struct platform_device *pdev; 55 struct platform_device *pdev;
53 56
@@ -69,7 +72,7 @@ struct driver_data {
69 spinlock_t lock; 72 spinlock_t lock;
70 struct list_head queue; 73 struct list_head queue;
71 int busy; 74 int busy;
72 int run; 75 bool running;
73 76
74 /* Message Transfer pump */ 77 /* Message Transfer pump */
75 struct tasklet_struct pump_transfers; 78 struct tasklet_struct pump_transfers;
@@ -77,7 +80,7 @@ struct driver_data {
77 /* Current message transfer state info */ 80 /* Current message transfer state info */
78 struct spi_message *cur_msg; 81 struct spi_message *cur_msg;
79 struct spi_transfer *cur_transfer; 82 struct spi_transfer *cur_transfer;
80 struct chip_data *cur_chip; 83 struct bfin_spi_slave_data *cur_chip;
81 size_t len_in_bytes; 84 size_t len_in_bytes;
82 size_t len; 85 size_t len;
83 void *tx; 86 void *tx;
@@ -92,38 +95,37 @@ struct driver_data {
92 dma_addr_t rx_dma; 95 dma_addr_t rx_dma;
93 dma_addr_t tx_dma; 96 dma_addr_t tx_dma;
94 97
98 int irq_requested;
99 int spi_irq;
100
95 size_t rx_map_len; 101 size_t rx_map_len;
96 size_t tx_map_len; 102 size_t tx_map_len;
97 u8 n_bytes; 103 u8 n_bytes;
104 u16 ctrl_reg;
105 u16 flag_reg;
106
98 int cs_change; 107 int cs_change;
99 void (*write) (struct driver_data *); 108 const struct bfin_spi_transfer_ops *ops;
100 void (*read) (struct driver_data *);
101 void (*duplex) (struct driver_data *);
102}; 109};
103 110
104struct chip_data { 111struct bfin_spi_slave_data {
105 u16 ctl_reg; 112 u16 ctl_reg;
106 u16 baud; 113 u16 baud;
107 u16 flag; 114 u16 flag;
108 115
109 u8 chip_select_num; 116 u8 chip_select_num;
110 u8 n_bytes;
111 u8 width; /* 0 or 1 */
112 u8 enable_dma; 117 u8 enable_dma;
113 u8 bits_per_word; /* 8 or 16 */
114 u8 cs_change_per_word;
115 u16 cs_chg_udelay; /* Some devices require > 255usec delay */ 118 u16 cs_chg_udelay; /* Some devices require > 255usec delay */
116 u32 cs_gpio; 119 u32 cs_gpio;
117 u16 idle_tx_val; 120 u16 idle_tx_val;
118 void (*write) (struct driver_data *); 121 u8 pio_interrupt; /* use spi data irq */
119 void (*read) (struct driver_data *); 122 const struct bfin_spi_transfer_ops *ops;
120 void (*duplex) (struct driver_data *);
121}; 123};
122 124
123#define DEFINE_SPI_REG(reg, off) \ 125#define DEFINE_SPI_REG(reg, off) \
124static inline u16 read_##reg(struct driver_data *drv_data) \ 126static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \
125 { return bfin_read16(drv_data->regs_base + off); } \ 127 { return bfin_read16(drv_data->regs_base + off); } \
126static inline void write_##reg(struct driver_data *drv_data, u16 v) \ 128static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \
127 { bfin_write16(drv_data->regs_base + off, v); } 129 { bfin_write16(drv_data->regs_base + off, v); }
128 130
129DEFINE_SPI_REG(CTRL, 0x00) 131DEFINE_SPI_REG(CTRL, 0x00)
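
DEFINE_SPI_REG is a plain token-pasting macro: each invocation emits a 16-bit read/write accessor pair bound to one register offset. DEFINE_SPI_REG(CTRL, 0x00), for example, expands to roughly:

    static inline u16 read_CTRL(struct bfin_spi_master_data *drv_data)
    {
            return bfin_read16(drv_data->regs_base + 0x00);
    }
    static inline void write_CTRL(struct bfin_spi_master_data *drv_data, u16 v)
    {
            bfin_write16(drv_data->regs_base + 0x00, v);
    }
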
@@ -134,7 +136,7 @@ DEFINE_SPI_REG(RDBR, 0x10)
134DEFINE_SPI_REG(BAUD, 0x14) 136DEFINE_SPI_REG(BAUD, 0x14)
135DEFINE_SPI_REG(SHAW, 0x18) 137DEFINE_SPI_REG(SHAW, 0x18)
136 138
137static void bfin_spi_enable(struct driver_data *drv_data) 139static void bfin_spi_enable(struct bfin_spi_master_data *drv_data)
138{ 140{
139 u16 cr; 141 u16 cr;
140 142
@@ -142,7 +144,7 @@ static void bfin_spi_enable(struct driver_data *drv_data)
142 write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); 144 write_CTRL(drv_data, (cr | BIT_CTL_ENABLE));
143} 145}
144 146
145static void bfin_spi_disable(struct driver_data *drv_data) 147static void bfin_spi_disable(struct bfin_spi_master_data *drv_data)
146{ 148{
147 u16 cr; 149 u16 cr;
148 150
@@ -165,7 +167,7 @@ static u16 hz_to_spi_baud(u32 speed_hz)
165 return spi_baud; 167 return spi_baud;
166} 168}
167 169
168static int bfin_spi_flush(struct driver_data *drv_data) 170static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
169{ 171{
170 unsigned long limit = loops_per_jiffy << 1; 172 unsigned long limit = loops_per_jiffy << 1;
171 173
@@ -179,13 +181,12 @@ static int bfin_spi_flush(struct driver_data *drv_data)
179} 181}
180 182
181/* Chip select operation functions for cs_change flag */ 183/* Chip select operation functions for cs_change flag */
182static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *chip) 184static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip)
183{ 185{
184 if (likely(chip->chip_select_num)) { 186 if (likely(chip->chip_select_num < MAX_CTRL_CS)) {
185 u16 flag = read_FLAG(drv_data); 187 u16 flag = read_FLAG(drv_data);
186 188
187 flag |= chip->flag; 189 flag &= ~chip->flag;
188 flag &= ~(chip->flag << 8);
189 190
190 write_FLAG(drv_data, flag); 191 write_FLAG(drv_data, flag);
191 } else { 192 } else {
@@ -193,13 +194,13 @@ static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *c
193 } 194 }
194} 195}
195 196
196static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data *chip) 197static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
198 struct bfin_spi_slave_data *chip)
197{ 199{
198 if (likely(chip->chip_select_num)) { 200 if (likely(chip->chip_select_num < MAX_CTRL_CS)) {
199 u16 flag = read_FLAG(drv_data); 201 u16 flag = read_FLAG(drv_data);
200 202
201 flag &= ~chip->flag; 203 flag |= chip->flag;
202 flag |= (chip->flag << 8);
203 204
204 write_FLAG(drv_data, flag); 205 write_FLAG(drv_data, flag);
205 } else { 206 } else {
@@ -211,16 +212,43 @@ static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data
211 udelay(chip->cs_chg_udelay); 212 udelay(chip->cs_chg_udelay);
212} 213}
213 214
215/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
216static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data,
217 struct bfin_spi_slave_data *chip)
218{
219 if (chip->chip_select_num < MAX_CTRL_CS) {
220 u16 flag = read_FLAG(drv_data);
221
222 flag |= (chip->flag >> 8);
223
224 write_FLAG(drv_data, flag);
225 }
226}
227
228static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data,
229 struct bfin_spi_slave_data *chip)
230{
231 if (chip->chip_select_num < MAX_CTRL_CS) {
232 u16 flag = read_FLAG(drv_data);
233
234 flag &= ~(chip->flag >> 8);
235
236 write_FLAG(drv_data, flag);
237 }
238}
239
214/* stop controller and re-config current chip*/ 240/* stop controller and re-config current chip*/
215static void bfin_spi_restore_state(struct driver_data *drv_data) 241static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
216{ 242{
217 struct chip_data *chip = drv_data->cur_chip; 243 struct bfin_spi_slave_data *chip = drv_data->cur_chip;
218 244
219 /* Clear status and disable clock */ 245 /* Clear status and disable clock */
220 write_STAT(drv_data, BIT_STAT_CLR); 246 write_STAT(drv_data, BIT_STAT_CLR);
221 bfin_spi_disable(drv_data); 247 bfin_spi_disable(drv_data);
222 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); 248 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
223 249
250 SSYNC();
251
224 /* Load the registers */ 252 /* Load the registers */
225 write_CTRL(drv_data, chip->ctl_reg); 253 write_CTRL(drv_data, chip->ctl_reg);
226 write_BAUD(drv_data, chip->baud); 254 write_BAUD(drv_data, chip->baud);
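
With the reworked chip-select handling, chip->flag packs two related bits: the drive-level bit for the select line in the upper byte (set later in bfin_spi_setup() as (1 << chip_select) << 8) and, shifted right by eight, the matching pin-enable bit in the lower byte used by bfin_spi_cs_enable()/bfin_spi_cs_disable(). The select lines are active low, so bfin_spi_cs_active() clears the level bit and bfin_spi_cs_deactive() sets it. An illustrative pair of helpers showing the mask arithmetic (these helpers are mine, not part of the driver):

    /* chip_select is a hardware-controlled select below MAX_CTRL_CS */
    static inline u16 demo_cs_level_mask(unsigned int chip_select)
    {
            return (1u << chip_select) << 8;        /* upper byte: line level */
    }

    static inline u16 demo_cs_enable_mask(unsigned int chip_select)
    {
            return demo_cs_level_mask(chip_select) >> 8;    /* lower byte: pin enable */
    }
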
@@ -230,49 +258,12 @@ static void bfin_spi_restore_state(struct driver_data *drv_data)
230} 258}
231 259
232/* used to kick off transfer in rx mode and read unwanted RX data */ 260/* used to kick off transfer in rx mode and read unwanted RX data */
233static inline void bfin_spi_dummy_read(struct driver_data *drv_data) 261static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data)
234{ 262{
235 (void) read_RDBR(drv_data); 263 (void) read_RDBR(drv_data);
236} 264}
237 265
238static void bfin_spi_null_writer(struct driver_data *drv_data) 266static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
239{
240 u8 n_bytes = drv_data->n_bytes;
241 u16 tx_val = drv_data->cur_chip->idle_tx_val;
242
243 /* clear RXS (we check for RXS inside the loop) */
244 bfin_spi_dummy_read(drv_data);
245
246 while (drv_data->tx < drv_data->tx_end) {
247 write_TDBR(drv_data, tx_val);
248 drv_data->tx += n_bytes;
249 /* wait until transfer finished.
250 checking SPIF or TXS may not guarantee transfer completion */
251 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
252 cpu_relax();
253 /* discard RX data and clear RXS */
254 bfin_spi_dummy_read(drv_data);
255 }
256}
257
258static void bfin_spi_null_reader(struct driver_data *drv_data)
259{
260 u8 n_bytes = drv_data->n_bytes;
261 u16 tx_val = drv_data->cur_chip->idle_tx_val;
262
263 /* discard old RX data and clear RXS */
264 bfin_spi_dummy_read(drv_data);
265
266 while (drv_data->rx < drv_data->rx_end) {
267 write_TDBR(drv_data, tx_val);
268 drv_data->rx += n_bytes;
269 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
270 cpu_relax();
271 bfin_spi_dummy_read(drv_data);
272 }
273}
274
275static void bfin_spi_u8_writer(struct driver_data *drv_data)
276{ 267{
277 /* clear RXS (we check for RXS inside the loop) */ 268 /* clear RXS (we check for RXS inside the loop) */
278 bfin_spi_dummy_read(drv_data); 269 bfin_spi_dummy_read(drv_data);
@@ -288,25 +279,7 @@ static void bfin_spi_u8_writer(struct driver_data *drv_data)
288 } 279 }
289} 280}
290 281
291static void bfin_spi_u8_cs_chg_writer(struct driver_data *drv_data) 282static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data)
292{
293 struct chip_data *chip = drv_data->cur_chip;
294
295 /* clear RXS (we check for RXS inside the loop) */
296 bfin_spi_dummy_read(drv_data);
297
298 while (drv_data->tx < drv_data->tx_end) {
299 bfin_spi_cs_active(drv_data, chip);
300 write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
301 /* make sure transfer finished before deactiving CS */
302 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
303 cpu_relax();
304 bfin_spi_dummy_read(drv_data);
305 bfin_spi_cs_deactive(drv_data, chip);
306 }
307}
308
309static void bfin_spi_u8_reader(struct driver_data *drv_data)
310{ 283{
311 u16 tx_val = drv_data->cur_chip->idle_tx_val; 284 u16 tx_val = drv_data->cur_chip->idle_tx_val;
312 285
@@ -321,25 +294,7 @@ static void bfin_spi_u8_reader(struct driver_data *drv_data)
321 } 294 }
322} 295}
323 296
324static void bfin_spi_u8_cs_chg_reader(struct driver_data *drv_data) 297static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data)
325{
326 struct chip_data *chip = drv_data->cur_chip;
327 u16 tx_val = chip->idle_tx_val;
328
329 /* discard old RX data and clear RXS */
330 bfin_spi_dummy_read(drv_data);
331
332 while (drv_data->rx < drv_data->rx_end) {
333 bfin_spi_cs_active(drv_data, chip);
334 write_TDBR(drv_data, tx_val);
335 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
336 cpu_relax();
337 *(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
338 bfin_spi_cs_deactive(drv_data, chip);
339 }
340}
341
342static void bfin_spi_u8_duplex(struct driver_data *drv_data)
343{ 298{
344 /* discard old RX data and clear RXS */ 299 /* discard old RX data and clear RXS */
345 bfin_spi_dummy_read(drv_data); 300 bfin_spi_dummy_read(drv_data);
@@ -352,24 +307,13 @@ static void bfin_spi_u8_duplex(struct driver_data *drv_data)
352 } 307 }
353} 308}
354 309
355static void bfin_spi_u8_cs_chg_duplex(struct driver_data *drv_data) 310static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = {
356{ 311 .write = bfin_spi_u8_writer,
357 struct chip_data *chip = drv_data->cur_chip; 312 .read = bfin_spi_u8_reader,
358 313 .duplex = bfin_spi_u8_duplex,
359 /* discard old RX data and clear RXS */ 314};
360 bfin_spi_dummy_read(drv_data);
361
362 while (drv_data->rx < drv_data->rx_end) {
363 bfin_spi_cs_active(drv_data, chip);
364 write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
365 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
366 cpu_relax();
367 *(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
368 bfin_spi_cs_deactive(drv_data, chip);
369 }
370}
371 315
372static void bfin_spi_u16_writer(struct driver_data *drv_data) 316static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data)
373{ 317{
374 /* clear RXS (we check for RXS inside the loop) */ 318 /* clear RXS (we check for RXS inside the loop) */
375 bfin_spi_dummy_read(drv_data); 319 bfin_spi_dummy_read(drv_data);
@@ -386,26 +330,7 @@ static void bfin_spi_u16_writer(struct driver_data *drv_data)
386 } 330 }
387} 331}
388 332
389static void bfin_spi_u16_cs_chg_writer(struct driver_data *drv_data) 333static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data)
390{
391 struct chip_data *chip = drv_data->cur_chip;
392
393 /* clear RXS (we check for RXS inside the loop) */
394 bfin_spi_dummy_read(drv_data);
395
396 while (drv_data->tx < drv_data->tx_end) {
397 bfin_spi_cs_active(drv_data, chip);
398 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
399 drv_data->tx += 2;
400 /* make sure transfer finished before deactiving CS */
401 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
402 cpu_relax();
403 bfin_spi_dummy_read(drv_data);
404 bfin_spi_cs_deactive(drv_data, chip);
405 }
406}
407
408static void bfin_spi_u16_reader(struct driver_data *drv_data)
409{ 334{
410 u16 tx_val = drv_data->cur_chip->idle_tx_val; 335 u16 tx_val = drv_data->cur_chip->idle_tx_val;
411 336
@@ -421,26 +346,7 @@ static void bfin_spi_u16_reader(struct driver_data *drv_data)
421 } 346 }
422} 347}
423 348
424static void bfin_spi_u16_cs_chg_reader(struct driver_data *drv_data) 349static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data)
425{
426 struct chip_data *chip = drv_data->cur_chip;
427 u16 tx_val = chip->idle_tx_val;
428
429 /* discard old RX data and clear RXS */
430 bfin_spi_dummy_read(drv_data);
431
432 while (drv_data->rx < drv_data->rx_end) {
433 bfin_spi_cs_active(drv_data, chip);
434 write_TDBR(drv_data, tx_val);
435 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
436 cpu_relax();
437 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
438 drv_data->rx += 2;
439 bfin_spi_cs_deactive(drv_data, chip);
440 }
441}
442
443static void bfin_spi_u16_duplex(struct driver_data *drv_data)
444{ 350{
445 /* discard old RX data and clear RXS */ 351 /* discard old RX data and clear RXS */
446 bfin_spi_dummy_read(drv_data); 352 bfin_spi_dummy_read(drv_data);
@@ -455,27 +361,14 @@ static void bfin_spi_u16_duplex(struct driver_data *drv_data)
455 } 361 }
456} 362}
457 363
458static void bfin_spi_u16_cs_chg_duplex(struct driver_data *drv_data) 364static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = {
459{ 365 .write = bfin_spi_u16_writer,
460 struct chip_data *chip = drv_data->cur_chip; 366 .read = bfin_spi_u16_reader,
461 367 .duplex = bfin_spi_u16_duplex,
462 /* discard old RX data and clear RXS */ 368};
463 bfin_spi_dummy_read(drv_data);
464
465 while (drv_data->rx < drv_data->rx_end) {
466 bfin_spi_cs_active(drv_data, chip);
467 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
468 drv_data->tx += 2;
469 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
470 cpu_relax();
471 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
472 drv_data->rx += 2;
473 bfin_spi_cs_deactive(drv_data, chip);
474 }
475}
476 369
477/* test if ther is more transfer to be done */ 370/* test if there is more transfer to be done */
478static void *bfin_spi_next_transfer(struct driver_data *drv_data) 371static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data)
479{ 372{
480 struct spi_message *msg = drv_data->cur_msg; 373 struct spi_message *msg = drv_data->cur_msg;
481 struct spi_transfer *trans = drv_data->cur_transfer; 374 struct spi_transfer *trans = drv_data->cur_transfer;
@@ -494,9 +387,9 @@ static void *bfin_spi_next_transfer(struct driver_data *drv_data)
494 * caller already set message->status; 387 * caller already set message->status;
495 * dma and pio irqs are blocked give finished message back 388 * dma and pio irqs are blocked give finished message back
496 */ 389 */
497static void bfin_spi_giveback(struct driver_data *drv_data) 390static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
498{ 391{
499 struct chip_data *chip = drv_data->cur_chip; 392 struct bfin_spi_slave_data *chip = drv_data->cur_chip;
500 struct spi_transfer *last_transfer; 393 struct spi_transfer *last_transfer;
501 unsigned long flags; 394 unsigned long flags;
502 struct spi_message *msg; 395 struct spi_message *msg;
@@ -525,10 +418,83 @@ static void bfin_spi_giveback(struct driver_data *drv_data)
525 msg->complete(msg->context); 418 msg->complete(msg->context);
526} 419}
527 420
421/* spi data irq handler */
422static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
423{
424 struct bfin_spi_master_data *drv_data = dev_id;
425 struct bfin_spi_slave_data *chip = drv_data->cur_chip;
426 struct spi_message *msg = drv_data->cur_msg;
427 int n_bytes = drv_data->n_bytes;
428
429 /* wait until transfer finished. */
430 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
431 cpu_relax();
432
433 if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) ||
434 (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) {
435 /* last read */
436 if (drv_data->rx) {
437 dev_dbg(&drv_data->pdev->dev, "last read\n");
438 if (n_bytes == 2)
439 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
440 else if (n_bytes == 1)
441 *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
442 drv_data->rx += n_bytes;
443 }
444
445 msg->actual_length += drv_data->len_in_bytes;
446 if (drv_data->cs_change)
447 bfin_spi_cs_deactive(drv_data, chip);
448 /* Move to next transfer */
449 msg->state = bfin_spi_next_transfer(drv_data);
450
451 disable_irq_nosync(drv_data->spi_irq);
452
453 /* Schedule transfer tasklet */
454 tasklet_schedule(&drv_data->pump_transfers);
455 return IRQ_HANDLED;
456 }
457
458 if (drv_data->rx && drv_data->tx) {
459 /* duplex */
460 dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
461 if (drv_data->n_bytes == 2) {
462 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
463 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
464 } else if (drv_data->n_bytes == 1) {
465 *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
466 write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
467 }
468 } else if (drv_data->rx) {
469 /* read */
470 dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
471 if (drv_data->n_bytes == 2)
472 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
473 else if (drv_data->n_bytes == 1)
474 *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
475 write_TDBR(drv_data, chip->idle_tx_val);
476 } else if (drv_data->tx) {
477 /* write */
478 dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
479 bfin_spi_dummy_read(drv_data);
480 if (drv_data->n_bytes == 2)
481 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
482 else if (drv_data->n_bytes == 1)
483 write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
484 }
485
486 if (drv_data->tx)
487 drv_data->tx += n_bytes;
488 if (drv_data->rx)
489 drv_data->rx += n_bytes;
490
491 return IRQ_HANDLED;
492}
493
528static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) 494static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
529{ 495{
530 struct driver_data *drv_data = dev_id; 496 struct bfin_spi_master_data *drv_data = dev_id;
531 struct chip_data *chip = drv_data->cur_chip; 497 struct bfin_spi_slave_data *chip = drv_data->cur_chip;
532 struct spi_message *msg = drv_data->cur_msg; 498 struct spi_message *msg = drv_data->cur_msg;
533 unsigned long timeout; 499 unsigned long timeout;
534 unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); 500 unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel);
@@ -540,10 +506,6 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
540 506
541 clear_dma_irqstat(drv_data->dma_channel); 507 clear_dma_irqstat(drv_data->dma_channel);
542 508
543 /* Wait for DMA to complete */
544 while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN)
545 cpu_relax();
546
547 /* 509 /*
548 * wait for the last transaction shifted out. HRM states: 510 * wait for the last transaction shifted out. HRM states:
549 * at this point there may still be data in the SPI DMA FIFO waiting 511 * at this point there may still be data in the SPI DMA FIFO waiting
@@ -551,8 +513,8 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
551 * register until it goes low for 2 successive reads 513 * register until it goes low for 2 successive reads
552 */ 514 */
553 if (drv_data->tx != NULL) { 515 if (drv_data->tx != NULL) {
554 while ((read_STAT(drv_data) & TXS) || 516 while ((read_STAT(drv_data) & BIT_STAT_TXS) ||
555 (read_STAT(drv_data) & TXS)) 517 (read_STAT(drv_data) & BIT_STAT_TXS))
556 cpu_relax(); 518 cpu_relax();
557 } 519 }
558 520
@@ -561,14 +523,14 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
561 dmastat, read_STAT(drv_data)); 523 dmastat, read_STAT(drv_data));
562 524
563 timeout = jiffies + HZ; 525 timeout = jiffies + HZ;
564 while (!(read_STAT(drv_data) & SPIF)) 526 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
565 if (!time_before(jiffies, timeout)) { 527 if (!time_before(jiffies, timeout)) {
566 dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); 528 dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF");
567 break; 529 break;
568 } else 530 } else
569 cpu_relax(); 531 cpu_relax();
570 532
571 if ((dmastat & DMA_ERR) && (spistat & RBSY)) { 533 if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) {
572 msg->state = ERROR_STATE; 534 msg->state = ERROR_STATE;
573 dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); 535 dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n");
574 } else { 536 } else {
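
The SPIF wait above is the usual bounded-poll idiom: record a deadline in jiffies, spin with cpu_relax(), and give up with a warning once time_before() says the deadline has passed. The same idiom in isolation (hw_transfer_finished() is a placeholder for the real status check, not a kernel API):

    unsigned long timeout = jiffies + HZ;   /* allow roughly one second */

    while (!hw_transfer_finished()) {
            if (!time_before(jiffies, timeout)) {
                    pr_warn("timeout waiting for transfer to finish\n");
                    break;
            }
            cpu_relax();
    }
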
@@ -588,20 +550,20 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
588 dev_dbg(&drv_data->pdev->dev, 550 dev_dbg(&drv_data->pdev->dev,
589 "disable dma channel irq%d\n", 551 "disable dma channel irq%d\n",
590 drv_data->dma_channel); 552 drv_data->dma_channel);
591 dma_disable_irq(drv_data->dma_channel); 553 dma_disable_irq_nosync(drv_data->dma_channel);
592 554
593 return IRQ_HANDLED; 555 return IRQ_HANDLED;
594} 556}
595 557
596static void bfin_spi_pump_transfers(unsigned long data) 558static void bfin_spi_pump_transfers(unsigned long data)
597{ 559{
598 struct driver_data *drv_data = (struct driver_data *)data; 560 struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data;
599 struct spi_message *message = NULL; 561 struct spi_message *message = NULL;
600 struct spi_transfer *transfer = NULL; 562 struct spi_transfer *transfer = NULL;
601 struct spi_transfer *previous = NULL; 563 struct spi_transfer *previous = NULL;
602 struct chip_data *chip = NULL; 564 struct bfin_spi_slave_data *chip = NULL;
603 u8 width; 565 unsigned int bits_per_word;
604 u16 cr, dma_width, dma_config; 566 u16 cr, cr_width, dma_width, dma_config;
605 u32 tranf_success = 1; 567 u32 tranf_success = 1;
606 u8 full_duplex = 0; 568 u8 full_duplex = 0;
607 569
@@ -639,7 +601,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
639 udelay(previous->delay_usecs); 601 udelay(previous->delay_usecs);
640 } 602 }
641 603
642 /* Setup the transfer state based on the type of transfer */ 604 /* Flush any existing transfers that may be sitting in the hardware */
643 if (bfin_spi_flush(drv_data) == 0) { 605 if (bfin_spi_flush(drv_data) == 0) {
644 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 606 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
645 message->status = -EIO; 607 message->status = -EIO;
@@ -679,52 +641,31 @@ static void bfin_spi_pump_transfers(unsigned long data)
679 drv_data->cs_change = transfer->cs_change; 641 drv_data->cs_change = transfer->cs_change;
680 642
681 /* Bits per word setup */ 643 /* Bits per word setup */
682 switch (transfer->bits_per_word) { 644 bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
683 case 8: 645 if (bits_per_word == 8) {
684 drv_data->n_bytes = 1; 646 drv_data->n_bytes = 1;
685 width = CFG_SPI_WORDSIZE8; 647 drv_data->len = transfer->len;
686 drv_data->read = chip->cs_change_per_word ? 648 cr_width = 0;
687 bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; 649 drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
688 drv_data->write = chip->cs_change_per_word ? 650 } else if (bits_per_word == 16) {
689 bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer;
690 drv_data->duplex = chip->cs_change_per_word ?
691 bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex;
692 break;
693
694 case 16:
695 drv_data->n_bytes = 2; 651 drv_data->n_bytes = 2;
696 width = CFG_SPI_WORDSIZE16;
697 drv_data->read = chip->cs_change_per_word ?
698 bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader;
699 drv_data->write = chip->cs_change_per_word ?
700 bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer;
701 drv_data->duplex = chip->cs_change_per_word ?
702 bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex;
703 break;
704
705 default:
706 /* No change, the same as default setting */
707 drv_data->n_bytes = chip->n_bytes;
708 width = chip->width;
709 drv_data->write = drv_data->tx ? chip->write : bfin_spi_null_writer;
710 drv_data->read = drv_data->rx ? chip->read : bfin_spi_null_reader;
711 drv_data->duplex = chip->duplex ? chip->duplex : bfin_spi_null_writer;
712 break;
713 }
714 cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
715 cr |= (width << 8);
716 write_CTRL(drv_data, cr);
717
718 if (width == CFG_SPI_WORDSIZE16) {
719 drv_data->len = (transfer->len) >> 1; 652 drv_data->len = (transfer->len) >> 1;
653 cr_width = BIT_CTL_WORDSIZE;
654 drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
720 } else { 655 } else {
721 drv_data->len = transfer->len; 656 dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n");
657 message->status = -EINVAL;
658 bfin_spi_giveback(drv_data);
659 return;
722 } 660 }
661 cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
662 cr |= cr_width;
663 write_CTRL(drv_data, cr);
664
723 dev_dbg(&drv_data->pdev->dev, 665 dev_dbg(&drv_data->pdev->dev,
724 "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n", 666 "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n",
725 drv_data->write, chip->write, bfin_spi_null_writer); 667 drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8);
726 668
727 /* speed and width has been set on per message */
728 message->state = RUNNING_STATE; 669 message->state = RUNNING_STATE;
729 dma_config = 0; 670 dma_config = 0;
730 671
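
The new bits-per-word selection relies on GCC's conditional-expression extension: "a ?: b" means "a ? a : b" with a evaluated only once, so a non-zero per-transfer value wins and the device default applies otherwise. Spelled out without the extension:

    unsigned int bits_per_word;

    if (transfer->bits_per_word)
            bits_per_word = transfer->bits_per_word;
    else
            bits_per_word = message->spi->bits_per_word;
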
@@ -735,13 +676,11 @@ static void bfin_spi_pump_transfers(unsigned long data)
735 write_BAUD(drv_data, chip->baud); 676 write_BAUD(drv_data, chip->baud);
736 677
737 write_STAT(drv_data, BIT_STAT_CLR); 678 write_STAT(drv_data, BIT_STAT_CLR);
738 cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); 679 bfin_spi_cs_active(drv_data, chip);
739 if (drv_data->cs_change)
740 bfin_spi_cs_active(drv_data, chip);
741 680
742 dev_dbg(&drv_data->pdev->dev, 681 dev_dbg(&drv_data->pdev->dev,
743 "now pumping a transfer: width is %d, len is %d\n", 682 "now pumping a transfer: width is %d, len is %d\n",
744 width, transfer->len); 683 cr_width, transfer->len);
745 684
746 /* 685 /*
747 * Try to map dma buffer and do a dma transfer. If successful use, 686 * Try to map dma buffer and do a dma transfer. If successful use,
@@ -760,7 +699,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
760 /* config dma channel */ 699 /* config dma channel */
761 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); 700 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
762 set_dma_x_count(drv_data->dma_channel, drv_data->len); 701 set_dma_x_count(drv_data->dma_channel, drv_data->len);
763 if (width == CFG_SPI_WORDSIZE16) { 702 if (cr_width == BIT_CTL_WORDSIZE) {
764 set_dma_x_modify(drv_data->dma_channel, 2); 703 set_dma_x_modify(drv_data->dma_channel, 2);
765 dma_width = WDSIZE_16; 704 dma_width = WDSIZE_16;
766 } else { 705 } else {
@@ -846,73 +785,100 @@ static void bfin_spi_pump_transfers(unsigned long data)
846 dma_enable_irq(drv_data->dma_channel); 785 dma_enable_irq(drv_data->dma_channel);
847 local_irq_restore(flags); 786 local_irq_restore(flags);
848 787
849 } else { 788 return;
850 /* IO mode write then read */ 789 }
851 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
852
853 /* we always use SPI_WRITE mode. SPI_READ mode
854 seems to have problems with setting up the
855 output value in TDBR prior to the transfer. */
856 write_CTRL(drv_data, (cr | CFG_SPI_WRITE));
857
858 if (full_duplex) {
859 /* full duplex mode */
860 BUG_ON((drv_data->tx_end - drv_data->tx) !=
861 (drv_data->rx_end - drv_data->rx));
862 dev_dbg(&drv_data->pdev->dev,
863 "IO duplex: cr is 0x%x\n", cr);
864
865 drv_data->duplex(drv_data);
866 790
867 if (drv_data->tx != drv_data->tx_end) 791 /*
868 tranf_success = 0; 792 * We always use SPI_WRITE mode (transfer starts with TDBR write).
869 } else if (drv_data->tx != NULL) { 793 * SPI_READ mode (transfer starts with RDBR read) seems to have
870 /* write only half duplex */ 794 * problems with setting up the output value in TDBR prior to the
871 dev_dbg(&drv_data->pdev->dev, 795 * start of the transfer.
872 "IO write: cr is 0x%x\n", cr); 796 */
797 write_CTRL(drv_data, cr | BIT_CTL_TXMOD);
873 798
874 drv_data->write(drv_data); 799 if (chip->pio_interrupt) {
800 /* SPI irq should have been disabled by now */
875 801
876 if (drv_data->tx != drv_data->tx_end) 802 /* discard old RX data and clear RXS */
877 tranf_success = 0; 803 bfin_spi_dummy_read(drv_data);
878 } else if (drv_data->rx != NULL) {
879 /* read only half duplex */
880 dev_dbg(&drv_data->pdev->dev,
881 "IO read: cr is 0x%x\n", cr);
882 804
883 drv_data->read(drv_data); 805 /* start transfer */
884 if (drv_data->rx != drv_data->rx_end) 806 if (drv_data->tx == NULL)
885 tranf_success = 0; 807 write_TDBR(drv_data, chip->idle_tx_val);
808 else {
809 if (bits_per_word == 8)
810 write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
811 else
812 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
813 drv_data->tx += drv_data->n_bytes;
886 } 814 }
887 815
888 if (!tranf_success) { 816 /* once TDBR is empty, interrupt is triggered */
889 dev_dbg(&drv_data->pdev->dev, 817 enable_irq(drv_data->spi_irq);
890 "IO write error!\n"); 818 return;
891 message->state = ERROR_STATE; 819 }
892 } else { 820
893 /* Update total byte transfered */ 821 /* IO mode */
894 message->actual_length += drv_data->len_in_bytes; 822 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
895 /* Move to next transfer of this msg */ 823
896 message->state = bfin_spi_next_transfer(drv_data); 824 if (full_duplex) {
897 if (drv_data->cs_change) 825 /* full duplex mode */
898 bfin_spi_cs_deactive(drv_data, chip); 826 BUG_ON((drv_data->tx_end - drv_data->tx) !=
899 } 827 (drv_data->rx_end - drv_data->rx));
900 /* Schedule next transfer tasklet */ 828 dev_dbg(&drv_data->pdev->dev,
901 tasklet_schedule(&drv_data->pump_transfers); 829 "IO duplex: cr is 0x%x\n", cr);
830
831 drv_data->ops->duplex(drv_data);
832
833 if (drv_data->tx != drv_data->tx_end)
834 tranf_success = 0;
835 } else if (drv_data->tx != NULL) {
836 /* write only half duplex */
837 dev_dbg(&drv_data->pdev->dev,
838 "IO write: cr is 0x%x\n", cr);
839
840 drv_data->ops->write(drv_data);
841
842 if (drv_data->tx != drv_data->tx_end)
843 tranf_success = 0;
844 } else if (drv_data->rx != NULL) {
845 /* read only half duplex */
846 dev_dbg(&drv_data->pdev->dev,
847 "IO read: cr is 0x%x\n", cr);
848
849 drv_data->ops->read(drv_data);
850 if (drv_data->rx != drv_data->rx_end)
851 tranf_success = 0;
852 }
853
854 if (!tranf_success) {
855 dev_dbg(&drv_data->pdev->dev,
856 "IO write error!\n");
857 message->state = ERROR_STATE;
858 } else {
859 /* Update total byte transferred */
860 message->actual_length += drv_data->len_in_bytes;
861 /* Move to next transfer of this msg */
862 message->state = bfin_spi_next_transfer(drv_data);
863 if (drv_data->cs_change)
864 bfin_spi_cs_deactive(drv_data, chip);
902 } 865 }
866
867 /* Schedule next transfer tasklet */
868 tasklet_schedule(&drv_data->pump_transfers);
903} 869}
904 870
905/* pop a msg from queue and kick off real transfer */ 871/* pop a msg from queue and kick off real transfer */
906static void bfin_spi_pump_messages(struct work_struct *work) 872static void bfin_spi_pump_messages(struct work_struct *work)
907{ 873{
908 struct driver_data *drv_data; 874 struct bfin_spi_master_data *drv_data;
909 unsigned long flags; 875 unsigned long flags;
910 876
911 drv_data = container_of(work, struct driver_data, pump_messages); 877 drv_data = container_of(work, struct bfin_spi_master_data, pump_messages);
912 878
913 /* Lock queue and check for queue work */ 879 /* Lock queue and check for queue work */
914 spin_lock_irqsave(&drv_data->lock, flags); 880 spin_lock_irqsave(&drv_data->lock, flags);
915 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { 881 if (list_empty(&drv_data->queue) || !drv_data->running) {
916 /* pumper kicked off but no work to do */ 882 /* pumper kicked off but no work to do */
917 drv_data->busy = 0; 883 drv_data->busy = 0;
918 spin_unlock_irqrestore(&drv_data->lock, flags); 884 spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -962,12 +928,12 @@ static void bfin_spi_pump_messages(struct work_struct *work)
962 */ 928 */
963static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) 929static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
964{ 930{
965 struct driver_data *drv_data = spi_master_get_devdata(spi->master); 931 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
966 unsigned long flags; 932 unsigned long flags;
967 933
968 spin_lock_irqsave(&drv_data->lock, flags); 934 spin_lock_irqsave(&drv_data->lock, flags);
969 935
970 if (drv_data->run == QUEUE_STOPPED) { 936 if (!drv_data->running) {
971 spin_unlock_irqrestore(&drv_data->lock, flags); 937 spin_unlock_irqrestore(&drv_data->lock, flags);
972 return -ESHUTDOWN; 938 return -ESHUTDOWN;
973 } 939 }
@@ -979,7 +945,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
979 dev_dbg(&spi->dev, "adding an msg in transfer() \n"); 945 dev_dbg(&spi->dev, "adding an msg in transfer() \n");
980 list_add_tail(&msg->queue, &drv_data->queue); 946 list_add_tail(&msg->queue, &drv_data->queue);
981 947
982 if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) 948 if (drv_data->running && !drv_data->busy)
983 queue_work(drv_data->workqueue, &drv_data->pump_messages); 949 queue_work(drv_data->workqueue, &drv_data->pump_messages);
984 950
985 spin_unlock_irqrestore(&drv_data->lock, flags); 951 spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -1003,147 +969,184 @@ static u16 ssel[][MAX_SPI_SSEL] = {
1003 P_SPI2_SSEL6, P_SPI2_SSEL7}, 969 P_SPI2_SSEL6, P_SPI2_SSEL7},
1004}; 970};
1005 971
1006/* first setup for new devices */ 972/* setup for devices (may be called multiple times -- not just first setup) */
1007static int bfin_spi_setup(struct spi_device *spi) 973static int bfin_spi_setup(struct spi_device *spi)
1008{ 974{
1009 struct bfin5xx_spi_chip *chip_info = NULL; 975 struct bfin5xx_spi_chip *chip_info;
1010 struct chip_data *chip; 976 struct bfin_spi_slave_data *chip = NULL;
1011 struct driver_data *drv_data = spi_master_get_devdata(spi->master); 977 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
1012 int ret; 978 u16 bfin_ctl_reg;
1013 979 int ret = -EINVAL;
1014 if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
1015 return -EINVAL;
1016 980
1017 /* Only alloc (or use chip_info) on first setup */ 981 /* Only alloc (or use chip_info) on first setup */
982 chip_info = NULL;
1018 chip = spi_get_ctldata(spi); 983 chip = spi_get_ctldata(spi);
1019 if (chip == NULL) { 984 if (chip == NULL) {
1020 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 985 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
1021 if (!chip) 986 if (!chip) {
1022 return -ENOMEM; 987 dev_err(&spi->dev, "cannot allocate chip data\n");
988 ret = -ENOMEM;
989 goto error;
990 }
1023 991
1024 chip->enable_dma = 0; 992 chip->enable_dma = 0;
1025 chip_info = spi->controller_data; 993 chip_info = spi->controller_data;
1026 } 994 }
1027 995
996 /* Let people set non-standard bits directly */
997 bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO |
998 BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ;
999
1028 /* chip_info isn't always needed */ 1000 /* chip_info isn't always needed */
1029 if (chip_info) { 1001 if (chip_info) {
1030 /* Make sure people stop trying to set fields via ctl_reg 1002 /* Make sure people stop trying to set fields via ctl_reg
1031 * when they should actually be using common SPI framework. 1003 * when they should actually be using common SPI framework.
1032 * Currently we let through: WOM EMISO PSSE GM SZ TIMOD. 1004 * Currently we let through: WOM EMISO PSSE GM SZ.
1033 * Not sure if a user actually needs/uses any of these, 1005 * Not sure if a user actually needs/uses any of these,
1034 * but let's assume (for now) they do. 1006 * but let's assume (for now) they do.
1035 */ 1007 */
1036 if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) { 1008 if (chip_info->ctl_reg & ~bfin_ctl_reg) {
1037 dev_err(&spi->dev, "do not set bits in ctl_reg " 1009 dev_err(&spi->dev, "do not set bits in ctl_reg "
1038 "that the SPI framework manages\n"); 1010 "that the SPI framework manages\n");
1039 return -EINVAL; 1011 goto error;
1040 } 1012 }
1041
1042 chip->enable_dma = chip_info->enable_dma != 0 1013 chip->enable_dma = chip_info->enable_dma != 0
1043 && drv_data->master_info->enable_dma; 1014 && drv_data->master_info->enable_dma;
1044 chip->ctl_reg = chip_info->ctl_reg; 1015 chip->ctl_reg = chip_info->ctl_reg;
1045 chip->bits_per_word = chip_info->bits_per_word;
1046 chip->cs_change_per_word = chip_info->cs_change_per_word;
1047 chip->cs_chg_udelay = chip_info->cs_chg_udelay; 1016 chip->cs_chg_udelay = chip_info->cs_chg_udelay;
1048 chip->cs_gpio = chip_info->cs_gpio;
1049 chip->idle_tx_val = chip_info->idle_tx_val; 1017 chip->idle_tx_val = chip_info->idle_tx_val;
1018 chip->pio_interrupt = chip_info->pio_interrupt;
1019 spi->bits_per_word = chip_info->bits_per_word;
1020 } else {
1021 /* force a default base state */
1022 chip->ctl_reg &= bfin_ctl_reg;
1023 }
1024
1025 if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
1026 dev_err(&spi->dev, "%d bits_per_word is not supported\n",
1027 spi->bits_per_word);
1028 goto error;
1050 } 1029 }
1051 1030
1052 /* translate common spi framework into our register */ 1031 /* translate common spi framework into our register */
1032 if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
1033 dev_err(&spi->dev, "unsupported spi modes detected\n");
1034 goto error;
1035 }
1053 if (spi->mode & SPI_CPOL) 1036 if (spi->mode & SPI_CPOL)
1054 chip->ctl_reg |= CPOL; 1037 chip->ctl_reg |= BIT_CTL_CPOL;
1055 if (spi->mode & SPI_CPHA) 1038 if (spi->mode & SPI_CPHA)
1056 chip->ctl_reg |= CPHA; 1039 chip->ctl_reg |= BIT_CTL_CPHA;
1057 if (spi->mode & SPI_LSB_FIRST) 1040 if (spi->mode & SPI_LSB_FIRST)
1058 chip->ctl_reg |= LSBF; 1041 chip->ctl_reg |= BIT_CTL_LSBF;
1059 /* we dont support running in slave mode (yet?) */ 1042 /* we dont support running in slave mode (yet?) */
1060 chip->ctl_reg |= MSTR; 1043 chip->ctl_reg |= BIT_CTL_MASTER;
1061 1044
1062 /* 1045 /*
1046 * Notice: for blackfin, the speed_hz is the value of register
1047 * SPI_BAUD, not the real baudrate
1048 */
1049 chip->baud = hz_to_spi_baud(spi->max_speed_hz);
1050 chip->chip_select_num = spi->chip_select;
1051 if (chip->chip_select_num < MAX_CTRL_CS) {
1052 if (!(spi->mode & SPI_CPHA))
1053 dev_warn(&spi->dev, "Warning: SPI CPHA not set:"
1054 " Slave Select not under software control!\n"
1055 " See Documentation/blackfin/bfin-spi-notes.txt");
1056
1057 chip->flag = (1 << spi->chip_select) << 8;
1058 } else
1059 chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS;
1060
1061 if (chip->enable_dma && chip->pio_interrupt) {
1062 dev_err(&spi->dev, "enable_dma is set, "
1063 "do not set pio_interrupt\n");
1064 goto error;
1065 }
1066 /*
1063 * if any one SPI chip is registered and wants DMA, request the 1067 * if any one SPI chip is registered and wants DMA, request the
1064 * DMA channel for it 1068 * DMA channel for it
1065 */ 1069 */
1066 if (chip->enable_dma && !drv_data->dma_requested) { 1070 if (chip->enable_dma && !drv_data->dma_requested) {
1067 /* register dma irq handler */ 1071 /* register dma irq handler */
1068 if (request_dma(drv_data->dma_channel, "BFIN_SPI_DMA") < 0) { 1072 ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA");
1069 dev_dbg(&spi->dev, 1073 if (ret) {
1074 dev_err(&spi->dev,
1070 "Unable to request BlackFin SPI DMA channel\n"); 1075 "Unable to request BlackFin SPI DMA channel\n");
1071 return -ENODEV; 1076 goto error;
1072 } 1077 }
1073 if (set_dma_callback(drv_data->dma_channel, 1078 drv_data->dma_requested = 1;
1074 bfin_spi_dma_irq_handler, drv_data) < 0) { 1079
1075 dev_dbg(&spi->dev, "Unable to set dma callback\n"); 1080 ret = set_dma_callback(drv_data->dma_channel,
1076 return -EPERM; 1081 bfin_spi_dma_irq_handler, drv_data);
1082 if (ret) {
1083 dev_err(&spi->dev, "Unable to set dma callback\n");
1084 goto error;
1077 } 1085 }
1078 dma_disable_irq(drv_data->dma_channel); 1086 dma_disable_irq(drv_data->dma_channel);
1079 drv_data->dma_requested = 1;
1080 } 1087 }
1081 1088
1082 /* 1089 if (chip->pio_interrupt && !drv_data->irq_requested) {
1083 * Notice: for blackfin, the speed_hz is the value of register 1090 ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler,
1084 * SPI_BAUD, not the real baudrate 1091 IRQF_DISABLED, "BFIN_SPI", drv_data);
1085 */ 1092 if (ret) {
1086 chip->baud = hz_to_spi_baud(spi->max_speed_hz); 1093 dev_err(&spi->dev, "Unable to register spi IRQ\n");
1087 chip->flag = 1 << (spi->chip_select); 1094 goto error;
1088 chip->chip_select_num = spi->chip_select; 1095 }
1096 drv_data->irq_requested = 1;
1097 /* we use write mode, spi irq has to be disabled here */
1098 disable_irq(drv_data->spi_irq);
1099 }
1089 1100
1090 if (chip->chip_select_num == 0) { 1101 if (chip->chip_select_num >= MAX_CTRL_CS) {
1091 ret = gpio_request(chip->cs_gpio, spi->modalias); 1102 ret = gpio_request(chip->cs_gpio, spi->modalias);
1092 if (ret) { 1103 if (ret) {
1093 if (drv_data->dma_requested) 1104 dev_err(&spi->dev, "gpio_request() error\n");
1094 free_dma(drv_data->dma_channel); 1105 goto pin_error;
1095 return ret;
1096 } 1106 }
1097 gpio_direction_output(chip->cs_gpio, 1); 1107 gpio_direction_output(chip->cs_gpio, 1);
1098 } 1108 }
1099 1109
1100 switch (chip->bits_per_word) {
1101 case 8:
1102 chip->n_bytes = 1;
1103 chip->width = CFG_SPI_WORDSIZE8;
1104 chip->read = chip->cs_change_per_word ?
1105 bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader;
1106 chip->write = chip->cs_change_per_word ?
1107 bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer;
1108 chip->duplex = chip->cs_change_per_word ?
1109 bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex;
1110 break;
1111
1112 case 16:
1113 chip->n_bytes = 2;
1114 chip->width = CFG_SPI_WORDSIZE16;
1115 chip->read = chip->cs_change_per_word ?
1116 bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader;
1117 chip->write = chip->cs_change_per_word ?
1118 bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer;
1119 chip->duplex = chip->cs_change_per_word ?
1120 bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex;
1121 break;
1122
1123 default:
1124 dev_err(&spi->dev, "%d bits_per_word is not supported\n",
1125 chip->bits_per_word);
1126 if (chip_info)
1127 kfree(chip);
1128 return -ENODEV;
1129 }
1130
1131 dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", 1110 dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
1132 spi->modalias, chip->width, chip->enable_dma); 1111 spi->modalias, spi->bits_per_word, chip->enable_dma);
1133 dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", 1112 dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
1134 chip->ctl_reg, chip->flag); 1113 chip->ctl_reg, chip->flag);
1135 1114
1136 spi_set_ctldata(spi, chip); 1115 spi_set_ctldata(spi, chip);
1137 1116
1138 dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); 1117 dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num);
1139 if ((chip->chip_select_num > 0) 1118 if (chip->chip_select_num < MAX_CTRL_CS) {
1140 && (chip->chip_select_num <= spi->master->num_chipselect)) 1119 ret = peripheral_request(ssel[spi->master->bus_num]
1141 peripheral_request(ssel[spi->master->bus_num] 1120 [chip->chip_select_num-1], spi->modalias);
1142 [chip->chip_select_num-1], spi->modalias); 1121 if (ret) {
1122 dev_err(&spi->dev, "peripheral_request() error\n");
1123 goto pin_error;
1124 }
1125 }
1143 1126
1127 bfin_spi_cs_enable(drv_data, chip);
1144 bfin_spi_cs_deactive(drv_data, chip); 1128 bfin_spi_cs_deactive(drv_data, chip);
1145 1129
1146 return 0; 1130 return 0;
1131
1132 pin_error:
1133 if (chip->chip_select_num >= MAX_CTRL_CS)
1134 gpio_free(chip->cs_gpio);
1135 else
1136 peripheral_free(ssel[spi->master->bus_num]
1137 [chip->chip_select_num - 1]);
1138 error:
1139 if (chip) {
1140 if (drv_data->dma_requested)
1141 free_dma(drv_data->dma_channel);
1142 drv_data->dma_requested = 0;
1143
1144 kfree(chip);
1145 /* prevent free 'chip' twice */
1146 spi_set_ctldata(spi, NULL);
1147 }
1148
1149 return ret;
1147} 1150}
1148 1151
1149/* 1152/*
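
bfin_spi_setup() now follows the common kernel unwind pattern: resources are claimed in order, the error labels release them in reverse, and kfree(chip) plus spi_set_ctldata(spi, NULL) make sure a failed setup cannot leave a stale pointer for cleanup() to free a second time. A compact, self-contained sketch of the same pattern using generic APIs (the GPIO/IRQ numbers, names, and handler are purely illustrative, not from the driver):

    #include <linux/gpio.h>
    #include <linux/interrupt.h>
    #include <linux/slab.h>

    static irqreturn_t demo_irq(int irq, void *data) { return IRQ_HANDLED; }

    static int demo_setup(unsigned int cs_gpio, unsigned int irq, void **ctldata)
    {
            void *chip;
            int ret;

            chip = kzalloc(64, GFP_KERNEL);
            if (!chip)
                    return -ENOMEM;

            ret = gpio_request(cs_gpio, "demo-cs");
            if (ret)
                    goto err_free_chip;

            ret = request_irq(irq, demo_irq, 0, "demo-spi", chip);
            if (ret)
                    goto err_free_gpio;

            *ctldata = chip;
            return 0;

    err_free_gpio:
            gpio_free(cs_gpio);
    err_free_chip:
            kfree(chip);
            *ctldata = NULL;        /* mirrors spi_set_ctldata(spi, NULL): avoid double free */
            return ret;
    }
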
@@ -1152,28 +1155,30 @@ static int bfin_spi_setup(struct spi_device *spi)
1152 */ 1155 */
1153static void bfin_spi_cleanup(struct spi_device *spi) 1156static void bfin_spi_cleanup(struct spi_device *spi)
1154{ 1157{
1155 struct chip_data *chip = spi_get_ctldata(spi); 1158 struct bfin_spi_slave_data *chip = spi_get_ctldata(spi);
1159 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
1156 1160
1157 if (!chip) 1161 if (!chip)
1158 return; 1162 return;
1159 1163
1160 if ((chip->chip_select_num > 0) 1164 if (chip->chip_select_num < MAX_CTRL_CS) {
1161 && (chip->chip_select_num <= spi->master->num_chipselect))
1162 peripheral_free(ssel[spi->master->bus_num] 1165 peripheral_free(ssel[spi->master->bus_num]
1163 [chip->chip_select_num-1]); 1166 [chip->chip_select_num-1]);
1164 1167 bfin_spi_cs_disable(drv_data, chip);
1165 if (chip->chip_select_num == 0) 1168 } else
1166 gpio_free(chip->cs_gpio); 1169 gpio_free(chip->cs_gpio);
1167 1170
1168 kfree(chip); 1171 kfree(chip);
1172 /* prevent free 'chip' twice */
1173 spi_set_ctldata(spi, NULL);
1169} 1174}
1170 1175
1171static inline int bfin_spi_init_queue(struct driver_data *drv_data) 1176static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
1172{ 1177{
1173 INIT_LIST_HEAD(&drv_data->queue); 1178 INIT_LIST_HEAD(&drv_data->queue);
1174 spin_lock_init(&drv_data->lock); 1179 spin_lock_init(&drv_data->lock);
1175 1180
1176 drv_data->run = QUEUE_STOPPED; 1181 drv_data->running = false;
1177 drv_data->busy = 0; 1182 drv_data->busy = 0;
1178 1183
1179 /* init transfer tasklet */ 1184 /* init transfer tasklet */
@@ -1190,18 +1195,18 @@ static inline int bfin_spi_init_queue(struct driver_data *drv_data)
1190 return 0; 1195 return 0;
1191} 1196}
1192 1197
1193static inline int bfin_spi_start_queue(struct driver_data *drv_data) 1198static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
1194{ 1199{
1195 unsigned long flags; 1200 unsigned long flags;
1196 1201
1197 spin_lock_irqsave(&drv_data->lock, flags); 1202 spin_lock_irqsave(&drv_data->lock, flags);
1198 1203
1199 if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { 1204 if (drv_data->running || drv_data->busy) {
1200 spin_unlock_irqrestore(&drv_data->lock, flags); 1205 spin_unlock_irqrestore(&drv_data->lock, flags);
1201 return -EBUSY; 1206 return -EBUSY;
1202 } 1207 }
1203 1208
1204 drv_data->run = QUEUE_RUNNING; 1209 drv_data->running = true;
1205 drv_data->cur_msg = NULL; 1210 drv_data->cur_msg = NULL;
1206 drv_data->cur_transfer = NULL; 1211 drv_data->cur_transfer = NULL;
1207 drv_data->cur_chip = NULL; 1212 drv_data->cur_chip = NULL;
@@ -1212,7 +1217,7 @@ static inline int bfin_spi_start_queue(struct driver_data *drv_data)
1212 return 0; 1217 return 0;
1213} 1218}
1214 1219
1215static inline int bfin_spi_stop_queue(struct driver_data *drv_data) 1220static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
1216{ 1221{
1217 unsigned long flags; 1222 unsigned long flags;
1218 unsigned limit = 500; 1223 unsigned limit = 500;
@@ -1226,7 +1231,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data)
1226 * execution path (pump_messages) would be required to call wake_up or 1231 * execution path (pump_messages) would be required to call wake_up or
1227 * friends on every SPI message. Do this instead 1232 * friends on every SPI message. Do this instead
1228 */ 1233 */
1229 drv_data->run = QUEUE_STOPPED; 1234 drv_data->running = false;
1230 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { 1235 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
1231 spin_unlock_irqrestore(&drv_data->lock, flags); 1236 spin_unlock_irqrestore(&drv_data->lock, flags);
1232 msleep(10); 1237 msleep(10);
@@ -1241,7 +1246,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data)
1241 return status; 1246 return status;
1242} 1247}
1243 1248
1244static inline int bfin_spi_destroy_queue(struct driver_data *drv_data) 1249static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
1245{ 1250{
1246 int status; 1251 int status;
1247 1252
@@ -1259,14 +1264,14 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1259 struct device *dev = &pdev->dev; 1264 struct device *dev = &pdev->dev;
1260 struct bfin5xx_spi_master *platform_info; 1265 struct bfin5xx_spi_master *platform_info;
1261 struct spi_master *master; 1266 struct spi_master *master;
1262 struct driver_data *drv_data = 0; 1267 struct bfin_spi_master_data *drv_data;
1263 struct resource *res; 1268 struct resource *res;
1264 int status = 0; 1269 int status = 0;
1265 1270
1266 platform_info = dev->platform_data; 1271 platform_info = dev->platform_data;
1267 1272
1268 /* Allocate master with space for drv_data */ 1273 /* Allocate master with space for drv_data */
1269 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); 1274 master = spi_alloc_master(dev, sizeof(*drv_data));
1270 if (!master) { 1275 if (!master) {
1271 dev_err(&pdev->dev, "can not alloc spi_master\n"); 1276 dev_err(&pdev->dev, "can not alloc spi_master\n");
1272 return -ENOMEM; 1277 return -ENOMEM;
@@ -1302,11 +1307,19 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1302 goto out_error_ioremap; 1307 goto out_error_ioremap;
1303 } 1308 }
1304 1309
1305 drv_data->dma_channel = platform_get_irq(pdev, 0); 1310 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1306 if (drv_data->dma_channel < 0) { 1311 if (res == NULL) {
1307 dev_err(dev, "No DMA channel specified\n"); 1312 dev_err(dev, "No DMA channel specified\n");
1308 status = -ENOENT; 1313 status = -ENOENT;
1309 goto out_error_no_dma_ch; 1314 goto out_error_free_io;
1315 }
1316 drv_data->dma_channel = res->start;
1317
1318 drv_data->spi_irq = platform_get_irq(pdev, 0);
1319 if (drv_data->spi_irq < 0) {
1320 dev_err(dev, "No spi pio irq specified\n");
1321 status = -ENOENT;
1322 goto out_error_free_io;
1310 } 1323 }
1311 1324
1312 /* Initial and start queue */ 1325 /* Initial and start queue */
@@ -1328,6 +1341,12 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1328 goto out_error_queue_alloc; 1341 goto out_error_queue_alloc;
1329 } 1342 }
1330 1343
1344 /* Reset SPI registers. If these registers were used by the boot loader,
1345 * the sky may fall on your head if you enable the dma controller.
1346 */
1347 write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
1348 write_FLAG(drv_data, 0xFF00);
1349
1331 /* Register with the SPI framework */ 1350 /* Register with the SPI framework */
1332 platform_set_drvdata(pdev, drv_data); 1351 platform_set_drvdata(pdev, drv_data);
1333 status = spi_register_master(master); 1352 status = spi_register_master(master);
@@ -1343,7 +1362,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1343 1362
1344out_error_queue_alloc: 1363out_error_queue_alloc:
1345 bfin_spi_destroy_queue(drv_data); 1364 bfin_spi_destroy_queue(drv_data);
1346out_error_no_dma_ch: 1365out_error_free_io:
1347 iounmap((void *) drv_data->regs_base); 1366 iounmap((void *) drv_data->regs_base);
1348out_error_ioremap: 1367out_error_ioremap:
1349out_error_get_res: 1368out_error_get_res:
@@ -1355,7 +1374,7 @@ out_error_get_res:
1355/* stop hardware and remove the driver */ 1374/* stop hardware and remove the driver */
1356static int __devexit bfin_spi_remove(struct platform_device *pdev) 1375static int __devexit bfin_spi_remove(struct platform_device *pdev)
1357{ 1376{
1358 struct driver_data *drv_data = platform_get_drvdata(pdev); 1377 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
1359 int status = 0; 1378 int status = 0;
1360 1379
1361 if (!drv_data) 1380 if (!drv_data)
@@ -1375,6 +1394,11 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev)
1375 free_dma(drv_data->dma_channel); 1394 free_dma(drv_data->dma_channel);
1376 } 1395 }
1377 1396
1397 if (drv_data->irq_requested) {
1398 free_irq(drv_data->spi_irq, drv_data);
1399 drv_data->irq_requested = 0;
1400 }
1401
1378 /* Disconnect from the SPI framework */ 1402 /* Disconnect from the SPI framework */
1379 spi_unregister_master(drv_data->master); 1403 spi_unregister_master(drv_data->master);
1380 1404
@@ -1389,26 +1413,32 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev)
1389#ifdef CONFIG_PM 1413#ifdef CONFIG_PM
1390static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) 1414static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
1391{ 1415{
1392 struct driver_data *drv_data = platform_get_drvdata(pdev); 1416 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
1393 int status = 0; 1417 int status = 0;
1394 1418
1395 status = bfin_spi_stop_queue(drv_data); 1419 status = bfin_spi_stop_queue(drv_data);
1396 if (status != 0) 1420 if (status != 0)
1397 return status; 1421 return status;
1398 1422
1399 /* stop hardware */ 1423 drv_data->ctrl_reg = read_CTRL(drv_data);
1400 bfin_spi_disable(drv_data); 1424 drv_data->flag_reg = read_FLAG(drv_data);
1425
1426 /*
1427 * reset SPI_CTL and SPI_FLG registers
1428 */
1429 write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
1430 write_FLAG(drv_data, 0xFF00);
1401 1431
1402 return 0; 1432 return 0;
1403} 1433}
1404 1434
1405static int bfin_spi_resume(struct platform_device *pdev) 1435static int bfin_spi_resume(struct platform_device *pdev)
1406{ 1436{
1407 struct driver_data *drv_data = platform_get_drvdata(pdev); 1437 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
1408 int status = 0; 1438 int status = 0;
1409 1439
1410 /* Enable the SPI interface */ 1440 write_CTRL(drv_data, drv_data->ctrl_reg);
1411 bfin_spi_enable(drv_data); 1441 write_FLAG(drv_data, drv_data->flag_reg);
1412 1442
1413 /* Start the queue running */ 1443 /* Start the queue running */
1414 status = bfin_spi_start_queue(drv_data); 1444 status = bfin_spi_start_queue(drv_data);
@@ -1439,7 +1469,7 @@ static int __init bfin_spi_init(void)
1439{ 1469{
1440 return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); 1470 return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe);
1441} 1471}
1442module_init(bfin_spi_init); 1472subsys_initcall(bfin_spi_init);
1443 1473
1444static void __exit bfin_spi_exit(void) 1474static void __exit bfin_spi_exit(void)
1445{ 1475{
diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi_fsl_espi.c
new file mode 100644
index 000000000000..e3b4f6451966
--- /dev/null
+++ b/drivers/spi/spi_fsl_espi.c
@@ -0,0 +1,748 @@
1/*
2 * Freescale eSPI controller driver.
3 *
4 * Copyright 2010 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11#include <linux/module.h>
12#include <linux/delay.h>
13#include <linux/irq.h>
14#include <linux/spi/spi.h>
15#include <linux/platform_device.h>
16#include <linux/fsl_devices.h>
17#include <linux/mm.h>
18#include <linux/of.h>
19#include <linux/of_platform.h>
20#include <linux/of_spi.h>
21#include <linux/interrupt.h>
22#include <linux/err.h>
23#include <sysdev/fsl_soc.h>
24
25#include "spi_fsl_lib.h"
26
27/* eSPI Controller registers */
28struct fsl_espi_reg {
29 __be32 mode; /* 0x000 - eSPI mode register */
30 __be32 event; /* 0x004 - eSPI event register */
31 __be32 mask; /* 0x008 - eSPI mask register */
32 __be32 command; /* 0x00c - eSPI command register */
33 __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/
34 __be32 receive; /* 0x014 - eSPI receive FIFO access register*/
35 u8 res[8]; /* 0x018 - 0x01c reserved */
36 __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */
37};
38
39struct fsl_espi_transfer {
40 const void *tx_buf;
41 void *rx_buf;
42 unsigned len;
43 unsigned n_tx;
44 unsigned n_rx;
45 unsigned actual_length;
46 int status;
47};
48
49/* eSPI Controller mode register definitions */
50#define SPMODE_ENABLE (1 << 31)
51#define SPMODE_LOOP (1 << 30)
52#define SPMODE_TXTHR(x) ((x) << 8)
53#define SPMODE_RXTHR(x) ((x) << 0)
54
55/* eSPI Controller CS mode register definitions */
56#define CSMODE_CI_INACTIVEHIGH (1 << 31)
57#define CSMODE_CP_BEGIN_EDGECLK (1 << 30)
58#define CSMODE_REV (1 << 29)
59#define CSMODE_DIV16 (1 << 28)
60#define CSMODE_PM(x) ((x) << 24)
61#define CSMODE_POL_1 (1 << 20)
62#define CSMODE_LEN(x) ((x) << 16)
63#define CSMODE_BEF(x) ((x) << 12)
64#define CSMODE_AFT(x) ((x) << 8)
65#define CSMODE_CG(x) ((x) << 3)
66
67/* Default mode/csmode for eSPI controller */
68#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3))
69#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
70 | CSMODE_AFT(0) | CSMODE_CG(1))
71
72/* SPIE register values */
73#define SPIE_NE 0x00000200 /* Not empty */
74#define SPIE_NF 0x00000100 /* Not full */
75
76/* SPIM register values */
77#define SPIM_NE 0x00000200 /* Not empty */
78#define SPIM_NF 0x00000100 /* Not full */
79#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F)
80#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F)
81
82/* SPCOM register values */
83#define SPCOM_CS(x) ((x) << 30)
84#define SPCOM_TRANLEN(x) ((x) << 0)
85#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */
86
87static void fsl_espi_change_mode(struct spi_device *spi)
88{
89 struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
90 struct spi_mpc8xxx_cs *cs = spi->controller_state;
91 struct fsl_espi_reg *reg_base = mspi->reg_base;
92 __be32 __iomem *mode = &reg_base->csmode[spi->chip_select];
93 __be32 __iomem *espi_mode = &reg_base->mode;
94 u32 tmp;
95 unsigned long flags;
96
97 /* Turn off IRQs locally to minimize time that SPI is disabled. */
98 local_irq_save(flags);
99
100	/* Turn off SPI unit prior to changing mode */
101 tmp = mpc8xxx_spi_read_reg(espi_mode);
102 mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE);
103 mpc8xxx_spi_write_reg(mode, cs->hw_mode);
104 mpc8xxx_spi_write_reg(espi_mode, tmp);
105
106 local_irq_restore(flags);
107}
108
109static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
110{
111 u32 data;
112 u16 data_h;
113 u16 data_l;
114 const u32 *tx = mpc8xxx_spi->tx;
115
116 if (!tx)
117 return 0;
118
119 data = *tx++ << mpc8xxx_spi->tx_shift;
120 data_l = data & 0xffff;
121 data_h = (data >> 16) & 0xffff;
122 swab16s(&data_l);
123 swab16s(&data_h);
124 data = data_h | data_l;
125
126 mpc8xxx_spi->tx = tx;
127 return data;
128}
129
130static int fsl_espi_setup_transfer(struct spi_device *spi,
131 struct spi_transfer *t)
132{
133 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
134 int bits_per_word = 0;
135 u8 pm;
136 u32 hz = 0;
137 struct spi_mpc8xxx_cs *cs = spi->controller_state;
138
139 if (t) {
140 bits_per_word = t->bits_per_word;
141 hz = t->speed_hz;
142 }
143
144 /* spi_transfer level calls that work per-word */
145 if (!bits_per_word)
146 bits_per_word = spi->bits_per_word;
147
148	/* Make sure it's a bit width we support [4..16] */
149 if ((bits_per_word < 4) || (bits_per_word > 16))
150 return -EINVAL;
151
152 if (!hz)
153 hz = spi->max_speed_hz;
154
155 cs->rx_shift = 0;
156 cs->tx_shift = 0;
157 cs->get_rx = mpc8xxx_spi_rx_buf_u32;
158 cs->get_tx = mpc8xxx_spi_tx_buf_u32;
159 if (bits_per_word <= 8) {
160 cs->rx_shift = 8 - bits_per_word;
161 } else if (bits_per_word <= 16) {
162 cs->rx_shift = 16 - bits_per_word;
163 if (spi->mode & SPI_LSB_FIRST)
164 cs->get_tx = fsl_espi_tx_buf_lsb;
165 } else {
166 return -EINVAL;
167 }
168
169 mpc8xxx_spi->rx_shift = cs->rx_shift;
170 mpc8xxx_spi->tx_shift = cs->tx_shift;
171 mpc8xxx_spi->get_rx = cs->get_rx;
172 mpc8xxx_spi->get_tx = cs->get_tx;
173
174 bits_per_word = bits_per_word - 1;
175
176 /* mask out bits we are going to set */
177 cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
178
179 cs->hw_mode |= CSMODE_LEN(bits_per_word);
180
181 if ((mpc8xxx_spi->spibrg / hz) > 64) {
182 cs->hw_mode |= CSMODE_DIV16;
183 pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
184
185 WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
186 "Will use %d Hz instead.\n", dev_name(&spi->dev),
187 hz, mpc8xxx_spi->spibrg / 1024);
188 if (pm > 16)
189 pm = 16;
190 } else {
191 pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
192 }
193 if (pm)
194 pm--;
195
196 cs->hw_mode |= CSMODE_PM(pm);
197
198 fsl_espi_change_mode(spi);
199 return 0;
200}
201
202static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t,
203 unsigned int len)
204{
205 u32 word;
206 struct fsl_espi_reg *reg_base = mspi->reg_base;
207
208 mspi->count = len;
209
210 /* enable rx ints */
211 mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
212
213 /* transmit word */
214 word = mspi->get_tx(mspi);
215 mpc8xxx_spi_write_reg(&reg_base->transmit, word);
216
217 return 0;
218}
219
220static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
221{
222 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
223 struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
224 unsigned int len = t->len;
225 u8 bits_per_word;
226 int ret;
227
228 bits_per_word = spi->bits_per_word;
229 if (t->bits_per_word)
230 bits_per_word = t->bits_per_word;
231
232 mpc8xxx_spi->len = t->len;
233 len = roundup(len, 4) / 4;
234
235 mpc8xxx_spi->tx = t->tx_buf;
236 mpc8xxx_spi->rx = t->rx_buf;
237
238 INIT_COMPLETION(mpc8xxx_spi->done);
239
240 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
241 if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
242 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
243 " beyond the SPCOM[TRANLEN] field\n", t->len);
244 return -EINVAL;
245 }
246 mpc8xxx_spi_write_reg(&reg_base->command,
247 (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));
248
249 ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len);
250 if (ret)
251 return ret;
252
253 wait_for_completion(&mpc8xxx_spi->done);
254
255 /* disable rx ints */
256 mpc8xxx_spi_write_reg(&reg_base->mask, 0);
257
258 return mpc8xxx_spi->count;
259}
260
261static void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd)
262{
263 if (cmd[1] && cmd[2] && cmd[3]) {
264 cmd[1] = (u8)(addr >> 16);
265 cmd[2] = (u8)(addr >> 8);
266 cmd[3] = (u8)(addr >> 0);
267 }
268}
269
270static unsigned int fsl_espi_cmd2addr(u8 *cmd)
271{
272 if (cmd[1] && cmd[2] && cmd[3])
273 return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0;
274
275 return 0;
276}
277
278static void fsl_espi_do_trans(struct spi_message *m,
279 struct fsl_espi_transfer *tr)
280{
281 struct spi_device *spi = m->spi;
282 struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
283 struct fsl_espi_transfer *espi_trans = tr;
284 struct spi_message message;
285 struct spi_transfer *t, *first, trans;
286 int status = 0;
287
288 spi_message_init(&message);
289 memset(&trans, 0, sizeof(trans));
290
291 first = list_first_entry(&m->transfers, struct spi_transfer,
292 transfer_list);
293 list_for_each_entry(t, &m->transfers, transfer_list) {
294 if ((first->bits_per_word != t->bits_per_word) ||
295 (first->speed_hz != t->speed_hz)) {
296 espi_trans->status = -EINVAL;
297			dev_err(mspi->dev, "bits_per_word/speed_hz should be"
298				" the same for all transfers in one message\n");
299 return;
300 }
301
302 trans.speed_hz = t->speed_hz;
303 trans.bits_per_word = t->bits_per_word;
304 trans.delay_usecs = max(first->delay_usecs, t->delay_usecs);
305 }
306
307 trans.len = espi_trans->len;
308 trans.tx_buf = espi_trans->tx_buf;
309 trans.rx_buf = espi_trans->rx_buf;
310 spi_message_add_tail(&trans, &message);
311
312 list_for_each_entry(t, &message.transfers, transfer_list) {
313 if (t->bits_per_word || t->speed_hz) {
314 status = -EINVAL;
315
316 status = fsl_espi_setup_transfer(spi, t);
317 if (status < 0)
318 break;
319 }
320
321 if (t->len)
322 status = fsl_espi_bufs(spi, t);
323
324 if (status) {
325 status = -EMSGSIZE;
326 break;
327 }
328
329 if (t->delay_usecs)
330 udelay(t->delay_usecs);
331 }
332
333 espi_trans->status = status;
334 fsl_espi_setup_transfer(spi, NULL);
335}
336
337static void fsl_espi_cmd_trans(struct spi_message *m,
338 struct fsl_espi_transfer *trans, u8 *rx_buff)
339{
340 struct spi_transfer *t;
341 u8 *local_buf;
342 int i = 0;
343 struct fsl_espi_transfer *espi_trans = trans;
344
345 local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
346 if (!local_buf) {
347 espi_trans->status = -ENOMEM;
348 return;
349 }
350
351 list_for_each_entry(t, &m->transfers, transfer_list) {
352 if (t->tx_buf) {
353 memcpy(local_buf + i, t->tx_buf, t->len);
354 i += t->len;
355 }
356 }
357
358 espi_trans->tx_buf = local_buf;
359 espi_trans->rx_buf = local_buf + espi_trans->n_tx;
360 fsl_espi_do_trans(m, espi_trans);
361
362 espi_trans->actual_length = espi_trans->len;
363 kfree(local_buf);
364}
365
366static void fsl_espi_rw_trans(struct spi_message *m,
367 struct fsl_espi_transfer *trans, u8 *rx_buff)
368{
369 struct fsl_espi_transfer *espi_trans = trans;
370 unsigned int n_tx = espi_trans->n_tx;
371 unsigned int n_rx = espi_trans->n_rx;
372 struct spi_transfer *t;
373 u8 *local_buf;
374 u8 *rx_buf = rx_buff;
375 unsigned int trans_len;
376 unsigned int addr;
377 int i, pos, loop;
378
379 local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
380 if (!local_buf) {
381 espi_trans->status = -ENOMEM;
382 return;
383 }
384
385 for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) {
386 trans_len = n_rx - pos;
387 if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
388 trans_len = SPCOM_TRANLEN_MAX - n_tx;
389
390 i = 0;
391 list_for_each_entry(t, &m->transfers, transfer_list) {
392 if (t->tx_buf) {
393 memcpy(local_buf + i, t->tx_buf, t->len);
394 i += t->len;
395 }
396 }
397
398 addr = fsl_espi_cmd2addr(local_buf);
399 addr += pos;
400 fsl_espi_addr2cmd(addr, local_buf);
401
402 espi_trans->n_tx = n_tx;
403 espi_trans->n_rx = trans_len;
404 espi_trans->len = trans_len + n_tx;
405 espi_trans->tx_buf = local_buf;
406 espi_trans->rx_buf = local_buf + n_tx;
407 fsl_espi_do_trans(m, espi_trans);
408
409 memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
410
411 if (loop > 0)
412 espi_trans->actual_length += espi_trans->len - n_tx;
413 else
414 espi_trans->actual_length += espi_trans->len;
415 }
416
417 kfree(local_buf);
418}
419
420static void fsl_espi_do_one_msg(struct spi_message *m)
421{
422 struct spi_transfer *t;
423 u8 *rx_buf = NULL;
424 unsigned int n_tx = 0;
425 unsigned int n_rx = 0;
426 struct fsl_espi_transfer espi_trans;
427
428 list_for_each_entry(t, &m->transfers, transfer_list) {
429 if (t->tx_buf)
430 n_tx += t->len;
431 if (t->rx_buf) {
432 n_rx += t->len;
433 rx_buf = t->rx_buf;
434 }
435 }
436
437 espi_trans.n_tx = n_tx;
438 espi_trans.n_rx = n_rx;
439 espi_trans.len = n_tx + n_rx;
440 espi_trans.actual_length = 0;
441 espi_trans.status = 0;
442
443 if (!rx_buf)
444 fsl_espi_cmd_trans(m, &espi_trans, NULL);
445 else
446 fsl_espi_rw_trans(m, &espi_trans, rx_buf);
447
448 m->actual_length = espi_trans.actual_length;
449 m->status = espi_trans.status;
450 m->complete(m->context);
451}
452
453static int fsl_espi_setup(struct spi_device *spi)
454{
455 struct mpc8xxx_spi *mpc8xxx_spi;
456 struct fsl_espi_reg *reg_base;
457 int retval;
458 u32 hw_mode;
459 u32 loop_mode;
460 struct spi_mpc8xxx_cs *cs = spi->controller_state;
461
462 if (!spi->max_speed_hz)
463 return -EINVAL;
464
465 if (!cs) {
466 cs = kzalloc(sizeof *cs, GFP_KERNEL);
467 if (!cs)
468 return -ENOMEM;
469 spi->controller_state = cs;
470 }
471
472 mpc8xxx_spi = spi_master_get_devdata(spi->master);
473 reg_base = mpc8xxx_spi->reg_base;
474
475	hw_mode = cs->hw_mode; /* Save original settings */
476 cs->hw_mode = mpc8xxx_spi_read_reg(
477 &reg_base->csmode[spi->chip_select]);
478 /* mask out bits we are going to set */
479 cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
480 | CSMODE_REV);
481
482 if (spi->mode & SPI_CPHA)
483 cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
484 if (spi->mode & SPI_CPOL)
485 cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
486 if (!(spi->mode & SPI_LSB_FIRST))
487 cs->hw_mode |= CSMODE_REV;
488
489 /* Handle the loop mode */
490 loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
491 loop_mode &= ~SPMODE_LOOP;
492 if (spi->mode & SPI_LOOP)
493 loop_mode |= SPMODE_LOOP;
494 mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode);
495
496 retval = fsl_espi_setup_transfer(spi, NULL);
497 if (retval < 0) {
498 cs->hw_mode = hw_mode; /* Restore settings */
499 return retval;
500 }
501 return 0;
502}
503
504void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
505{
506 struct fsl_espi_reg *reg_base = mspi->reg_base;
507
508	/* We need to handle RX first */
509 if (events & SPIE_NE) {
510 u32 rx_data;
511
512 /* Spin until RX is done */
513 while (SPIE_RXCNT(events) < min(4, mspi->len)) {
514 cpu_relax();
515 events = mpc8xxx_spi_read_reg(&reg_base->event);
516 }
517 mspi->len -= 4;
518
519 rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
520
521 if (mspi->rx)
522 mspi->get_rx(rx_data, mspi);
523 }
524
525 if (!(events & SPIE_NF)) {
526 int ret;
527
528 /* spin until TX is done */
529 ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg(
530 &reg_base->event)) & SPIE_NF) == 0, 1000, 0);
531 if (!ret) {
532 dev_err(mspi->dev, "tired waiting for SPIE_NF\n");
533 return;
534 }
535 }
536
537 /* Clear the events */
538 mpc8xxx_spi_write_reg(&reg_base->event, events);
539
540 mspi->count -= 1;
541 if (mspi->count) {
542 u32 word = mspi->get_tx(mspi);
543
544 mpc8xxx_spi_write_reg(&reg_base->transmit, word);
545 } else {
546 complete(&mspi->done);
547 }
548}
549
550static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
551{
552 struct mpc8xxx_spi *mspi = context_data;
553 struct fsl_espi_reg *reg_base = mspi->reg_base;
554 irqreturn_t ret = IRQ_NONE;
555 u32 events;
556
557 /* Get interrupt events(tx/rx) */
558 events = mpc8xxx_spi_read_reg(&reg_base->event);
559 if (events)
560 ret = IRQ_HANDLED;
561
562 dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);
563
564 fsl_espi_cpu_irq(mspi, events);
565
566 return ret;
567}
568
569static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
570{
571 iounmap(mspi->reg_base);
572}
573
574static struct spi_master * __devinit fsl_espi_probe(struct device *dev,
575 struct resource *mem, unsigned int irq)
576{
577 struct fsl_spi_platform_data *pdata = dev->platform_data;
578 struct spi_master *master;
579 struct mpc8xxx_spi *mpc8xxx_spi;
580 struct fsl_espi_reg *reg_base;
581 u32 regval;
582 int i, ret = 0;
583
584 master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
585 if (!master) {
586 ret = -ENOMEM;
587 goto err;
588 }
589
590 dev_set_drvdata(dev, master);
591
592 ret = mpc8xxx_spi_probe(dev, mem, irq);
593 if (ret)
594 goto err_probe;
595
596 master->setup = fsl_espi_setup;
597
598 mpc8xxx_spi = spi_master_get_devdata(master);
599 mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg;
600 mpc8xxx_spi->spi_remove = fsl_espi_remove;
601
602 mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
603 if (!mpc8xxx_spi->reg_base) {
604 ret = -ENOMEM;
605 goto err_probe;
606 }
607
608 reg_base = mpc8xxx_spi->reg_base;
609
610 /* Register for SPI Interrupt */
611 ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq,
612 0, "fsl_espi", mpc8xxx_spi);
613 if (ret)
614 goto free_irq;
615
616 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
617 mpc8xxx_spi->rx_shift = 16;
618 mpc8xxx_spi->tx_shift = 24;
619 }
620
621 /* SPI controller initializations */
622 mpc8xxx_spi_write_reg(&reg_base->mode, 0);
623 mpc8xxx_spi_write_reg(&reg_base->mask, 0);
624 mpc8xxx_spi_write_reg(&reg_base->command, 0);
625 mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
626
627 /* Init eSPI CS mode register */
628 for (i = 0; i < pdata->max_chipselect; i++)
629 mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);
630
631 /* Enable SPI interface */
632 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
633
634 mpc8xxx_spi_write_reg(&reg_base->mode, regval);
635
636 ret = spi_register_master(master);
637 if (ret < 0)
638 goto unreg_master;
639
640 dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);
641
642 return master;
643
644unreg_master:
645 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
646free_irq:
647 iounmap(mpc8xxx_spi->reg_base);
648err_probe:
649 spi_master_put(master);
650err:
651 return ERR_PTR(ret);
652}
653
654static int of_fsl_espi_get_chipselects(struct device *dev)
655{
656 struct device_node *np = dev->of_node;
657 struct fsl_spi_platform_data *pdata = dev->platform_data;
658 const u32 *prop;
659 int len;
660
661 prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
662 if (!prop || len < sizeof(*prop)) {
663 dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
664 return -EINVAL;
665 }
666
667 pdata->max_chipselect = *prop;
668 pdata->cs_control = NULL;
669
670 return 0;
671}
672
673static int __devinit of_fsl_espi_probe(struct platform_device *ofdev,
674 const struct of_device_id *ofid)
675{
676 struct device *dev = &ofdev->dev;
677 struct device_node *np = ofdev->dev.of_node;
678 struct spi_master *master;
679 struct resource mem;
680 struct resource irq;
681 int ret = -ENOMEM;
682
683 ret = of_mpc8xxx_spi_probe(ofdev, ofid);
684 if (ret)
685 return ret;
686
687 ret = of_fsl_espi_get_chipselects(dev);
688 if (ret)
689 goto err;
690
691 ret = of_address_to_resource(np, 0, &mem);
692 if (ret)
693 goto err;
694
695 ret = of_irq_to_resource(np, 0, &irq);
696 if (!ret) {
697 ret = -EINVAL;
698 goto err;
699 }
700
701 master = fsl_espi_probe(dev, &mem, irq.start);
702 if (IS_ERR(master)) {
703 ret = PTR_ERR(master);
704 goto err;
705 }
706
707 return 0;
708
709err:
710 return ret;
711}
712
713static int __devexit of_fsl_espi_remove(struct platform_device *dev)
714{
715 return mpc8xxx_spi_remove(&dev->dev);
716}
717
718static const struct of_device_id of_fsl_espi_match[] = {
719 { .compatible = "fsl,mpc8536-espi" },
720 {}
721};
722MODULE_DEVICE_TABLE(of, of_fsl_espi_match);
723
724static struct of_platform_driver fsl_espi_driver = {
725 .driver = {
726 .name = "fsl_espi",
727 .owner = THIS_MODULE,
728 .of_match_table = of_fsl_espi_match,
729 },
730 .probe = of_fsl_espi_probe,
731 .remove = __devexit_p(of_fsl_espi_remove),
732};
733
734static int __init fsl_espi_init(void)
735{
736 return of_register_platform_driver(&fsl_espi_driver);
737}
738module_init(fsl_espi_init);
739
740static void __exit fsl_espi_exit(void)
741{
742 of_unregister_platform_driver(&fsl_espi_driver);
743}
744module_exit(fsl_espi_exit);
745
746MODULE_AUTHOR("Mingkai Hu");
747MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
748MODULE_LICENSE("GPL");
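For context on the n_tx/n_rx split in the new file above: fsl_espi_do_one_msg() counts the tx_buf-only transfers as the command bytes and the rx_buf transfer as the read data. That is exactly the message shape spi_write_then_read() produces, so a flash-style protocol driver sitting on top of this controller needs nothing eSPI-specific. A minimal, purely illustrative read helper (the 0x9f opcode is only an example) might look like:

#include <linux/spi/spi.h>

/* Illustrative only: one tx-only transfer (command) plus one rx-only
 * transfer (data), which fsl_espi_do_one_msg() maps to n_tx and n_rx
 * and then handles through fsl_espi_rw_trans().
 */
static int example_read_id(struct spi_device *spi, u8 *id, size_t len)
{
	u8 cmd = 0x9f;		/* example opcode (JEDEC READ ID) */

	return spi_write_then_read(spi, &cmd, 1, id, len);
}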
diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi_fsl_lib.c
new file mode 100644
index 000000000000..5cd741fdb5c3
--- /dev/null
+++ b/drivers/spi/spi_fsl_lib.c
@@ -0,0 +1,237 @@
1/*
2 * Freescale SPI/eSPI controller driver library.
3 *
4 * Maintainer: Kumar Gala
5 *
6 * Copyright (C) 2006 Polycom, Inc.
7 *
8 * CPM SPI and QE buffer descriptors mode support:
9 * Copyright (c) 2009 MontaVista Software, Inc.
10 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * Copyright 2010 Freescale Semiconductor, Inc.
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version.
18 */
19#include <linux/kernel.h>
20#include <linux/interrupt.h>
21#include <linux/fsl_devices.h>
22#include <linux/dma-mapping.h>
23#include <linux/mm.h>
24#include <linux/of_platform.h>
25#include <linux/of_spi.h>
26#include <sysdev/fsl_soc.h>
27
28#include "spi_fsl_lib.h"
29
30#define MPC8XXX_SPI_RX_BUF(type) \
31void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
32{ \
33 type *rx = mpc8xxx_spi->rx; \
34 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
35 mpc8xxx_spi->rx = rx; \
36}
37
38#define MPC8XXX_SPI_TX_BUF(type) \
39u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
40{ \
41 u32 data; \
42 const type *tx = mpc8xxx_spi->tx; \
43 if (!tx) \
44 return 0; \
45 data = *tx++ << mpc8xxx_spi->tx_shift; \
46 mpc8xxx_spi->tx = tx; \
47 return data; \
48}
49
50MPC8XXX_SPI_RX_BUF(u8)
51MPC8XXX_SPI_RX_BUF(u16)
52MPC8XXX_SPI_RX_BUF(u32)
53MPC8XXX_SPI_TX_BUF(u8)
54MPC8XXX_SPI_TX_BUF(u16)
55MPC8XXX_SPI_TX_BUF(u32)
56
57struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
58{
59 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
60}
61
62void mpc8xxx_spi_work(struct work_struct *work)
63{
64 struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
65 work);
66
67 spin_lock_irq(&mpc8xxx_spi->lock);
68 while (!list_empty(&mpc8xxx_spi->queue)) {
69 struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
70 struct spi_message, queue);
71
72 list_del_init(&m->queue);
73 spin_unlock_irq(&mpc8xxx_spi->lock);
74
75 if (mpc8xxx_spi->spi_do_one_msg)
76 mpc8xxx_spi->spi_do_one_msg(m);
77
78 spin_lock_irq(&mpc8xxx_spi->lock);
79 }
80 spin_unlock_irq(&mpc8xxx_spi->lock);
81}
82
83int mpc8xxx_spi_transfer(struct spi_device *spi,
84 struct spi_message *m)
85{
86 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
87 unsigned long flags;
88
89 m->actual_length = 0;
90 m->status = -EINPROGRESS;
91
92 spin_lock_irqsave(&mpc8xxx_spi->lock, flags);
93 list_add_tail(&m->queue, &mpc8xxx_spi->queue);
94 queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
95 spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);
96
97 return 0;
98}
99
100void mpc8xxx_spi_cleanup(struct spi_device *spi)
101{
102 kfree(spi->controller_state);
103}
104
105const char *mpc8xxx_spi_strmode(unsigned int flags)
106{
107 if (flags & SPI_QE_CPU_MODE) {
108 return "QE CPU";
109 } else if (flags & SPI_CPM_MODE) {
110 if (flags & SPI_QE)
111 return "QE";
112 else if (flags & SPI_CPM2)
113 return "CPM2";
114 else
115 return "CPM1";
116 }
117 return "CPU";
118}
119
120int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
121 unsigned int irq)
122{
123 struct fsl_spi_platform_data *pdata = dev->platform_data;
124 struct spi_master *master;
125 struct mpc8xxx_spi *mpc8xxx_spi;
126 int ret = 0;
127
128 master = dev_get_drvdata(dev);
129
130 /* the spi->mode bits understood by this driver: */
131 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
132 | SPI_LSB_FIRST | SPI_LOOP;
133
134 master->transfer = mpc8xxx_spi_transfer;
135 master->cleanup = mpc8xxx_spi_cleanup;
136 master->dev.of_node = dev->of_node;
137
138 mpc8xxx_spi = spi_master_get_devdata(master);
139 mpc8xxx_spi->dev = dev;
140 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
141 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
142 mpc8xxx_spi->flags = pdata->flags;
143 mpc8xxx_spi->spibrg = pdata->sysclk;
144 mpc8xxx_spi->irq = irq;
145
146 mpc8xxx_spi->rx_shift = 0;
147 mpc8xxx_spi->tx_shift = 0;
148
149 init_completion(&mpc8xxx_spi->done);
150
151 master->bus_num = pdata->bus_num;
152 master->num_chipselect = pdata->max_chipselect;
153
154 spin_lock_init(&mpc8xxx_spi->lock);
155 init_completion(&mpc8xxx_spi->done);
156 INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
157 INIT_LIST_HEAD(&mpc8xxx_spi->queue);
158
159 mpc8xxx_spi->workqueue = create_singlethread_workqueue(
160 dev_name(master->dev.parent));
161 if (mpc8xxx_spi->workqueue == NULL) {
162 ret = -EBUSY;
163 goto err;
164 }
165
166 return 0;
167
168err:
169 return ret;
170}
171
172int __devexit mpc8xxx_spi_remove(struct device *dev)
173{
174 struct mpc8xxx_spi *mpc8xxx_spi;
175 struct spi_master *master;
176
177 master = dev_get_drvdata(dev);
178 mpc8xxx_spi = spi_master_get_devdata(master);
179
180 flush_workqueue(mpc8xxx_spi->workqueue);
181 destroy_workqueue(mpc8xxx_spi->workqueue);
182 spi_unregister_master(master);
183
184 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
185
186 if (mpc8xxx_spi->spi_remove)
187 mpc8xxx_spi->spi_remove(mpc8xxx_spi);
188
189 return 0;
190}
191
192int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
193 const struct of_device_id *ofid)
194{
195 struct device *dev = &ofdev->dev;
196 struct device_node *np = ofdev->dev.of_node;
197 struct mpc8xxx_spi_probe_info *pinfo;
198 struct fsl_spi_platform_data *pdata;
199 const void *prop;
200 int ret = -ENOMEM;
201
202 pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
203 if (!pinfo)
204 return -ENOMEM;
205
206 pdata = &pinfo->pdata;
207 dev->platform_data = pdata;
208
209 /* Allocate bus num dynamically. */
210 pdata->bus_num = -1;
211
212 /* SPI controller is either clocked from QE or SoC clock. */
213 pdata->sysclk = get_brgfreq();
214 if (pdata->sysclk == -1) {
215 pdata->sysclk = fsl_get_sys_freq();
216 if (pdata->sysclk == -1) {
217 ret = -ENODEV;
218 goto err;
219 }
220 }
221
222 prop = of_get_property(np, "mode", NULL);
223 if (prop && !strcmp(prop, "cpu-qe"))
224 pdata->flags = SPI_QE_CPU_MODE;
225 else if (prop && !strcmp(prop, "qe"))
226 pdata->flags = SPI_CPM_MODE | SPI_QE;
227 else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
228 pdata->flags = SPI_CPM_MODE | SPI_CPM2;
229 else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
230 pdata->flags = SPI_CPM_MODE | SPI_CPM1;
231
232 return 0;
233
234err:
235 kfree(pinfo);
236 return ret;
237}
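The queueing path factored out into this library works the same way for both the SPI and eSPI front ends: mpc8xxx_spi_transfer() (installed as master->transfer) only queues the message and kicks the single-threaded workqueue, and mpc8xxx_spi_work() later hands each message to the controller's spi_do_one_msg() hook. From a client's point of view that is the ordinary synchronous flow; a minimal, illustrative caller is sketched below.

#include <linux/spi/spi.h>

/* Sketch of a synchronous client transfer: spi_sync() reaches
 * mpc8xxx_spi_transfer() via master->transfer, the workqueue drains the
 * queue, and m->complete() wakes the caller again.
 */
static int example_xfer(struct spi_device *spi, const void *tx, void *rx,
			size_t len)
{
	struct spi_transfer t = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	return spi_sync(spi, &m);
}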
diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi_fsl_lib.h
new file mode 100644
index 000000000000..281e060977cd
--- /dev/null
+++ b/drivers/spi/spi_fsl_lib.h
@@ -0,0 +1,124 @@
1/*
2 * Freescale SPI/eSPI controller driver library.
3 *
4 * Maintainer: Kumar Gala
5 *
6 * Copyright 2010 Freescale Semiconductor, Inc.
7 * Copyright (C) 2006 Polycom, Inc.
8 *
9 * CPM SPI and QE buffer descriptors mode support:
10 * Copyright (c) 2009 MontaVista Software, Inc.
11 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#ifndef __SPI_FSL_LIB_H__
19#define __SPI_FSL_LIB_H__
20
21#include <asm/io.h>
22
23/* SPI/eSPI Controller driver's private data. */
24struct mpc8xxx_spi {
25 struct device *dev;
26 void *reg_base;
27
28 /* rx & tx bufs from the spi_transfer */
29 const void *tx;
30 void *rx;
31#ifdef CONFIG_SPI_FSL_ESPI
32 int len;
33#endif
34
35 int subblock;
36 struct spi_pram __iomem *pram;
37 struct cpm_buf_desc __iomem *tx_bd;
38 struct cpm_buf_desc __iomem *rx_bd;
39
40 struct spi_transfer *xfer_in_progress;
41
42 /* dma addresses for CPM transfers */
43 dma_addr_t tx_dma;
44 dma_addr_t rx_dma;
45 bool map_tx_dma;
46 bool map_rx_dma;
47
48 dma_addr_t dma_dummy_tx;
49 dma_addr_t dma_dummy_rx;
50
51 /* functions to deal with different sized buffers */
52 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
53 u32(*get_tx) (struct mpc8xxx_spi *);
54
55 /* hooks for different controller driver */
56 void (*spi_do_one_msg) (struct spi_message *m);
57 void (*spi_remove) (struct mpc8xxx_spi *mspi);
58
59 unsigned int count;
60 unsigned int irq;
61
62 unsigned nsecs; /* (clock cycle time)/2 */
63
64 u32 spibrg; /* SPIBRG input clock */
65 u32 rx_shift; /* RX data reg shift when in qe mode */
66 u32 tx_shift; /* TX data reg shift when in qe mode */
67
68 unsigned int flags;
69
70 struct workqueue_struct *workqueue;
71 struct work_struct work;
72
73 struct list_head queue;
74 spinlock_t lock;
75
76 struct completion done;
77};
78
79struct spi_mpc8xxx_cs {
80 /* functions to deal with different sized buffers */
81 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
82 u32 (*get_tx) (struct mpc8xxx_spi *);
83 u32 rx_shift; /* RX data reg shift when in qe mode */
84 u32 tx_shift; /* TX data reg shift when in qe mode */
85 u32 hw_mode; /* Holds HW mode register settings */
86};
87
88static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
89{
90 out_be32(reg, val);
91}
92
93static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
94{
95 return in_be32(reg);
96}
97
98struct mpc8xxx_spi_probe_info {
99 struct fsl_spi_platform_data pdata;
100 int *gpios;
101 bool *alow_flags;
102};
103
104extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi);
105extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi);
106extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi);
107extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
108extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
109extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
110
111extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
112 struct fsl_spi_platform_data *pdata);
113extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
114 struct spi_transfer *t, unsigned int len);
115extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m);
116extern void mpc8xxx_spi_cleanup(struct spi_device *spi);
117extern const char *mpc8xxx_spi_strmode(unsigned int flags);
118extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
119 unsigned int irq);
120extern int mpc8xxx_spi_remove(struct device *dev);
121extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev,
122 const struct of_device_id *ofid);
123
124#endif /* __SPI_FSL_LIB_H__ */
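A controller driver consumes this header in three steps, mirroring what fsl_espi_probe() does above: allocate the spi_master with a struct mpc8xxx_spi as driver data, let mpc8xxx_spi_probe() set up the common queue/worker plumbing, then install the per-controller spi_do_one_msg and spi_remove hooks. The sketch below follows those steps; my_do_one_msg() and my_remove() are hypothetical.

#include <linux/err.h>
#include <linux/spi/spi.h>
#include "spi_fsl_lib.h"

static void my_do_one_msg(struct spi_message *m);	/* hypothetical */
static void my_remove(struct mpc8xxx_spi *mspi);	/* hypothetical */

static struct spi_master *my_controller_probe(struct device *dev,
					      struct resource *mem,
					      unsigned int irq)
{
	struct spi_master *master;
	struct mpc8xxx_spi *mspi;
	int ret;

	master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
	if (!master)
		return ERR_PTR(-ENOMEM);

	dev_set_drvdata(dev, master);

	ret = mpc8xxx_spi_probe(dev, mem, irq);	/* common queue/worker setup */
	if (ret) {
		spi_master_put(master);
		return ERR_PTR(ret);
	}

	mspi = spi_master_get_devdata(master);
	mspi->spi_do_one_msg = my_do_one_msg;	/* controller-specific transfer path */
	mspi->spi_remove = my_remove;		/* controller-specific teardown */

	return master;
}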
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_fsl_spi.c
index 1dd86b835cd8..7ca52d3ae8f8 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_fsl_spi.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * MPC8xxx SPI controller driver. 2 * Freescale SPI controller driver.
3 * 3 *
4 * Maintainer: Kumar Gala 4 * Maintainer: Kumar Gala
5 * 5 *
6 * Copyright (C) 2006 Polycom, Inc. 6 * Copyright (C) 2006 Polycom, Inc.
7 * Copyright 2010 Freescale Semiconductor, Inc.
7 * 8 *
8 * CPM SPI and QE buffer descriptors mode support: 9 * CPM SPI and QE buffer descriptors mode support:
9 * Copyright (c) 2009 MontaVista Software, Inc. 10 * Copyright (c) 2009 MontaVista Software, Inc.
@@ -15,18 +16,11 @@
15 * option) any later version. 16 * option) any later version.
16 */ 17 */
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/bug.h>
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/io.h>
25#include <linux/completion.h>
26#include <linux/interrupt.h> 21#include <linux/interrupt.h>
27#include <linux/delay.h> 22#include <linux/delay.h>
28#include <linux/irq.h> 23#include <linux/irq.h>
29#include <linux/device.h>
30#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
31#include <linux/spi/spi_bitbang.h> 25#include <linux/spi/spi_bitbang.h>
32#include <linux/platform_device.h> 26#include <linux/platform_device.h>
@@ -38,12 +32,12 @@
38#include <linux/of_platform.h> 32#include <linux/of_platform.h>
39#include <linux/gpio.h> 33#include <linux/gpio.h>
40#include <linux/of_gpio.h> 34#include <linux/of_gpio.h>
41#include <linux/slab.h>
42 35
43#include <sysdev/fsl_soc.h> 36#include <sysdev/fsl_soc.h>
44#include <asm/cpm.h> 37#include <asm/cpm.h>
45#include <asm/qe.h> 38#include <asm/qe.h>
46#include <asm/irq.h> 39
40#include "spi_fsl_lib.h"
47 41
48/* CPM1 and CPM2 are mutually exclusive. */ 42/* CPM1 and CPM2 are mutually exclusive. */
49#ifdef CONFIG_CPM1 43#ifdef CONFIG_CPM1
@@ -55,7 +49,7 @@
55#endif 49#endif
56 50
57/* SPI Controller registers */ 51/* SPI Controller registers */
58struct mpc8xxx_spi_reg { 52struct fsl_spi_reg {
59 u8 res1[0x20]; 53 u8 res1[0x20];
60 __be32 mode; 54 __be32 mode;
61 __be32 event; 55 __be32 event;
@@ -80,7 +74,7 @@ struct mpc8xxx_spi_reg {
80 74
81/* 75/*
82 * Default for SPI Mode: 76 * Default for SPI Mode:
83 * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk 77 * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk
84 */ 78 */
85#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ 79#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
86 SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) 80 SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
@@ -102,112 +96,16 @@ struct mpc8xxx_spi_reg {
102#define SPI_PRAM_SIZE 0x100 96#define SPI_PRAM_SIZE 0x100
103#define SPI_MRBLR ((unsigned int)PAGE_SIZE) 97#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
104 98
105/* SPI Controller driver's private data. */ 99static void *fsl_dummy_rx;
106struct mpc8xxx_spi { 100static DEFINE_MUTEX(fsl_dummy_rx_lock);
107 struct device *dev; 101static int fsl_dummy_rx_refcnt;
108 struct mpc8xxx_spi_reg __iomem *base;
109
110 /* rx & tx bufs from the spi_transfer */
111 const void *tx;
112 void *rx;
113
114 int subblock;
115 struct spi_pram __iomem *pram;
116 struct cpm_buf_desc __iomem *tx_bd;
117 struct cpm_buf_desc __iomem *rx_bd;
118
119 struct spi_transfer *xfer_in_progress;
120
121 /* dma addresses for CPM transfers */
122 dma_addr_t tx_dma;
123 dma_addr_t rx_dma;
124 bool map_tx_dma;
125 bool map_rx_dma;
126
127 dma_addr_t dma_dummy_tx;
128 dma_addr_t dma_dummy_rx;
129
130 /* functions to deal with different sized buffers */
131 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
132 u32(*get_tx) (struct mpc8xxx_spi *);
133
134 unsigned int count;
135 unsigned int irq;
136
137 unsigned nsecs; /* (clock cycle time)/2 */
138
139 u32 spibrg; /* SPIBRG input clock */
140 u32 rx_shift; /* RX data reg shift when in qe mode */
141 u32 tx_shift; /* TX data reg shift when in qe mode */
142
143 unsigned int flags;
144
145 struct workqueue_struct *workqueue;
146 struct work_struct work;
147
148 struct list_head queue;
149 spinlock_t lock;
150
151 struct completion done;
152};
153
154static void *mpc8xxx_dummy_rx;
155static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock);
156static int mpc8xxx_dummy_rx_refcnt;
157
158struct spi_mpc8xxx_cs {
159 /* functions to deal with different sized buffers */
160 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
161 u32 (*get_tx) (struct mpc8xxx_spi *);
162 u32 rx_shift; /* RX data reg shift when in qe mode */
163 u32 tx_shift; /* TX data reg shift when in qe mode */
164 u32 hw_mode; /* Holds HW mode register settings */
165};
166
167static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
168{
169 out_be32(reg, val);
170}
171
172static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
173{
174 return in_be32(reg);
175}
176
177#define MPC83XX_SPI_RX_BUF(type) \
178static \
179void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
180{ \
181 type *rx = mpc8xxx_spi->rx; \
182 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
183 mpc8xxx_spi->rx = rx; \
184}
185
186#define MPC83XX_SPI_TX_BUF(type) \
187static \
188u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
189{ \
190 u32 data; \
191 const type *tx = mpc8xxx_spi->tx; \
192 if (!tx) \
193 return 0; \
194 data = *tx++ << mpc8xxx_spi->tx_shift; \
195 mpc8xxx_spi->tx = tx; \
196 return data; \
197}
198 102
199MPC83XX_SPI_RX_BUF(u8) 103static void fsl_spi_change_mode(struct spi_device *spi)
200MPC83XX_SPI_RX_BUF(u16)
201MPC83XX_SPI_RX_BUF(u32)
202MPC83XX_SPI_TX_BUF(u8)
203MPC83XX_SPI_TX_BUF(u16)
204MPC83XX_SPI_TX_BUF(u32)
205
206static void mpc8xxx_spi_change_mode(struct spi_device *spi)
207{ 104{
208 struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); 105 struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
209 struct spi_mpc8xxx_cs *cs = spi->controller_state; 106 struct spi_mpc8xxx_cs *cs = spi->controller_state;
210 __be32 __iomem *mode = &mspi->base->mode; 107 struct fsl_spi_reg *reg_base = mspi->reg_base;
108 __be32 __iomem *mode = &reg_base->mode;
211 unsigned long flags; 109 unsigned long flags;
212 110
213 if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) 111 if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
@@ -238,7 +136,7 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi)
238 local_irq_restore(flags); 136 local_irq_restore(flags);
239} 137}
240 138
241static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) 139static void fsl_spi_chipselect(struct spi_device *spi, int value)
242{ 140{
243 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 141 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
244 struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; 142 struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
@@ -256,18 +154,17 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
256 mpc8xxx_spi->get_rx = cs->get_rx; 154 mpc8xxx_spi->get_rx = cs->get_rx;
257 mpc8xxx_spi->get_tx = cs->get_tx; 155 mpc8xxx_spi->get_tx = cs->get_tx;
258 156
259 mpc8xxx_spi_change_mode(spi); 157 fsl_spi_change_mode(spi);
260 158
261 if (pdata->cs_control) 159 if (pdata->cs_control)
262 pdata->cs_control(spi, pol); 160 pdata->cs_control(spi, pol);
263 } 161 }
264} 162}
265 163
266static int 164static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
267mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, 165 struct spi_device *spi,
268 struct spi_device *spi, 166 struct mpc8xxx_spi *mpc8xxx_spi,
269 struct mpc8xxx_spi *mpc8xxx_spi, 167 int bits_per_word)
270 int bits_per_word)
271{ 168{
272 cs->rx_shift = 0; 169 cs->rx_shift = 0;
273 cs->tx_shift = 0; 170 cs->tx_shift = 0;
@@ -307,10 +204,9 @@ mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
307 return bits_per_word; 204 return bits_per_word;
308} 205}
309 206
310static int 207static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
311mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, 208 struct spi_device *spi,
312 struct spi_device *spi, 209 int bits_per_word)
313 int bits_per_word)
314{ 210{
315 /* QE uses Little Endian for words > 8 211 /* QE uses Little Endian for words > 8
316 * so transform all words > 8 into 8 bits 212 * so transform all words > 8 into 8 bits
@@ -326,13 +222,13 @@ mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
326 return bits_per_word; 222 return bits_per_word;
327} 223}
328 224
329static 225static int fsl_spi_setup_transfer(struct spi_device *spi,
330int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 226 struct spi_transfer *t)
331{ 227{
332 struct mpc8xxx_spi *mpc8xxx_spi; 228 struct mpc8xxx_spi *mpc8xxx_spi;
333 int bits_per_word; 229 int bits_per_word = 0;
334 u8 pm; 230 u8 pm;
335 u32 hz; 231 u32 hz = 0;
336 struct spi_mpc8xxx_cs *cs = spi->controller_state; 232 struct spi_mpc8xxx_cs *cs = spi->controller_state;
337 233
338 mpc8xxx_spi = spi_master_get_devdata(spi->master); 234 mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -340,9 +236,6 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
340 if (t) { 236 if (t) {
341 bits_per_word = t->bits_per_word; 237 bits_per_word = t->bits_per_word;
342 hz = t->speed_hz; 238 hz = t->speed_hz;
343 } else {
344 bits_per_word = 0;
345 hz = 0;
346 } 239 }
347 240
348 /* spi_transfer level calls that work per-word */ 241 /* spi_transfer level calls that work per-word */
@@ -388,23 +281,25 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
388 hz, mpc8xxx_spi->spibrg / 1024); 281 hz, mpc8xxx_spi->spibrg / 1024);
389 if (pm > 16) 282 if (pm > 16)
390 pm = 16; 283 pm = 16;
391 } else 284 } else {
392 pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; 285 pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
286 }
393 if (pm) 287 if (pm)
394 pm--; 288 pm--;
395 289
396 cs->hw_mode |= SPMODE_PM(pm); 290 cs->hw_mode |= SPMODE_PM(pm);
397 291
398 mpc8xxx_spi_change_mode(spi); 292 fsl_spi_change_mode(spi);
399 return 0; 293 return 0;
400} 294}
401 295
402static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) 296static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
403{ 297{
404 struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; 298 struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
405 struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; 299 struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
406 unsigned int xfer_len = min(mspi->count, SPI_MRBLR); 300 unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
407 unsigned int xfer_ofs; 301 unsigned int xfer_ofs;
302 struct fsl_spi_reg *reg_base = mspi->reg_base;
408 303
409 xfer_ofs = mspi->xfer_in_progress->len - mspi->count; 304 xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
410 305
@@ -424,13 +319,14 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
424 BD_SC_LAST); 319 BD_SC_LAST);
425 320
426 /* start transfer */ 321 /* start transfer */
427 mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR); 322 mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
428} 323}
429 324
430static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, 325static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
431 struct spi_transfer *t, bool is_dma_mapped) 326 struct spi_transfer *t, bool is_dma_mapped)
432{ 327{
433 struct device *dev = mspi->dev; 328 struct device *dev = mspi->dev;
329 struct fsl_spi_reg *reg_base = mspi->reg_base;
434 330
435 if (is_dma_mapped) { 331 if (is_dma_mapped) {
436 mspi->map_tx_dma = 0; 332 mspi->map_tx_dma = 0;
@@ -475,13 +371,13 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
475 } 371 }
476 372
477 /* enable rx ints */ 373 /* enable rx ints */
478 mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB); 374 mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
479 375
480 mspi->xfer_in_progress = t; 376 mspi->xfer_in_progress = t;
481 mspi->count = t->len; 377 mspi->count = t->len;
482 378
483 /* start CPM transfers */ 379 /* start CPM transfers */
484 mpc8xxx_spi_cpm_bufs_start(mspi); 380 fsl_spi_cpm_bufs_start(mspi);
485 381
486 return 0; 382 return 0;
487 383
@@ -491,7 +387,7 @@ err_rx_dma:
491 return -ENOMEM; 387 return -ENOMEM;
492} 388}
493 389
494static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) 390static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
495{ 391{
496 struct device *dev = mspi->dev; 392 struct device *dev = mspi->dev;
497 struct spi_transfer *t = mspi->xfer_in_progress; 393 struct spi_transfer *t = mspi->xfer_in_progress;
@@ -503,31 +399,34 @@ static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
503 mspi->xfer_in_progress = NULL; 399 mspi->xfer_in_progress = NULL;
504} 400}
505 401
506static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi, 402static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
507 struct spi_transfer *t, unsigned int len) 403 struct spi_transfer *t, unsigned int len)
508{ 404{
509 u32 word; 405 u32 word;
406 struct fsl_spi_reg *reg_base = mspi->reg_base;
510 407
511 mspi->count = len; 408 mspi->count = len;
512 409
513 /* enable rx ints */ 410 /* enable rx ints */
514 mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE); 411 mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
515 412
516 /* transmit word */ 413 /* transmit word */
517 word = mspi->get_tx(mspi); 414 word = mspi->get_tx(mspi);
518 mpc8xxx_spi_write_reg(&mspi->base->transmit, word); 415 mpc8xxx_spi_write_reg(&reg_base->transmit, word);
519 416
520 return 0; 417 return 0;
521} 418}
522 419
523static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t, 420static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
524 bool is_dma_mapped) 421 bool is_dma_mapped)
525{ 422{
526 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 423 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
424 struct fsl_spi_reg *reg_base;
527 unsigned int len = t->len; 425 unsigned int len = t->len;
528 u8 bits_per_word; 426 u8 bits_per_word;
529 int ret; 427 int ret;
530 428
429 reg_base = mpc8xxx_spi->reg_base;
531 bits_per_word = spi->bits_per_word; 430 bits_per_word = spi->bits_per_word;
532 if (t->bits_per_word) 431 if (t->bits_per_word)
533 bits_per_word = t->bits_per_word; 432 bits_per_word = t->bits_per_word;
@@ -551,24 +450,24 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
551 INIT_COMPLETION(mpc8xxx_spi->done); 450 INIT_COMPLETION(mpc8xxx_spi->done);
552 451
553 if (mpc8xxx_spi->flags & SPI_CPM_MODE) 452 if (mpc8xxx_spi->flags & SPI_CPM_MODE)
554 ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); 453 ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
555 else 454 else
556 ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len); 455 ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len);
557 if (ret) 456 if (ret)
558 return ret; 457 return ret;
559 458
560 wait_for_completion(&mpc8xxx_spi->done); 459 wait_for_completion(&mpc8xxx_spi->done);
561 460
562 /* disable rx ints */ 461 /* disable rx ints */
563 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); 462 mpc8xxx_spi_write_reg(&reg_base->mask, 0);
564 463
565 if (mpc8xxx_spi->flags & SPI_CPM_MODE) 464 if (mpc8xxx_spi->flags & SPI_CPM_MODE)
566 mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi); 465 fsl_spi_cpm_bufs_complete(mpc8xxx_spi);
567 466
568 return mpc8xxx_spi->count; 467 return mpc8xxx_spi->count;
569} 468}
570 469
571static void mpc8xxx_spi_do_one_msg(struct spi_message *m) 470static void fsl_spi_do_one_msg(struct spi_message *m)
572{ 471{
573 struct spi_device *spi = m->spi; 472 struct spi_device *spi = m->spi;
574 struct spi_transfer *t; 473 struct spi_transfer *t;
@@ -584,18 +483,18 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
584 status = -EINVAL; 483 status = -EINVAL;
585 484
586 if (cs_change) 485 if (cs_change)
587 status = mpc8xxx_spi_setup_transfer(spi, t); 486 status = fsl_spi_setup_transfer(spi, t);
588 if (status < 0) 487 if (status < 0)
589 break; 488 break;
590 } 489 }
591 490
592 if (cs_change) { 491 if (cs_change) {
593 mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE); 492 fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
594 ndelay(nsecs); 493 ndelay(nsecs);
595 } 494 }
596 cs_change = t->cs_change; 495 cs_change = t->cs_change;
597 if (t->len) 496 if (t->len)
598 status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped); 497 status = fsl_spi_bufs(spi, t, m->is_dma_mapped);
599 if (status) { 498 if (status) {
600 status = -EMSGSIZE; 499 status = -EMSGSIZE;
601 break; 500 break;
@@ -607,7 +506,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
607 506
608 if (cs_change) { 507 if (cs_change) {
609 ndelay(nsecs); 508 ndelay(nsecs);
610 mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); 509 fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
611 ndelay(nsecs); 510 ndelay(nsecs);
612 } 511 }
613 } 512 }
@@ -617,35 +516,16 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
617 516
618 if (status || !cs_change) { 517 if (status || !cs_change) {
619 ndelay(nsecs); 518 ndelay(nsecs);
620 mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); 519 fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
621 } 520 }
622 521
623 mpc8xxx_spi_setup_transfer(spi, NULL); 522 fsl_spi_setup_transfer(spi, NULL);
624}
625
626static void mpc8xxx_spi_work(struct work_struct *work)
627{
628 struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
629 work);
630
631 spin_lock_irq(&mpc8xxx_spi->lock);
632 while (!list_empty(&mpc8xxx_spi->queue)) {
633 struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
634 struct spi_message, queue);
635
636 list_del_init(&m->queue);
637 spin_unlock_irq(&mpc8xxx_spi->lock);
638
639 mpc8xxx_spi_do_one_msg(m);
640
641 spin_lock_irq(&mpc8xxx_spi->lock);
642 }
643 spin_unlock_irq(&mpc8xxx_spi->lock);
644} 523}
645 524
646static int mpc8xxx_spi_setup(struct spi_device *spi) 525static int fsl_spi_setup(struct spi_device *spi)
647{ 526{
648 struct mpc8xxx_spi *mpc8xxx_spi; 527 struct mpc8xxx_spi *mpc8xxx_spi;
528 struct fsl_spi_reg *reg_base;
649 int retval; 529 int retval;
650 u32 hw_mode; 530 u32 hw_mode;
651 struct spi_mpc8xxx_cs *cs = spi->controller_state; 531 struct spi_mpc8xxx_cs *cs = spi->controller_state;
@@ -661,8 +541,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
661 } 541 }
662 mpc8xxx_spi = spi_master_get_devdata(spi->master); 542 mpc8xxx_spi = spi_master_get_devdata(spi->master);
663 543
544 reg_base = mpc8xxx_spi->reg_base;
545
664 hw_mode = cs->hw_mode; /* Save original settings */ 546 hw_mode = cs->hw_mode; /* Save original settings */
665 cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); 547 cs->hw_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
666 /* mask out bits we are going to set */ 548 /* mask out bits we are going to set */
667 cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH 549 cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
668 | SPMODE_REV | SPMODE_LOOP); 550 | SPMODE_REV | SPMODE_LOOP);
@@ -676,7 +558,7 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
676 if (spi->mode & SPI_LOOP) 558 if (spi->mode & SPI_LOOP)
677 cs->hw_mode |= SPMODE_LOOP; 559 cs->hw_mode |= SPMODE_LOOP;
678 560
679 retval = mpc8xxx_spi_setup_transfer(spi, NULL); 561 retval = fsl_spi_setup_transfer(spi, NULL);
680 if (retval < 0) { 562 if (retval < 0) {
681 cs->hw_mode = hw_mode; /* Restore settings */ 563 cs->hw_mode = hw_mode; /* Restore settings */
682 return retval; 564 return retval;
@@ -684,9 +566,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
684 return 0; 566 return 0;
685} 567}
686 568
687static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) 569static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
688{ 570{
689 u16 len; 571 u16 len;
572 struct fsl_spi_reg *reg_base = mspi->reg_base;
690 573
691 dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, 574 dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
692 in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); 575 in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
@@ -698,20 +581,22 @@ static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
698 } 581 }
699 582
700 /* Clear the events */ 583 /* Clear the events */
701 mpc8xxx_spi_write_reg(&mspi->base->event, events); 584 mpc8xxx_spi_write_reg(&reg_base->event, events);
702 585
703 mspi->count -= len; 586 mspi->count -= len;
704 if (mspi->count) 587 if (mspi->count)
705 mpc8xxx_spi_cpm_bufs_start(mspi); 588 fsl_spi_cpm_bufs_start(mspi);
706 else 589 else
707 complete(&mspi->done); 590 complete(&mspi->done);
708} 591}
709 592
710static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) 593static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
711{ 594{
595 struct fsl_spi_reg *reg_base = mspi->reg_base;
596
712 /* We need handle RX first */ 597 /* We need handle RX first */
713 if (events & SPIE_NE) { 598 if (events & SPIE_NE) {
714 u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive); 599 u32 rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
715 600
716 if (mspi->rx) 601 if (mspi->rx)
717 mspi->get_rx(rx_data, mspi); 602 mspi->get_rx(rx_data, mspi);
@@ -720,102 +605,80 @@ static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
720 if ((events & SPIE_NF) == 0) 605 if ((events & SPIE_NF) == 0)
721 /* spin until TX is done */ 606 /* spin until TX is done */
722 while (((events = 607 while (((events =
723 mpc8xxx_spi_read_reg(&mspi->base->event)) & 608 mpc8xxx_spi_read_reg(&reg_base->event)) &
724 SPIE_NF) == 0) 609 SPIE_NF) == 0)
725 cpu_relax(); 610 cpu_relax();
726 611
727 /* Clear the events */ 612 /* Clear the events */
728 mpc8xxx_spi_write_reg(&mspi->base->event, events); 613 mpc8xxx_spi_write_reg(&reg_base->event, events);
729 614
730 mspi->count -= 1; 615 mspi->count -= 1;
731 if (mspi->count) { 616 if (mspi->count) {
732 u32 word = mspi->get_tx(mspi); 617 u32 word = mspi->get_tx(mspi);
733 618
734 mpc8xxx_spi_write_reg(&mspi->base->transmit, word); 619 mpc8xxx_spi_write_reg(&reg_base->transmit, word);
735 } else { 620 } else {
736 complete(&mspi->done); 621 complete(&mspi->done);
737 } 622 }
738} 623}
739 624
740static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) 625static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
741{ 626{
742 struct mpc8xxx_spi *mspi = context_data; 627 struct mpc8xxx_spi *mspi = context_data;
743 irqreturn_t ret = IRQ_NONE; 628 irqreturn_t ret = IRQ_NONE;
744 u32 events; 629 u32 events;
630 struct fsl_spi_reg *reg_base = mspi->reg_base;
745 631
746 /* Get interrupt events(tx/rx) */ 632 /* Get interrupt events(tx/rx) */
747 events = mpc8xxx_spi_read_reg(&mspi->base->event); 633 events = mpc8xxx_spi_read_reg(&reg_base->event);
748 if (events) 634 if (events)
749 ret = IRQ_HANDLED; 635 ret = IRQ_HANDLED;
750 636
751 dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); 637 dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
752 638
753 if (mspi->flags & SPI_CPM_MODE) 639 if (mspi->flags & SPI_CPM_MODE)
754 mpc8xxx_spi_cpm_irq(mspi, events); 640 fsl_spi_cpm_irq(mspi, events);
755 else 641 else
756 mpc8xxx_spi_cpu_irq(mspi, events); 642 fsl_spi_cpu_irq(mspi, events);
757 643
758 return ret; 644 return ret;
759} 645}
760 646
761static int mpc8xxx_spi_transfer(struct spi_device *spi, 647static void *fsl_spi_alloc_dummy_rx(void)
762 struct spi_message *m)
763{ 648{
764 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 649 mutex_lock(&fsl_dummy_rx_lock);
765 unsigned long flags;
766 650
767 m->actual_length = 0; 651 if (!fsl_dummy_rx)
768 m->status = -EINPROGRESS; 652 fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
653 if (fsl_dummy_rx)
654 fsl_dummy_rx_refcnt++;
769 655
770 spin_lock_irqsave(&mpc8xxx_spi->lock, flags); 656 mutex_unlock(&fsl_dummy_rx_lock);
771 list_add_tail(&m->queue, &mpc8xxx_spi->queue);
772 queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
773 spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);
774 657
775 return 0; 658 return fsl_dummy_rx;
776} 659}
777 660
778 661static void fsl_spi_free_dummy_rx(void)
779static void mpc8xxx_spi_cleanup(struct spi_device *spi)
780{ 662{
781 kfree(spi->controller_state); 663 mutex_lock(&fsl_dummy_rx_lock);
782}
783 664
784static void *mpc8xxx_spi_alloc_dummy_rx(void) 665 switch (fsl_dummy_rx_refcnt) {
785{
786 mutex_lock(&mpc8xxx_dummy_rx_lock);
787
788 if (!mpc8xxx_dummy_rx)
789 mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
790 if (mpc8xxx_dummy_rx)
791 mpc8xxx_dummy_rx_refcnt++;
792
793 mutex_unlock(&mpc8xxx_dummy_rx_lock);
794
795 return mpc8xxx_dummy_rx;
796}
797
798static void mpc8xxx_spi_free_dummy_rx(void)
799{
800 mutex_lock(&mpc8xxx_dummy_rx_lock);
801
802 switch (mpc8xxx_dummy_rx_refcnt) {
803 case 0: 666 case 0:
804 WARN_ON(1); 667 WARN_ON(1);
805 break; 668 break;
806 case 1: 669 case 1:
807 kfree(mpc8xxx_dummy_rx); 670 kfree(fsl_dummy_rx);
808 mpc8xxx_dummy_rx = NULL; 671 fsl_dummy_rx = NULL;
809 /* fall through */ 672 /* fall through */
810 default: 673 default:
811 mpc8xxx_dummy_rx_refcnt--; 674 fsl_dummy_rx_refcnt--;
812 break; 675 break;
813 } 676 }
814 677
815 mutex_unlock(&mpc8xxx_dummy_rx_lock); 678 mutex_unlock(&fsl_dummy_rx_lock);
816} 679}
817 680
818static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) 681static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
819{ 682{
820 struct device *dev = mspi->dev; 683 struct device *dev = mspi->dev;
821 struct device_node *np = dev->of_node; 684 struct device_node *np = dev->of_node;
@@ -869,7 +732,7 @@ static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
869 return pram_ofs; 732 return pram_ofs;
870} 733}
871 734
872static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) 735static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
873{ 736{
874 struct device *dev = mspi->dev; 737 struct device *dev = mspi->dev;
875 struct device_node *np = dev->of_node; 738 struct device_node *np = dev->of_node;
@@ -881,7 +744,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
881 if (!(mspi->flags & SPI_CPM_MODE)) 744 if (!(mspi->flags & SPI_CPM_MODE))
882 return 0; 745 return 0;
883 746
884 if (!mpc8xxx_spi_alloc_dummy_rx()) 747 if (!fsl_spi_alloc_dummy_rx())
885 return -ENOMEM; 748 return -ENOMEM;
886 749
887 if (mspi->flags & SPI_QE) { 750 if (mspi->flags & SPI_QE) {
@@ -902,7 +765,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
902 } 765 }
903 } 766 }
904 767
905 pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi); 768 pram_ofs = fsl_spi_cpm_get_pram(mspi);
906 if (IS_ERR_VALUE(pram_ofs)) { 769 if (IS_ERR_VALUE(pram_ofs)) {
907 dev_err(dev, "can't allocate spi parameter ram\n"); 770 dev_err(dev, "can't allocate spi parameter ram\n");
908 goto err_pram; 771 goto err_pram;
@@ -922,7 +785,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
922 goto err_dummy_tx; 785 goto err_dummy_tx;
923 } 786 }
924 787
925 mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR, 788 mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
926 DMA_FROM_DEVICE); 789 DMA_FROM_DEVICE);
927 if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { 790 if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
928 dev_err(dev, "unable to map dummy rx buffer\n"); 791 dev_err(dev, "unable to map dummy rx buffer\n");
@@ -960,11 +823,11 @@ err_dummy_tx:
960err_bds: 823err_bds:
961 cpm_muram_free(pram_ofs); 824 cpm_muram_free(pram_ofs);
962err_pram: 825err_pram:
963 mpc8xxx_spi_free_dummy_rx(); 826 fsl_spi_free_dummy_rx();
964 return -ENOMEM; 827 return -ENOMEM;
965} 828}
966 829
967static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi) 830static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
968{ 831{
969 struct device *dev = mspi->dev; 832 struct device *dev = mspi->dev;
970 833
@@ -972,30 +835,22 @@ static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
972 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); 835 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
973 cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); 836 cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
974 cpm_muram_free(cpm_muram_offset(mspi->pram)); 837 cpm_muram_free(cpm_muram_offset(mspi->pram));
975 mpc8xxx_spi_free_dummy_rx(); 838 fsl_spi_free_dummy_rx();
976} 839}
977 840
978static const char *mpc8xxx_spi_strmode(unsigned int flags) 841static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
979{ 842{
980 if (flags & SPI_QE_CPU_MODE) { 843 iounmap(mspi->reg_base);
981 return "QE CPU"; 844 fsl_spi_cpm_free(mspi);
982 } else if (flags & SPI_CPM_MODE) {
983 if (flags & SPI_QE)
984 return "QE";
985 else if (flags & SPI_CPM2)
986 return "CPM2";
987 else
988 return "CPM1";
989 }
990 return "CPU";
991} 845}
992 846
993static struct spi_master * __devinit 847static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
994mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) 848 struct resource *mem, unsigned int irq)
995{ 849{
996 struct fsl_spi_platform_data *pdata = dev->platform_data; 850 struct fsl_spi_platform_data *pdata = dev->platform_data;
997 struct spi_master *master; 851 struct spi_master *master;
998 struct mpc8xxx_spi *mpc8xxx_spi; 852 struct mpc8xxx_spi *mpc8xxx_spi;
853 struct fsl_spi_reg *reg_base;
999 u32 regval; 854 u32 regval;
1000 int ret = 0; 855 int ret = 0;
1001 856
@@ -1007,132 +862,77 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
1007 862
1008 dev_set_drvdata(dev, master); 863 dev_set_drvdata(dev, master);
1009 864
1010 /* the spi->mode bits understood by this driver: */ 865 ret = mpc8xxx_spi_probe(dev, mem, irq);
1011 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH 866 if (ret)
1012 | SPI_LSB_FIRST | SPI_LOOP; 867 goto err_probe;
1013 868
1014 master->setup = mpc8xxx_spi_setup; 869 master->setup = fsl_spi_setup;
1015 master->transfer = mpc8xxx_spi_transfer;
1016 master->cleanup = mpc8xxx_spi_cleanup;
1017 master->dev.of_node = dev->of_node;
1018 870
1019 mpc8xxx_spi = spi_master_get_devdata(master); 871 mpc8xxx_spi = spi_master_get_devdata(master);
1020 mpc8xxx_spi->dev = dev; 872 mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg;
1021 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; 873 mpc8xxx_spi->spi_remove = fsl_spi_remove;
1022 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; 874
1023 mpc8xxx_spi->flags = pdata->flags;
1024 mpc8xxx_spi->spibrg = pdata->sysclk;
1025 875
1026 ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi); 876 ret = fsl_spi_cpm_init(mpc8xxx_spi);
1027 if (ret) 877 if (ret)
1028 goto err_cpm_init; 878 goto err_cpm_init;
1029 879
1030 mpc8xxx_spi->rx_shift = 0;
1031 mpc8xxx_spi->tx_shift = 0;
1032 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { 880 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
1033 mpc8xxx_spi->rx_shift = 16; 881 mpc8xxx_spi->rx_shift = 16;
1034 mpc8xxx_spi->tx_shift = 24; 882 mpc8xxx_spi->tx_shift = 24;
1035 } 883 }
1036 884
1037 init_completion(&mpc8xxx_spi->done); 885 mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
1038 886 if (mpc8xxx_spi->reg_base == NULL) {
1039 mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
1040 if (mpc8xxx_spi->base == NULL) {
1041 ret = -ENOMEM; 887 ret = -ENOMEM;
1042 goto err_ioremap; 888 goto err_ioremap;
1043 } 889 }
1044 890
1045 mpc8xxx_spi->irq = irq;
1046
1047 /* Register for SPI Interrupt */ 891 /* Register for SPI Interrupt */
1048 ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq, 892 ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
1049 0, "mpc8xxx_spi", mpc8xxx_spi); 893 0, "fsl_spi", mpc8xxx_spi);
1050 894
1051 if (ret != 0) 895 if (ret != 0)
1052 goto unmap_io; 896 goto free_irq;
1053 897
1054 master->bus_num = pdata->bus_num; 898 reg_base = mpc8xxx_spi->reg_base;
1055 master->num_chipselect = pdata->max_chipselect;
1056 899
1057 /* SPI controller initializations */ 900 /* SPI controller initializations */
1058 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0); 901 mpc8xxx_spi_write_reg(&reg_base->mode, 0);
1059 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); 902 mpc8xxx_spi_write_reg(&reg_base->mask, 0);
1060 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0); 903 mpc8xxx_spi_write_reg(&reg_base->command, 0);
1061 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff); 904 mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
1062 905
1063 /* Enable SPI interface */ 906 /* Enable SPI interface */
1064 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 907 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
1065 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) 908 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
1066 regval |= SPMODE_OP; 909 regval |= SPMODE_OP;
1067 910
1068 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); 911 mpc8xxx_spi_write_reg(&reg_base->mode, regval);
1069 spin_lock_init(&mpc8xxx_spi->lock);
1070 init_completion(&mpc8xxx_spi->done);
1071 INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
1072 INIT_LIST_HEAD(&mpc8xxx_spi->queue);
1073
1074 mpc8xxx_spi->workqueue = create_singlethread_workqueue(
1075 dev_name(master->dev.parent));
1076 if (mpc8xxx_spi->workqueue == NULL) {
1077 ret = -EBUSY;
1078 goto free_irq;
1079 }
1080 912
1081 ret = spi_register_master(master); 913 ret = spi_register_master(master);
1082 if (ret < 0) 914 if (ret < 0)
1083 goto unreg_master; 915 goto unreg_master;
1084 916
1085 dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base, 917 dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
1086 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); 918 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
1087 919
1088 return master; 920 return master;
1089 921
1090unreg_master: 922unreg_master:
1091 destroy_workqueue(mpc8xxx_spi->workqueue);
1092free_irq:
1093 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 923 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
1094unmap_io: 924free_irq:
1095 iounmap(mpc8xxx_spi->base); 925 iounmap(mpc8xxx_spi->reg_base);
1096err_ioremap: 926err_ioremap:
1097 mpc8xxx_spi_cpm_free(mpc8xxx_spi); 927 fsl_spi_cpm_free(mpc8xxx_spi);
1098err_cpm_init: 928err_cpm_init:
929err_probe:
1099 spi_master_put(master); 930 spi_master_put(master);
1100err: 931err:
1101 return ERR_PTR(ret); 932 return ERR_PTR(ret);
1102} 933}
1103 934
1104static int __devexit mpc8xxx_spi_remove(struct device *dev) 935static void fsl_spi_cs_control(struct spi_device *spi, bool on)
1105{
1106 struct mpc8xxx_spi *mpc8xxx_spi;
1107 struct spi_master *master;
1108
1109 master = dev_get_drvdata(dev);
1110 mpc8xxx_spi = spi_master_get_devdata(master);
1111
1112 flush_workqueue(mpc8xxx_spi->workqueue);
1113 destroy_workqueue(mpc8xxx_spi->workqueue);
1114 spi_unregister_master(master);
1115
1116 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
1117 iounmap(mpc8xxx_spi->base);
1118 mpc8xxx_spi_cpm_free(mpc8xxx_spi);
1119
1120 return 0;
1121}
1122
1123struct mpc8xxx_spi_probe_info {
1124 struct fsl_spi_platform_data pdata;
1125 int *gpios;
1126 bool *alow_flags;
1127};
1128
1129static struct mpc8xxx_spi_probe_info *
1130to_of_pinfo(struct fsl_spi_platform_data *pdata)
1131{
1132 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
1133}
1134
1135static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
1136{ 936{
1137 struct device *dev = spi->dev.parent; 937 struct device *dev = spi->dev.parent;
1138 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); 938 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
@@ -1143,7 +943,7 @@ static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
1143 gpio_set_value(gpio, on ^ alow); 943 gpio_set_value(gpio, on ^ alow);
1144} 944}
1145 945
1146static int of_mpc8xxx_spi_get_chipselects(struct device *dev) 946static int of_fsl_spi_get_chipselects(struct device *dev)
1147{ 947{
1148 struct device_node *np = dev->of_node; 948 struct device_node *np = dev->of_node;
1149 struct fsl_spi_platform_data *pdata = dev->platform_data; 949 struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -1204,7 +1004,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
1204 } 1004 }
1205 1005
1206 pdata->max_chipselect = ngpios; 1006 pdata->max_chipselect = ngpios;
1207 pdata->cs_control = mpc8xxx_spi_cs_control; 1007 pdata->cs_control = fsl_spi_cs_control;
1208 1008
1209 return 0; 1009 return 0;
1210 1010
@@ -1223,7 +1023,7 @@ err_alloc_flags:
1223 return ret; 1023 return ret;
1224} 1024}
1225 1025
1226static int of_mpc8xxx_spi_free_chipselects(struct device *dev) 1026static int of_fsl_spi_free_chipselects(struct device *dev)
1227{ 1027{
1228 struct fsl_spi_platform_data *pdata = dev->platform_data; 1028 struct fsl_spi_platform_data *pdata = dev->platform_data;
1229 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); 1029 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
@@ -1242,50 +1042,21 @@ static int of_mpc8xxx_spi_free_chipselects(struct device *dev)
1242 return 0; 1042 return 0;
1243} 1043}
1244 1044
1245static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, 1045static int __devinit of_fsl_spi_probe(struct platform_device *ofdev,
1246 const struct of_device_id *ofid) 1046 const struct of_device_id *ofid)
1247{ 1047{
1248 struct device *dev = &ofdev->dev; 1048 struct device *dev = &ofdev->dev;
1249 struct device_node *np = ofdev->dev.of_node; 1049 struct device_node *np = ofdev->dev.of_node;
1250 struct mpc8xxx_spi_probe_info *pinfo;
1251 struct fsl_spi_platform_data *pdata;
1252 struct spi_master *master; 1050 struct spi_master *master;
1253 struct resource mem; 1051 struct resource mem;
1254 struct resource irq; 1052 struct resource irq;
1255 const void *prop;
1256 int ret = -ENOMEM; 1053 int ret = -ENOMEM;
1257 1054
1258 pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); 1055 ret = of_mpc8xxx_spi_probe(ofdev, ofid);
1259 if (!pinfo) 1056 if (ret)
1260 return -ENOMEM; 1057 return ret;
1261
1262 pdata = &pinfo->pdata;
1263 dev->platform_data = pdata;
1264
1265 /* Allocate bus num dynamically. */
1266 pdata->bus_num = -1;
1267
1268 /* SPI controller is either clocked from QE or SoC clock. */
1269 pdata->sysclk = get_brgfreq();
1270 if (pdata->sysclk == -1) {
1271 pdata->sysclk = fsl_get_sys_freq();
1272 if (pdata->sysclk == -1) {
1273 ret = -ENODEV;
1274 goto err_clk;
1275 }
1276 }
1277 1058
1278 prop = of_get_property(np, "mode", NULL); 1059 ret = of_fsl_spi_get_chipselects(dev);
1279 if (prop && !strcmp(prop, "cpu-qe"))
1280 pdata->flags = SPI_QE_CPU_MODE;
1281 else if (prop && !strcmp(prop, "qe"))
1282 pdata->flags = SPI_CPM_MODE | SPI_QE;
1283 else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
1284 pdata->flags = SPI_CPM_MODE | SPI_CPM2;
1285 else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
1286 pdata->flags = SPI_CPM_MODE | SPI_CPM1;
1287
1288 ret = of_mpc8xxx_spi_get_chipselects(dev);
1289 if (ret) 1060 if (ret)
1290 goto err; 1061 goto err;
1291 1062
@@ -1299,7 +1070,7 @@ static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
1299 goto err; 1070 goto err;
1300 } 1071 }
1301 1072
1302 master = mpc8xxx_spi_probe(dev, &mem, irq.start); 1073 master = fsl_spi_probe(dev, &mem, irq.start);
1303 if (IS_ERR(master)) { 1074 if (IS_ERR(master)) {
1304 ret = PTR_ERR(master); 1075 ret = PTR_ERR(master);
1305 goto err; 1076 goto err;
@@ -1308,42 +1079,40 @@ static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
1308 return 0; 1079 return 0;
1309 1080
1310err: 1081err:
1311 of_mpc8xxx_spi_free_chipselects(dev); 1082 of_fsl_spi_free_chipselects(dev);
1312err_clk:
1313 kfree(pinfo);
1314 return ret; 1083 return ret;
1315} 1084}
1316 1085
1317static int __devexit of_mpc8xxx_spi_remove(struct platform_device *ofdev) 1086static int __devexit of_fsl_spi_remove(struct platform_device *ofdev)
1318{ 1087{
1319 int ret; 1088 int ret;
1320 1089
1321 ret = mpc8xxx_spi_remove(&ofdev->dev); 1090 ret = mpc8xxx_spi_remove(&ofdev->dev);
1322 if (ret) 1091 if (ret)
1323 return ret; 1092 return ret;
1324 of_mpc8xxx_spi_free_chipselects(&ofdev->dev); 1093 of_fsl_spi_free_chipselects(&ofdev->dev);
1325 return 0; 1094 return 0;
1326} 1095}
1327 1096
1328static const struct of_device_id of_mpc8xxx_spi_match[] = { 1097static const struct of_device_id of_fsl_spi_match[] = {
1329 { .compatible = "fsl,spi" }, 1098 { .compatible = "fsl,spi" },
1330 {}, 1099 {}
1331}; 1100};
1332MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); 1101MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
1333 1102
1334static struct of_platform_driver of_mpc8xxx_spi_driver = { 1103static struct of_platform_driver of_fsl_spi_driver = {
1335 .driver = { 1104 .driver = {
1336 .name = "mpc8xxx_spi", 1105 .name = "fsl_spi",
1337 .owner = THIS_MODULE, 1106 .owner = THIS_MODULE,
1338 .of_match_table = of_mpc8xxx_spi_match, 1107 .of_match_table = of_fsl_spi_match,
1339 }, 1108 },
1340 .probe = of_mpc8xxx_spi_probe, 1109 .probe = of_fsl_spi_probe,
1341 .remove = __devexit_p(of_mpc8xxx_spi_remove), 1110 .remove = __devexit_p(of_fsl_spi_remove),
1342}; 1111};
1343 1112
1344#ifdef CONFIG_MPC832x_RDB 1113#ifdef CONFIG_MPC832x_RDB
1345/* 1114/*
1346 * XXX XXX XXX 1115 * XXX XXX XXX
1347 * This is a "legacy" platform driver; it was used by the MPC8323E-RDB boards 1116 * This is a "legacy" platform driver; it was used by the MPC8323E-RDB boards
1348 * only. The driver should go away soon, since newer MPC8323E-RDB's device 1117 * only. The driver should go away soon, since newer MPC8323E-RDB's device
1349 * tree can work with OpenFirmware driver. But for now we support old trees 1118 * tree can work with OpenFirmware driver. But for now we support old trees
@@ -1366,7 +1135,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
1366 if (irq <= 0) 1135 if (irq <= 0)
1367 return -EINVAL; 1136 return -EINVAL;
1368 1137
1369 master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); 1138 master = fsl_spi_probe(&pdev->dev, mem, irq);
1370 if (IS_ERR(master)) 1139 if (IS_ERR(master))
1371 return PTR_ERR(master); 1140 return PTR_ERR(master);
1372 return 0; 1141 return 0;
@@ -1405,21 +1174,20 @@ static void __init legacy_driver_register(void) {}
1405static void __exit legacy_driver_unregister(void) {} 1174static void __exit legacy_driver_unregister(void) {}
1406#endif /* CONFIG_MPC832x_RDB */ 1175#endif /* CONFIG_MPC832x_RDB */
1407 1176
1408static int __init mpc8xxx_spi_init(void) 1177static int __init fsl_spi_init(void)
1409{ 1178{
1410 legacy_driver_register(); 1179 legacy_driver_register();
1411 return of_register_platform_driver(&of_mpc8xxx_spi_driver); 1180 return of_register_platform_driver(&of_fsl_spi_driver);
1412} 1181}
1182module_init(fsl_spi_init);
1413 1183
1414static void __exit mpc8xxx_spi_exit(void) 1184static void __exit fsl_spi_exit(void)
1415{ 1185{
1416 of_unregister_platform_driver(&of_mpc8xxx_spi_driver); 1186 of_unregister_platform_driver(&of_fsl_spi_driver);
1417 legacy_driver_unregister(); 1187 legacy_driver_unregister();
1418} 1188}
1419 1189module_exit(fsl_spi_exit);
1420module_init(mpc8xxx_spi_init);
1421module_exit(mpc8xxx_spi_exit);
1422 1190
1423MODULE_AUTHOR("Kumar Gala"); 1191MODULE_AUTHOR("Kumar Gala");
1424MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver"); 1192MODULE_DESCRIPTION("Simple Freescale SPI Driver");
1425MODULE_LICENSE("GPL"); 1193MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index c3038da2648a..795828b90f45 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -261,15 +261,25 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
261 chcfg |= S3C64XX_SPI_CH_TXCH_ON; 261 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
262 if (dma_mode) { 262 if (dma_mode) {
263 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; 263 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
264 s3c2410_dma_config(sdd->tx_dmach, 1); 264 s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8);
265 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, 265 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
266 xfer->tx_dma, xfer->len); 266 xfer->tx_dma, xfer->len);
267 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); 267 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
268 } else { 268 } else {
269 unsigned char *buf = (unsigned char *) xfer->tx_buf; 269 switch (sdd->cur_bpw) {
270 int i = 0; 270 case 32:
271 while (i < xfer->len) 271 iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
272 writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA); 272 xfer->tx_buf, xfer->len / 4);
273 break;
274 case 16:
275 iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
276 xfer->tx_buf, xfer->len / 2);
277 break;
278 default:
279 iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
280 xfer->tx_buf, xfer->len);
281 break;
282 }
273 } 283 }
274 } 284 }
275 285
@@ -286,7 +296,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
286 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) 296 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
287 | S3C64XX_SPI_PACKET_CNT_EN, 297 | S3C64XX_SPI_PACKET_CNT_EN,
288 regs + S3C64XX_SPI_PACKET_CNT); 298 regs + S3C64XX_SPI_PACKET_CNT);
289 s3c2410_dma_config(sdd->rx_dmach, 1); 299 s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8);
290 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, 300 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
291 xfer->rx_dma, xfer->len); 301 xfer->rx_dma, xfer->len);
292 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); 302 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
@@ -366,20 +376,26 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
366 return -EIO; 376 return -EIO;
367 } 377 }
368 } else { 378 } else {
369 unsigned char *buf;
370 int i;
371
372 /* If it was only Tx */ 379 /* If it was only Tx */
373 if (xfer->rx_buf == NULL) { 380 if (xfer->rx_buf == NULL) {
374 sdd->state &= ~TXBUSY; 381 sdd->state &= ~TXBUSY;
375 return 0; 382 return 0;
376 } 383 }
377 384
378 i = 0; 385 switch (sdd->cur_bpw) {
379 buf = xfer->rx_buf; 386 case 32:
380 while (i < xfer->len) 387 ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
381 buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA); 388 xfer->rx_buf, xfer->len / 4);
382 389 break;
390 case 16:
391 ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
392 xfer->rx_buf, xfer->len / 2);
393 break;
394 default:
395 ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
396 xfer->rx_buf, xfer->len);
397 break;
398 }
383 sdd->state &= ~RXBUSY; 399 sdd->state &= ~RXBUSY;
384 } 400 }
385 401
@@ -399,13 +415,18 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
399 415
400static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) 416static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
401{ 417{
418 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
402 void __iomem *regs = sdd->regs; 419 void __iomem *regs = sdd->regs;
403 u32 val; 420 u32 val;
404 421
405 /* Disable Clock */ 422 /* Disable Clock */
406 val = readl(regs + S3C64XX_SPI_CLK_CFG); 423 if (sci->clk_from_cmu) {
407 val &= ~S3C64XX_SPI_ENCLK_ENABLE; 424 clk_disable(sdd->src_clk);
408 writel(val, regs + S3C64XX_SPI_CLK_CFG); 425 } else {
426 val = readl(regs + S3C64XX_SPI_CLK_CFG);
427 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
428 writel(val, regs + S3C64XX_SPI_CLK_CFG);
429 }
409 430
410 /* Set Polarity and Phase */ 431 /* Set Polarity and Phase */
411 val = readl(regs + S3C64XX_SPI_CH_CFG); 432 val = readl(regs + S3C64XX_SPI_CH_CFG);
@@ -429,29 +450,39 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
429 switch (sdd->cur_bpw) { 450 switch (sdd->cur_bpw) {
430 case 32: 451 case 32:
431 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; 452 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
453 val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
432 break; 454 break;
433 case 16: 455 case 16:
434 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; 456 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
457 val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
435 break; 458 break;
436 default: 459 default:
437 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; 460 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
461 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
438 break; 462 break;
439 } 463 }
440 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
441 464
442 writel(val, regs + S3C64XX_SPI_MODE_CFG); 465 writel(val, regs + S3C64XX_SPI_MODE_CFG);
443 466
444 /* Configure Clock */ 467 if (sci->clk_from_cmu) {
445 val = readl(regs + S3C64XX_SPI_CLK_CFG); 468 /* Configure Clock */
446 val &= ~S3C64XX_SPI_PSR_MASK; 469 /* There is a half-multiplier before the SPI */
447 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) 470 clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
448 & S3C64XX_SPI_PSR_MASK); 471 /* Enable Clock */
449 writel(val, regs + S3C64XX_SPI_CLK_CFG); 472 clk_enable(sdd->src_clk);
450 473 } else {
451 /* Enable Clock */ 474 /* Configure Clock */
452 val = readl(regs + S3C64XX_SPI_CLK_CFG); 475 val = readl(regs + S3C64XX_SPI_CLK_CFG);
453 val |= S3C64XX_SPI_ENCLK_ENABLE; 476 val &= ~S3C64XX_SPI_PSR_MASK;
454 writel(val, regs + S3C64XX_SPI_CLK_CFG); 477 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
478 & S3C64XX_SPI_PSR_MASK);
479 writel(val, regs + S3C64XX_SPI_CLK_CFG);
480
481 /* Enable Clock */
482 val = readl(regs + S3C64XX_SPI_CLK_CFG);
483 val |= S3C64XX_SPI_ENCLK_ENABLE;
484 writel(val, regs + S3C64XX_SPI_CLK_CFG);
485 }
455} 486}
456 487
457static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, 488static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
@@ -499,6 +530,7 @@ static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
499static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, 530static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
500 struct spi_message *msg) 531 struct spi_message *msg)
501{ 532{
533 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
502 struct device *dev = &sdd->pdev->dev; 534 struct device *dev = &sdd->pdev->dev;
503 struct spi_transfer *xfer; 535 struct spi_transfer *xfer;
504 536
@@ -514,6 +546,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
514 /* Map until end or first fail */ 546 /* Map until end or first fail */
515 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 547 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
516 548
549 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
550 continue;
551
517 if (xfer->tx_buf != NULL) { 552 if (xfer->tx_buf != NULL) {
518 xfer->tx_dma = dma_map_single(dev, 553 xfer->tx_dma = dma_map_single(dev,
519 (void *)xfer->tx_buf, xfer->len, 554 (void *)xfer->tx_buf, xfer->len,
@@ -545,6 +580,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
545static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, 580static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
546 struct spi_message *msg) 581 struct spi_message *msg)
547{ 582{
583 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
548 struct device *dev = &sdd->pdev->dev; 584 struct device *dev = &sdd->pdev->dev;
549 struct spi_transfer *xfer; 585 struct spi_transfer *xfer;
550 586
@@ -553,6 +589,9 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
553 589
554 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 590 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
555 591
592 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
593 continue;
594
556 if (xfer->rx_buf != NULL 595 if (xfer->rx_buf != NULL
557 && xfer->rx_dma != XFER_DMAADDR_INVALID) 596 && xfer->rx_dma != XFER_DMAADDR_INVALID)
558 dma_unmap_single(dev, xfer->rx_dma, 597 dma_unmap_single(dev, xfer->rx_dma,
@@ -608,6 +647,14 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
608 bpw = xfer->bits_per_word ? : spi->bits_per_word; 647 bpw = xfer->bits_per_word ? : spi->bits_per_word;
609 speed = xfer->speed_hz ? : spi->max_speed_hz; 648 speed = xfer->speed_hz ? : spi->max_speed_hz;
610 649
650 if (xfer->len % (bpw / 8)) {
651 dev_err(&spi->dev,
652 "Xfer length(%u) not a multiple of word size(%u)\n",
653 xfer->len, bpw / 8);
654 status = -EIO;
655 goto out;
656 }
657
611 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { 658 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
612 sdd->cur_bpw = bpw; 659 sdd->cur_bpw = bpw;
613 sdd->cur_speed = speed; 660 sdd->cur_speed = speed;
@@ -798,7 +845,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
798 struct s3c64xx_spi_driver_data *sdd; 845 struct s3c64xx_spi_driver_data *sdd;
799 struct s3c64xx_spi_info *sci; 846 struct s3c64xx_spi_info *sci;
800 struct spi_message *msg; 847 struct spi_message *msg;
801 u32 psr, speed;
802 unsigned long flags; 848 unsigned long flags;
803 int err = 0; 849 int err = 0;
804 850
@@ -841,32 +887,37 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
841 } 887 }
842 888
843 /* Check if we can provide the requested rate */ 889 /* Check if we can provide the requested rate */
844 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */ 890 if (!sci->clk_from_cmu) {
845 891 u32 psr, speed;
846 if (spi->max_speed_hz > speed) 892
847 spi->max_speed_hz = speed; 893 /* Max possible */
848 894 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
849 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; 895
850 psr &= S3C64XX_SPI_PSR_MASK; 896 if (spi->max_speed_hz > speed)
851 if (psr == S3C64XX_SPI_PSR_MASK) 897 spi->max_speed_hz = speed;
852 psr--; 898
899 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
900 psr &= S3C64XX_SPI_PSR_MASK;
901 if (psr == S3C64XX_SPI_PSR_MASK)
902 psr--;
903
904 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
905 if (spi->max_speed_hz < speed) {
906 if (psr+1 < S3C64XX_SPI_PSR_MASK) {
907 psr++;
908 } else {
909 err = -EINVAL;
910 goto setup_exit;
911 }
912 }
853 913
854 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); 914 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
855 if (spi->max_speed_hz < speed) { 915 if (spi->max_speed_hz >= speed)
856 if (psr+1 < S3C64XX_SPI_PSR_MASK) { 916 spi->max_speed_hz = speed;
857 psr++; 917 else
858 } else {
859 err = -EINVAL; 918 err = -EINVAL;
860 goto setup_exit;
861 }
862 } 919 }
863 920
864 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
865 if (spi->max_speed_hz >= speed)
866 spi->max_speed_hz = speed;
867 else
868 err = -EINVAL;
869
870setup_exit: 921setup_exit:
871 922
872 /* setup() returns with device de-selected */ 923 /* setup() returns with device de-selected */
@@ -888,7 +939,8 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
888 /* Disable Interrupts - we use Polling if not DMA mode */ 939 /* Disable Interrupts - we use Polling if not DMA mode */
889 writel(0, regs + S3C64XX_SPI_INT_EN); 940 writel(0, regs + S3C64XX_SPI_INT_EN);
890 941
891 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, 942 if (!sci->clk_from_cmu)
943 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
892 regs + S3C64XX_SPI_CLK_CFG); 944 regs + S3C64XX_SPI_CLK_CFG);
893 writel(0, regs + S3C64XX_SPI_MODE_CFG); 945 writel(0, regs + S3C64XX_SPI_MODE_CFG);
894 writel(0, regs + S3C64XX_SPI_PACKET_CNT); 946 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi_topcliff_pch.c
new file mode 100644
index 000000000000..58e187f45ec7
--- /dev/null
+++ b/drivers/spi/spi_topcliff_pch.c
@@ -0,0 +1,1303 @@
1/*
2 * SPI bus driver for the Topcliff PCH used by Intel SoCs
3 *
4 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#include <linux/delay.h>
21#include <linux/pci.h>
22#include <linux/wait.h>
23#include <linux/spi/spi.h>
24#include <linux/interrupt.h>
25#include <linux/sched.h>
26#include <linux/spi/spidev.h>
27#include <linux/module.h>
28#include <linux/device.h>
29
30/* Register offsets */
31#define PCH_SPCR 0x00 /* SPI control register */
32#define PCH_SPBRR 0x04 /* SPI baud rate register */
33#define PCH_SPSR 0x08 /* SPI status register */
34#define PCH_SPDWR 0x0C /* SPI write data register */
35#define PCH_SPDRR 0x10 /* SPI read data register */
36#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
37#define PCH_SRST 0x1C /* SPI reset register */
38
39#define PCH_SPSR_TFD 0x000007C0
40#define PCH_SPSR_RFD 0x0000F800
41
42#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11)
43#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6)
44
45#define PCH_RX_THOLD 7
46#define PCH_RX_THOLD_MAX 15
47
48#define PCH_MAX_BAUDRATE 5000000
49#define PCH_MAX_FIFO_DEPTH 16
50
51#define STATUS_RUNNING 1
52#define STATUS_EXITING 2
53#define PCH_SLEEP_TIME 10
54
55#define PCH_ADDRESS_SIZE 0x20
56
57#define SSN_LOW 0x02U
58#define SSN_NO_CONTROL 0x00U
59#define PCH_MAX_CS 0xFF
60#define PCI_DEVICE_ID_GE_SPI 0x8816
61
62#define SPCR_SPE_BIT (1 << 0)
63#define SPCR_MSTR_BIT (1 << 1)
64#define SPCR_LSBF_BIT (1 << 4)
65#define SPCR_CPHA_BIT (1 << 5)
66#define SPCR_CPOL_BIT (1 << 6)
67#define SPCR_TFIE_BIT (1 << 8)
68#define SPCR_RFIE_BIT (1 << 9)
69#define SPCR_FIE_BIT (1 << 10)
70#define SPCR_ORIE_BIT (1 << 11)
71#define SPCR_MDFIE_BIT (1 << 12)
72#define SPCR_FICLR_BIT (1 << 24)
73#define SPSR_TFI_BIT (1 << 0)
74#define SPSR_RFI_BIT (1 << 1)
75#define SPSR_FI_BIT (1 << 2)
76#define SPBRR_SIZE_BIT (1 << 10)
77
78#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
79
80#define SPCR_RFIC_FIELD 20
81#define SPCR_TFIC_FIELD 16
82
83#define SPSR_INT_BITS 0x1F
84#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1))
85#define MASK_RFIC_SPCR_BITS (~(0xf << 20))
86#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12))
87
88#define PCH_CLOCK_HZ 50000000
89#define PCH_MAX_SPBR 1023
90
91
92/**
93 * struct pch_spi_data - Holds the SPI channel specific details
94 * @io_remap_addr: The remapped PCI base address
95 * @master: Pointer to the SPI master structure
96 * @work: Reference to work queue handler
97 * @wk: Workqueue for carrying out execution of the
98 * requests
99 * @wait: Wait queue for waking up upon receiving an
100 * interrupt.
101 * @transfer_complete: Status of SPI Transfer
102 * @bcurrent_msg_processing: Status flag for message processing
103 * @lock: Lock for protecting this structure
104 * @queue: SPI Message queue
105 * @status: Status of the SPI driver
106 * @bpw_len: Length of data to be transferred in bits per
107 * word
108 * @transfer_active: Flag showing active transfer
109 * @tx_index: Transmit data count; for bookkeeping during
110 * transfer
111 * @rx_index: Receive data count; for bookkeeping during
112 * transfer
113 * @pkt_tx_buff: Buffer for data to be transmitted
114 * @pkt_rx_buff: Buffer for received data
115 * @n_curnt_chip: The chip number that this SPI driver currently
116 * operates on
117 * @current_chip: Reference to the current chip that this SPI
118 * driver currently operates on
119 * @current_msg: The current message that this SPI driver is
120 * handling
121 * @cur_trans: The current transfer that this SPI driver is
122 * handling
123 * @board_dat: Reference to the SPI device data structure
124 */
125struct pch_spi_data {
126 void __iomem *io_remap_addr;
127 struct spi_master *master;
128 struct work_struct work;
129 struct workqueue_struct *wk;
130 wait_queue_head_t wait;
131 u8 transfer_complete;
132 u8 bcurrent_msg_processing;
133 spinlock_t lock;
134 struct list_head queue;
135 u8 status;
136 u32 bpw_len;
137 u8 transfer_active;
138 u32 tx_index;
139 u32 rx_index;
140 u16 *pkt_tx_buff;
141 u16 *pkt_rx_buff;
142 u8 n_curnt_chip;
143 struct spi_device *current_chip;
144 struct spi_message *current_msg;
145 struct spi_transfer *cur_trans;
146 struct pch_spi_board_data *board_dat;
147};
148
149/**
150 * struct pch_spi_board_data - Holds the SPI device specific details
151 * @pdev: Pointer to the PCI device
152 * @irq_reg_sts: Status of IRQ registration
153 * @pci_req_sts: Status of pci_request_regions
154 * @suspend_sts: Status of suspend
155 * @data: Pointer to SPI channel data structure
156 */
157struct pch_spi_board_data {
158 struct pci_dev *pdev;
159 u8 irq_reg_sts;
160 u8 pci_req_sts;
161 u8 suspend_sts;
162 struct pch_spi_data *data;
163};
164
165static struct pci_device_id pch_spi_pcidev_id[] = {
166 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)},
167 {0,}
168};
169
170/**
171 * pch_spi_writereg() - Performs register writes
172 * @master: Pointer to struct spi_master.
173 * @idx: Register offset.
174 * @val: Value to be written to register.
175 */
176static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
177{
178 struct pch_spi_data *data = spi_master_get_devdata(master);
179 iowrite32(val, (data->io_remap_addr + idx));
180}
181
182/**
183 * pch_spi_readreg() - Performs register reads
184 * @master: Pointer to struct spi_master.
185 * @idx: Register offset.
186 */
187static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
188{
189 struct pch_spi_data *data = spi_master_get_devdata(master);
190 return ioread32(data->io_remap_addr + idx);
191}
192
193static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
194 u32 set, u32 clr)
195{
196 u32 tmp = pch_spi_readreg(master, idx);
197 tmp = (tmp & ~clr) | set;
198 pch_spi_writereg(master, idx, tmp);
199}
200
201static void pch_spi_set_master_mode(struct spi_master *master)
202{
203 pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
204}
205
206/**
207 * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
208 * @master: Pointer to struct spi_master.
209 */
210static void pch_spi_clear_fifo(struct spi_master *master)
211{
212 pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
213 pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
214}
215
216static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
217 void __iomem *io_remap_addr)
218{
219 u32 n_read, tx_index, rx_index, bpw_len;
220 u16 *pkt_rx_buffer, *pkt_tx_buff;
221 int read_cnt;
222 u32 reg_spcr_val;
223 void __iomem *spsr;
224 void __iomem *spdrr;
225 void __iomem *spdwr;
226
227 spsr = io_remap_addr + PCH_SPSR;
228 iowrite32(reg_spsr_val, spsr);
229
230 if (data->transfer_active) {
231 rx_index = data->rx_index;
232 tx_index = data->tx_index;
233 bpw_len = data->bpw_len;
234 pkt_rx_buffer = data->pkt_rx_buff;
235 pkt_tx_buff = data->pkt_tx_buff;
236
237 spdrr = io_remap_addr + PCH_SPDRR;
238 spdwr = io_remap_addr + PCH_SPDWR;
239
240 n_read = PCH_READABLE(reg_spsr_val);
241
242 for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
243 pkt_rx_buffer[rx_index++] = ioread32(spdrr);
244 if (tx_index < bpw_len)
245 iowrite32(pkt_tx_buff[tx_index++], spdwr);
246 }
247
248 /* disable RFI if not needed */
249 if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
250 reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
251 reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
252
253 /* reset rx threshold */
254 reg_spcr_val &= MASK_RFIC_SPCR_BITS;
255 reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
256 iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))),
257 (io_remap_addr + PCH_SPCR));
258 }
259
260 /* update counts */
261 data->tx_index = tx_index;
262 data->rx_index = rx_index;
263
264 }
265
266 /* if transfer complete interrupt */
267 if (reg_spsr_val & SPSR_FI_BIT) {
268 /* disable FI & RFI interrupts */
269 pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
270 SPCR_FIE_BIT | SPCR_TFIE_BIT);
271
272 /* transfer is completed; inform pch_spi_process_messages */
273 data->transfer_complete = true;
274 wake_up(&data->wait);
275 }
276}
277
278/**
279 * pch_spi_handler() - Interrupt handler
280 * @irq: The interrupt number.
281 * @dev_id: Pointer to struct pch_spi_board_data.
282 */
283static irqreturn_t pch_spi_handler(int irq, void *dev_id)
284{
285 u32 reg_spsr_val;
286 struct pch_spi_data *data;
287 void __iomem *spsr;
288 void __iomem *io_remap_addr;
289 irqreturn_t ret = IRQ_NONE;
290 struct pch_spi_board_data *board_dat = dev_id;
291
292 if (board_dat->suspend_sts) {
293 dev_dbg(&board_dat->pdev->dev,
294 "%s returning due to suspend\n", __func__);
295 return IRQ_NONE;
296 }
297
298 data = board_dat->data;
299 io_remap_addr = data->io_remap_addr;
300 spsr = io_remap_addr + PCH_SPSR;
301
302 reg_spsr_val = ioread32(spsr);
303
304 /* Check if the interrupt is for SPI device */
305 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
306 pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
307 ret = IRQ_HANDLED;
308 }
309
310 dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
311 __func__, ret);
312
313 return ret;
314}
315
316/**
317 * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
318 * @master: Pointer to struct spi_master.
319 * @speed_hz: Baud rate.
320 */
321static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
322{
323 u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
324
325 /* if the requested baud rate is lower than we can support, clamp the divider */
326 if (n_spbr > PCH_MAX_SPBR)
327 n_spbr = PCH_MAX_SPBR;
328
329 pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS);
330}
331
332/**
333 * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
334 * @master: Pointer to struct spi_master.
335 * @bits_per_word: Bits per word for SPI transfer.
336 */
337static void pch_spi_set_bits_per_word(struct spi_master *master,
338 u8 bits_per_word)
339{
340 if (bits_per_word == 8)
341 pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
342 else
343 pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
344}
345
346/**
347 * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
348 * @spi: Pointer to struct spi_device.
349 */
350static void pch_spi_setup_transfer(struct spi_device *spi)
351{
352 u32 flags = 0;
353
354 dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
355 __func__, pch_spi_readreg(spi->master, PCH_SPBRR),
356 spi->max_speed_hz);
357 pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
358
359 /* set bits per word */
360 pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
361
362 if (!(spi->mode & SPI_LSB_FIRST))
363 flags |= SPCR_LSBF_BIT;
364 if (spi->mode & SPI_CPOL)
365 flags |= SPCR_CPOL_BIT;
366 if (spi->mode & SPI_CPHA)
367 flags |= SPCR_CPHA_BIT;
368 pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
369 (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
370
371 /* Clear the FIFO by toggling FICLR to 1 and back to 0 */
372 pch_spi_clear_fifo(spi->master);
373}
374
375/**
376 * pch_spi_reset() - Clears SPI registers
377 * @master: Pointer to struct spi_master.
378 */
379static void pch_spi_reset(struct spi_master *master)
380{
381 /* write 1 to reset SPI */
382 pch_spi_writereg(master, PCH_SRST, 0x1);
383
384 /* clear reset */
385 pch_spi_writereg(master, PCH_SRST, 0x0);
386}
387
388static int pch_spi_setup(struct spi_device *pspi)
389{
390 /* check bits per word */
391 if (pspi->bits_per_word == 0) {
392 pspi->bits_per_word = 8;
393 dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
394 }
395
396 if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) {
397 dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__);
398 return -EINVAL;
399 }
400
401 /* Check baud rate setting */
402 /* if baud rate of chip is greater than
403 max we can support,return error */
404 if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE)
405 pspi->max_speed_hz = PCH_MAX_BAUDRATE;
406
407 dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__,
408 (pspi->mode) & (SPI_CPOL | SPI_CPHA));
409
410 return 0;
411}
412
413static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
414{
415
416 struct spi_transfer *transfer;
417 struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
418 int retval;
419 unsigned long flags;
420
421 /* validate spi message and baud rate */
422 if (unlikely(list_empty(&pmsg->transfers) == 1)) {
423 dev_err(&pspi->dev, "%s list empty\n", __func__);
424 retval = -EINVAL;
425 goto err_out;
426 }
427
428 if (unlikely(pspi->max_speed_hz == 0)) {
429 dev_err(&pspi->dev, "%s pch_spi_transfer maxspeed=%d\n",
430 __func__, pspi->max_speed_hz);
431 retval = -EINVAL;
432 goto err_out;
433 }
434
435 dev_dbg(&pspi->dev, "%s Transfer List not empty. "
436 "Transfer Speed is set.\n", __func__);
437
438 /* validate Tx/Rx buffers and Transfer length */
439 list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
440 if (!transfer->tx_buf && !transfer->rx_buf) {
441 dev_err(&pspi->dev,
442 "%s Tx and Rx buffer NULL\n", __func__);
443 retval = -EINVAL;
444 goto err_out;
445 }
446
447 if (!transfer->len) {
448 dev_err(&pspi->dev, "%s Transfer length invalid\n",
449 __func__);
450 retval = -EINVAL;
451 goto err_out;
452 }
453
454 dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
455 " valid\n", __func__);
456
457 /* if a baud rate has been specified, validate it */
458 if (transfer->speed_hz > PCH_MAX_BAUDRATE)
459 transfer->speed_hz = PCH_MAX_BAUDRATE;
460
461 /* if bits per word has been specified, validate it */
462 if (transfer->bits_per_word) {
463 if ((transfer->bits_per_word != 8)
464 && (transfer->bits_per_word != 16)) {
465 retval = -EINVAL;
466 dev_err(&pspi->dev,
467 "%s Invalid bits per word\n", __func__);
468 goto err_out;
469 }
470 }
471 }
472
473 spin_lock_irqsave(&data->lock, flags);
474
475 /* We won't process any messages if we have been asked to terminate */
476 if (data->status == STATUS_EXITING) {
477 dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
478 retval = -ESHUTDOWN;
479 goto err_return_spinlock;
480 }
481
482 /* If suspended, return -EINVAL */
483 if (data->board_dat->suspend_sts) {
484 dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
485 retval = -EINVAL;
486 goto err_return_spinlock;
487 }
488
489 /* set status of message */
490 pmsg->actual_length = 0;
491 dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
492
493 pmsg->status = -EINPROGRESS;
494
495 /* add message to queue */
496 list_add_tail(&pmsg->queue, &data->queue);
497 dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
498
499 /* schedule work queue to run */
500 queue_work(data->wk, &data->work);
501 dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);
502
503 retval = 0;
504
505err_return_spinlock:
506 spin_unlock_irqrestore(&data->lock, flags);
507err_out:
508 dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
509 return retval;
510}
511
512static inline void pch_spi_select_chip(struct pch_spi_data *data,
513 struct spi_device *pspi)
514{
515 if (data->current_chip != NULL) {
516 if (pspi->chip_select != data->n_curnt_chip) {
517 dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
518 data->current_chip = NULL;
519 }
520 }
521
522 data->current_chip = pspi;
523
524 data->n_curnt_chip = data->current_chip->chip_select;
525
526 dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
527 pch_spi_setup_transfer(pspi);
528}
529
530static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
531 struct spi_message **ppmsg)
532{
533 int size;
534 u32 n_writes;
535 int j;
536 struct spi_message *pmsg;
537 const u8 *tx_buf;
538 const u16 *tx_sbuf;
539
540 pmsg = *ppmsg;
541
542 /* set baud rate if needed */
543 if (data->cur_trans->speed_hz) {
544 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
545 pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
546 }
547
548 /* set bits per word if needed */
549 if (data->cur_trans->bits_per_word &&
550 (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
551 dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
552 pch_spi_set_bits_per_word(data->master,
553 data->cur_trans->bits_per_word);
554 *bpw = data->cur_trans->bits_per_word;
555 } else {
556 *bpw = data->current_msg->spi->bits_per_word;
557 }
558
559 /* reset Tx/Rx index */
560 data->tx_index = 0;
561 data->rx_index = 0;
562
563 data->bpw_len = data->cur_trans->len / (*bpw / 8);
564
565 /* find alloc size */
566 size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);
567
568 /* allocate memory for pkt_tx_buff & pkt_rx_buffer */
569 data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
570 if (data->pkt_tx_buff != NULL) {
571 data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
572 if (!data->pkt_rx_buff)
573 kfree(data->pkt_tx_buff);
574 }
575
576 if (!data->pkt_rx_buff) {
577 /* flush queue and set status of all transfers to -ENOMEM */
578 dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
579 list_for_each_entry(pmsg, data->queue.next, queue) {
580 pmsg->status = -ENOMEM;
581
582 if (pmsg->complete != 0)
583 pmsg->complete(pmsg->context);
584
585 /* delete from queue */
586 list_del_init(&pmsg->queue);
587 }
588 return;
589 }
590
591 /* copy Tx Data */
592 if (data->cur_trans->tx_buf != NULL) {
593 if (*bpw == 8) {
594 tx_buf = data->cur_trans->tx_buf;
595 for (j = 0; j < data->bpw_len; j++)
596 data->pkt_tx_buff[j] = *tx_buf++;
597 } else {
598 tx_sbuf = data->cur_trans->tx_buf;
599 for (j = 0; j < data->bpw_len; j++)
600 data->pkt_tx_buff[j] = *tx_sbuf++;
601 }
602 }
603
604 /* if bpw_len is greater than PCH_MAX_FIFO_DEPTH, write 16 words, else bpw_len words */
605 n_writes = data->bpw_len;
606 if (n_writes > PCH_MAX_FIFO_DEPTH)
607 n_writes = PCH_MAX_FIFO_DEPTH;
608
609 dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
610 "0x2 to SSNXCR\n", __func__);
611 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
612
613 for (j = 0; j < n_writes; j++)
614 pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
615
616 /* update tx_index */
617 data->tx_index = j;
618
619 /* reset transfer complete flag */
620 data->transfer_complete = false;
621 data->transfer_active = true;
622}
623
624
625static void pch_spi_nomore_transfer(struct pch_spi_data *data,
626 struct spi_message *pmsg)
627{
628 dev_dbg(&data->master->dev, "%s called\n", __func__);
629 /* Invoke complete callback
630 * [To the SPI core, indicating end of transfer] */
631 data->current_msg->status = 0;
632
633 if (data->current_msg->complete != 0) {
634 dev_dbg(&data->master->dev,
635 "%s:Invoking callback of SPI core\n", __func__);
636 data->current_msg->complete(data->current_msg->context);
637 }
638
639 /* update status in global variable */
640 data->bcurrent_msg_processing = false;
641
642 dev_dbg(&data->master->dev,
643 "%s:data->bcurrent_msg_processing = false\n", __func__);
644
645 data->current_msg = NULL;
646 data->cur_trans = NULL;
647
648 /* check if we have items in list and not suspending
649 * return 1 if list empty */
650 if ((list_empty(&data->queue) == 0) &&
651 (!data->board_dat->suspend_sts) &&
652 (data->status != STATUS_EXITING)) {
653 /* We have some more work to do (either there is more tranint
654 * bpw;sfer requests in the current message or there are
655 *more messages)
656 */
657 dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
658 queue_work(data->wk, &data->work);
659 } else if (data->board_dat->suspend_sts ||
660 data->status == STATUS_EXITING) {
661 dev_dbg(&data->master->dev,
662 "%s suspend/remove initiated, flushing queue\n",
663 __func__);
664 list_for_each_entry(pmsg, data->queue.next, queue) {
665 pmsg->status = -EIO;
666
667 if (pmsg->complete)
668 pmsg->complete(pmsg->context);
669
670 /* delete from queue */
671 list_del_init(&pmsg->queue);
672 }
673 }
674}
675
676static void pch_spi_set_ir(struct pch_spi_data *data)
677{
678 /* enable interrupts */
679 if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) {
680 /* set receive threshold to PCH_RX_THOLD */
681 pch_spi_setclr_reg(data->master, PCH_SPCR,
682 PCH_RX_THOLD << SPCR_TFIC_FIELD,
683 ~MASK_TFIC_SPCR_BITS);
684 /* enable FI and RFI interrupts */
685 pch_spi_setclr_reg(data->master, PCH_SPCR,
686 SPCR_RFIE_BIT | SPCR_TFIE_BIT, 0);
687 } else {
688 /* set receive threshold to maximum */
689 pch_spi_setclr_reg(data->master, PCH_SPCR,
690 PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD,
691 ~MASK_TFIC_SPCR_BITS);
692 /* enable FI interrupt */
693 pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0);
694 }
695
696 dev_dbg(&data->master->dev,
697 "%s:invoking pch_spi_set_enable to enable SPI\n", __func__);
698
699 /* SPI set enable */
700 pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0);
701
702 /* Wait until the transfer completes; go to sleep after
703 initiating the transfer. */
704 dev_dbg(&data->master->dev,
705 "%s:waiting for transfer to get over\n", __func__);
706
707 wait_event_interruptible(data->wait, data->transfer_complete);
708
709 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
710 dev_dbg(&data->master->dev,
711 "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
712
713 data->transfer_active = false;
714 dev_dbg(&data->master->dev,
715 "%s set data->transfer_active = false\n", __func__);
716
717 /* clear all interrupts */
718 pch_spi_writereg(data->master, PCH_SPSR,
719 pch_spi_readreg(data->master, PCH_SPSR));
720 /* disable interrupts */
721 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
722}
723
724static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
725{
726 int j;
727 u8 *rx_buf;
728 u16 *rx_sbuf;
729
730 /* copy Rx Data */
731 if (!data->cur_trans->rx_buf)
732 return;
733
734 if (bpw == 8) {
735 rx_buf = data->cur_trans->rx_buf;
736 for (j = 0; j < data->bpw_len; j++)
737 *rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
738 } else {
739 rx_sbuf = data->cur_trans->rx_buf;
740 for (j = 0; j < data->bpw_len; j++)
741 *rx_sbuf++ = data->pkt_rx_buff[j];
742 }
743}
744
745
746static void pch_spi_process_messages(struct work_struct *pwork)
747{
748 struct spi_message *pmsg;
749 struct pch_spi_data *data;
750 int bpw;
751
752 data = container_of(pwork, struct pch_spi_data, work);
753 dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
754
755 spin_lock(&data->lock);
756
757 /* check if suspend has been initiated; if yes, flush the queue */
758 if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
759 dev_dbg(&data->master->dev,
760 "%s suspend/remove initiated,flushing queue\n",
761 __func__);
762
763 list_for_each_entry(pmsg, data->queue.next, queue) {
764 pmsg->status = -EIO;
765
766 if (pmsg->complete != 0) {
767 spin_unlock(&data->lock);
768 pmsg->complete(pmsg->context);
769 spin_lock(&data->lock);
770 }
771
772 /* delete from queue */
773 list_del_init(&pmsg->queue);
774 }
775
776 spin_unlock(&data->lock);
777 return;
778 }
779
780 data->bcurrent_msg_processing = true;
781 dev_dbg(&data->master->dev,
782 "%s Set data->bcurrent_msg_processing= true\n", __func__);
783
784 /* Get the message from the queue and delete it from there. */
785 data->current_msg = list_entry(data->queue.next, struct spi_message,
786 queue);
787
788 list_del_init(&data->current_msg->queue);
789
790 data->current_msg->status = 0;
791
792 pch_spi_select_chip(data, data->current_msg->spi);
793
794 spin_unlock(&data->lock);
795
796 do {
797 /* If we are already processing a message, get the next
798 transfer structure from the message; otherwise retrieve
799 the 1st transfer request from the message. */
800 spin_lock(&data->lock);
801
802 if (data->cur_trans == NULL) {
803 data->cur_trans =
804 list_entry(data->current_msg->transfers.
805 next, struct spi_transfer,
806 transfer_list);
807 dev_dbg(&data->master->dev,
808 "%s :Getting 1st transfer message\n", __func__);
809 } else {
810 data->cur_trans =
811 list_entry(data->cur_trans->transfer_list.next,
812 struct spi_transfer,
813 transfer_list);
814 dev_dbg(&data->master->dev,
815 "%s :Getting next transfer message\n",
816 __func__);
817 }
818
819 spin_unlock(&data->lock);
820
821 pch_spi_set_tx(data, &bpw, &pmsg);
822
823 /* Control interrupt */
824 pch_spi_set_ir(data);
825
826 /* Disable SPI transfer */
827 pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0,
828 SPCR_SPE_BIT);
829
830 /* clear FIFO */
831 pch_spi_clear_fifo(data->master);
832
833 /* copy Rx Data */
834 pch_spi_copy_rx_data(data, bpw);
835
836 /* free memory */
837 kfree(data->pkt_rx_buff);
838 data->pkt_rx_buff = NULL;
839
840 kfree(data->pkt_tx_buff);
841 data->pkt_tx_buff = NULL;
842
843 /* increment message count */
844 data->current_msg->actual_length += data->cur_trans->len;
845
846 dev_dbg(&data->master->dev,
847 "%s:data->current_msg->actual_length=%d\n",
848 __func__, data->current_msg->actual_length);
849
850 /* check for delay */
851 if (data->cur_trans->delay_usecs) {
852 dev_dbg(&data->master->dev, "%s:"
853 "delay in usec=%d\n", __func__,
854 data->cur_trans->delay_usecs);
855 udelay(data->cur_trans->delay_usecs);
856 }
857
858 spin_lock(&data->lock);
859
860		/* No more transfers in this message. */
861 if ((data->cur_trans->transfer_list.next) ==
862 &(data->current_msg->transfers)) {
863 pch_spi_nomore_transfer(data, pmsg);
864 }
865
866 spin_unlock(&data->lock);
867
868 } while (data->cur_trans != NULL);
869}
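
/*
 * Illustrative sketch only (not part of this driver): how a protocol
 * driver sitting on top of this master might build and submit a message
 * that is eventually processed by pch_spi_process_messages() above.  The
 * function name and command bytes are hypothetical, and <linux/spi/spi.h>
 * is assumed to be included; spi_message_init(), spi_message_add_tail()
 * and spi_sync() are the standard SPI core calls.
 */
#if 0
static int example_pch_spi_read_two_bytes(struct spi_device *spi)
{
	u8 tx[2] = { 0x9f, 0x00 };	/* hypothetical command bytes */
	u8 rx[2] = { 0, 0 };
	struct spi_transfer xfer = {
		.tx_buf		= tx,
		.rx_buf		= rx,
		.len		= sizeof(tx),
		.bits_per_word	= 8,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/*
	 * spi_sync() hands the message to this master's ->transfer hook
	 * (pch_spi_transfer) and sleeps until the worker above completes it.
	 */
	return spi_sync(spi, &msg);
}
#endif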
870
871static void pch_spi_free_resources(struct pch_spi_board_data *board_dat)
872{
873 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
874
875 /* free workqueue */
876 if (board_dat->data->wk != NULL) {
877 destroy_workqueue(board_dat->data->wk);
878 board_dat->data->wk = NULL;
879 dev_dbg(&board_dat->pdev->dev,
880 "%s destroy_workqueue invoked successfully\n",
881 __func__);
882 }
883
884 /* disable interrupts & free IRQ */
885 if (board_dat->irq_reg_sts) {
886 /* disable interrupts */
887 pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
888 PCH_ALL);
889
890 /* free IRQ */
891 free_irq(board_dat->pdev->irq, board_dat);
892
893 dev_dbg(&board_dat->pdev->dev,
894 "%s free_irq invoked successfully\n", __func__);
895
896 board_dat->irq_reg_sts = false;
897 }
898
899 /* unmap PCI base address */
900	if (board_dat->data->io_remap_addr) {
901		pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr);
902
903		board_dat->data->io_remap_addr = NULL;
904
905 dev_dbg(&board_dat->pdev->dev,
906 "%s pci_iounmap invoked successfully\n", __func__);
907 }
908
909 /* release PCI region */
910 if (board_dat->pci_req_sts) {
911 pci_release_regions(board_dat->pdev);
912 dev_dbg(&board_dat->pdev->dev,
913 "%s pci_release_regions invoked successfully\n",
914 __func__);
915 board_dat->pci_req_sts = false;
916 }
917}
918
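/*
 * pch_spi_get_resources() - acquire everything the controller needs: a
 * single-threaded workqueue, the PCI regions, an ioremapped view of BAR 1,
 * a controller reset and the shared IRQ line.  On any failure
 * pch_spi_free_resources() is called to undo the partial setup.
 */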
919static int pch_spi_get_resources(struct pch_spi_board_data *board_dat)
920{
921 void __iomem *io_remap_addr;
922 int retval;
923 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
924
925 /* create workqueue */
926 board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
927 if (!board_dat->data->wk) {
928 dev_err(&board_dat->pdev->dev,
929			"%s create_singlethread_workqueue failed\n", __func__);
930 retval = -EBUSY;
931 goto err_return;
932 }
933
934 dev_dbg(&board_dat->pdev->dev,
935 "%s create_singlethread_workqueue success\n", __func__);
936
937 retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME);
938 if (retval != 0) {
939 dev_err(&board_dat->pdev->dev,
940			"%s pci_request_regions failed\n", __func__);
941 goto err_return;
942 }
943
944 board_dat->pci_req_sts = true;
945
946 io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
947	if (io_remap_addr == NULL) {
948 dev_err(&board_dat->pdev->dev,
949 "%s pci_iomap failed\n", __func__);
950 retval = -ENOMEM;
951 goto err_return;
952 }
953
954	/* save the remapped base address */
955 board_dat->data->io_remap_addr = io_remap_addr;
956
957 /* reset PCH SPI h/w */
958 pch_spi_reset(board_dat->data->master);
959 dev_dbg(&board_dat->pdev->dev,
960 "%s pch_spi_reset invoked successfully\n", __func__);
961
962 /* register IRQ */
963 retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
964 IRQF_SHARED, KBUILD_MODNAME, board_dat);
965 if (retval != 0) {
966 dev_err(&board_dat->pdev->dev,
967 "%s request_irq failed\n", __func__);
968 goto err_return;
969 }
970
971 dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n",
972 __func__, retval);
973
974 board_dat->irq_reg_sts = true;
975 dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
976
977err_return:
978 if (retval != 0) {
979 dev_err(&board_dat->pdev->dev,
980 "%s FAIL:invoking pch_spi_free_resources\n", __func__);
981 pch_spi_free_resources(board_dat);
982 }
983
984 dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
985
986 return retval;
987}
988
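/*
 * pch_spi_probe() - PCI probe: enable the device, allocate an SPI master
 * carrying the driver's private data, initialize the message queue, lock,
 * work item and wait queue, acquire the board resources and finally
 * register the master with the SPI core.  The error labels unwind these
 * steps in reverse order.
 */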
989static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
990{
991
992 struct spi_master *master;
993
994 struct pch_spi_board_data *board_dat;
995 int retval;
996
997 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
998
999 /* allocate memory for private data */
1000 board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
1001 if (board_dat == NULL) {
1002 dev_err(&pdev->dev,
1003			"%s memory allocation for private data failed\n",
1004 __func__);
1005 retval = -ENOMEM;
1006 goto err_kmalloc;
1007 }
1008
1009 dev_dbg(&pdev->dev,
1010 "%s memory allocation for private data success\n", __func__);
1011
1012 /* enable PCI device */
1013 retval = pci_enable_device(pdev);
1014 if (retval != 0) {
1015 dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__);
1016
1017 goto err_pci_en_device;
1018 }
1019
1020 dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n",
1021 __func__, retval);
1022
1023 board_dat->pdev = pdev;
1024
1025	/* allocate memory for SPI master */
1026 master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data));
1027 if (master == NULL) {
1028 retval = -ENOMEM;
1029		dev_err(&pdev->dev, "%s spi_alloc_master failed\n", __func__);
1030 goto err_spi_alloc_master;
1031 }
1032
1033 dev_dbg(&pdev->dev,
1034 "%s spi_alloc_master returned non NULL\n", __func__);
1035
1036 /* initialize members of SPI master */
1037 master->bus_num = -1;
1038 master->num_chipselect = PCH_MAX_CS;
1039 master->setup = pch_spi_setup;
1040 master->transfer = pch_spi_transfer;
1041 dev_dbg(&pdev->dev,
1042 "%s transfer member of SPI master initialized\n", __func__);
1043
1044 board_dat->data = spi_master_get_devdata(master);
1045
1046 board_dat->data->master = master;
1047 board_dat->data->n_curnt_chip = 255;
1048 board_dat->data->board_dat = board_dat;
1049 board_dat->data->status = STATUS_RUNNING;
1050
1051 INIT_LIST_HEAD(&board_dat->data->queue);
1052 spin_lock_init(&board_dat->data->lock);
1053 INIT_WORK(&board_dat->data->work, pch_spi_process_messages);
1054 init_waitqueue_head(&board_dat->data->wait);
1055
1056 /* allocate resources for PCH SPI */
1057 retval = pch_spi_get_resources(board_dat);
1058 if (retval) {
1059 dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval);
1060 goto err_spi_get_resources;
1061 }
1062
1063 dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n",
1064 __func__, retval);
1065
1066 /* save private data in dev */
1067 pci_set_drvdata(pdev, board_dat);
1068 dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__);
1069
1070 /* set master mode */
1071 pch_spi_set_master_mode(master);
1072 dev_dbg(&pdev->dev,
1073 "%s invoked pch_spi_set_master_mode\n", __func__);
1074
1075 /* Register the controller with the SPI core. */
1076 retval = spi_register_master(master);
1077 if (retval != 0) {
1078 dev_err(&pdev->dev,
1079 "%s spi_register_master FAILED\n", __func__);
1080 goto err_spi_reg_master;
1081 }
1082
1083 dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n",
1084 __func__, retval);
1085
1086
1087 return 0;
1088
1089err_spi_reg_master:
1090	/* registration failed, so only drop our reference below */
1091err_spi_get_resources:
1092err_spi_alloc_master:
1093 spi_master_put(master);
1094 pci_disable_device(pdev);
1095err_pci_en_device:
1096 kfree(board_dat);
1097err_kmalloc:
1098 return retval;
1099}
1100
1101static void pch_spi_remove(struct pci_dev *pdev)
1102{
1103 struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
1104 int count;
1105
1106 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1107
1108 if (!board_dat) {
1109 dev_err(&pdev->dev,
1110 "%s pci_get_drvdata returned NULL\n", __func__);
1111 return;
1112 }
1113
1114	/* check for any pending messages; no action is taken if the queue
1115	 * is still not empty after the wait, but at least we tried; unload anyway */
1116 count = 500;
1117 spin_lock(&board_dat->data->lock);
1118 board_dat->data->status = STATUS_EXITING;
1119 while ((list_empty(&board_dat->data->queue) == 0) && --count) {
1120 dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
1121 __func__);
1122 spin_unlock(&board_dat->data->lock);
1123 msleep(PCH_SLEEP_TIME);
1124 spin_lock(&board_dat->data->lock);
1125 }
1126 spin_unlock(&board_dat->data->lock);
1127
1128 /* Free resources allocated for PCH SPI */
1129 pch_spi_free_resources(board_dat);
1130
1131 spi_unregister_master(board_dat->data->master);
1132
1133 /* free memory for private data */
1134 kfree(board_dat);
1135
1136 pci_set_drvdata(pdev, NULL);
1137
1138 /* disable PCI device */
1139 pci_disable_device(pdev);
1140
1141 dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__);
1142}
1143
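/*
 * Legacy PCI power-management callbacks, compiled only when CONFIG_PM is
 * set (otherwise the hooks below are NULL).  Suspend waits for the message
 * currently being processed, masks interrupts, frees the IRQ and saves the
 * PCI state before entering D3hot; resume restores the state, re-requests
 * the IRQ and resets the controller back into master mode.
 */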
1144#ifdef CONFIG_PM
1145static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
1146{
1147 u8 count;
1148 int retval;
1149
1150 struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
1151
1152 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1153
1154 if (!board_dat) {
1155 dev_err(&pdev->dev,
1156 "%s pci_get_drvdata returned NULL\n", __func__);
1157 return -EFAULT;
1158 }
1159
1160 retval = 0;
1161 board_dat->suspend_sts = true;
1162
1163	/* wait until the current message is processed;
1164	   only after that is done can the transfer be suspended */
1165 count = 255;
1166 while ((--count) > 0) {
1167 if (!(board_dat->data->bcurrent_msg_processing)) {
1168 dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_"
1169 "msg_processing = false\n", __func__);
1170 break;
1171 } else {
1172 dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_"
1173 "processing = true\n", __func__);
1174 }
1175 msleep(PCH_SLEEP_TIME);
1176 }
1177
1178 /* Free IRQ */
1179 if (board_dat->irq_reg_sts) {
1180 /* disable all interrupts */
1181 pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
1182 PCH_ALL);
1183 pch_spi_reset(board_dat->data->master);
1184
1185 free_irq(board_dat->pdev->irq, board_dat);
1186
1187 board_dat->irq_reg_sts = false;
1188 dev_dbg(&pdev->dev,
1189 "%s free_irq invoked successfully.\n", __func__);
1190 }
1191
1192 /* save config space */
1193 retval = pci_save_state(pdev);
1194
1195 if (retval == 0) {
1196 dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n",
1197 __func__, retval);
1198		/* disable wake events */
1199 pci_enable_wake(pdev, PCI_D3hot, 0);
1200 dev_dbg(&pdev->dev,
1201 "%s pci_enable_wake invoked successfully\n", __func__);
1202 /* disable PCI device */
1203 pci_disable_device(pdev);
1204 dev_dbg(&pdev->dev,
1205 "%s pci_disable_device invoked successfully\n",
1206 __func__);
1207 /* move device to D3hot state */
1208 pci_set_power_state(pdev, PCI_D3hot);
1209 dev_dbg(&pdev->dev,
1210 "%s pci_set_power_state invoked successfully\n",
1211 __func__);
1212 } else {
1213 dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
1214 }
1215
1216 dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval);
1217
1218 return retval;
1219}
1220
1221static int pch_spi_resume(struct pci_dev *pdev)
1222{
1223 int retval;
1224
1225 struct pch_spi_board_data *board = pci_get_drvdata(pdev);
1226 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1227
1228 if (!board) {
1229 dev_err(&pdev->dev,
1230 "%s pci_get_drvdata returned NULL\n", __func__);
1231 return -EFAULT;
1232 }
1233
1234	/* move device to D0 power state */
1235 pci_set_power_state(pdev, PCI_D0);
1236
1237 /* restore state */
1238 pci_restore_state(pdev);
1239
1240 retval = pci_enable_device(pdev);
1241 if (retval < 0) {
1242 dev_err(&pdev->dev,
1243 "%s pci_enable_device failed\n", __func__);
1244 } else {
1245		/* disable wake events */
1246 pci_enable_wake(pdev, PCI_D3hot, 0);
1247
1248 /* register IRQ handler */
1249 if (!board->irq_reg_sts) {
1250 /* register IRQ */
1251 retval = request_irq(board->pdev->irq, pch_spi_handler,
1252 IRQF_SHARED, KBUILD_MODNAME,
1253 board);
1254 if (retval < 0) {
1255 dev_err(&pdev->dev,
1256 "%s request_irq failed\n", __func__);
1257 return retval;
1258 }
1259 board->irq_reg_sts = true;
1260
1261 /* reset PCH SPI h/w */
1262 pch_spi_reset(board->data->master);
1263 pch_spi_set_master_mode(board->data->master);
1264
1265 /* set suspend status to false */
1266 board->suspend_sts = false;
1267
1268 }
1269 }
1270
1271 dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval);
1272
1273 return retval;
1274}
1275#else
1276#define pch_spi_suspend NULL
1277#define pch_spi_resume NULL
1278
1279#endif
1280
1281static struct pci_driver pch_spi_pcidev = {
1282 .name = "pch_spi",
1283 .id_table = pch_spi_pcidev_id,
1284 .probe = pch_spi_probe,
1285 .remove = pch_spi_remove,
1286 .suspend = pch_spi_suspend,
1287 .resume = pch_spi_resume,
1288};
1289
1290static int __init pch_spi_init(void)
1291{
1292 return pci_register_driver(&pch_spi_pcidev);
1293}
1294module_init(pch_spi_init);
1295
1296static void __exit pch_spi_exit(void)
1297{
1298 pci_unregister_driver(&pch_spi_pcidev);
1299}
1300module_exit(pch_spi_exit);
1301
1302MODULE_LICENSE("GPL");
1303MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver");