-rw-r--r--   Documentation/devicetree/bindings/spi/spi-sirf.txt        |    3
-rw-r--r--   Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt |   26
-rw-r--r--   drivers/spi/Kconfig                                       |    6
-rw-r--r--   drivers/spi/Makefile                                      |    1
-rw-r--r--   drivers/spi/spi-sirf.c                                    |  877
-rw-r--r--   drivers/spi/spi-zynqmp-gqspi.c                            | 1122
-rw-r--r--   drivers/spi/spidev.c                                      |   33
7 files changed, 1806 insertions, 262 deletions
diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt
index 4c7adb8f777c..ddd78ff68fae 100644
--- a/Documentation/devicetree/bindings/spi/spi-sirf.txt
+++ b/Documentation/devicetree/bindings/spi/spi-sirf.txt
@@ -1,7 +1,8 @@
 * CSR SiRFprimaII Serial Peripheral Interface
 
 Required properties:
-- compatible : Should be "sirf,prima2-spi"
+- compatible : Should be "sirf,prima2-spi", "sirf,prima2-usp"
+	or "sirf,atlas7-usp"
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain SPI interrupt
 - resets: phandle to the reset controller asserting this device in
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
new file mode 100644
index 000000000000..c8f50e5cf70b
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
@@ -0,0 +1,26 @@
+Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
+-------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,zynqmp-qspi-1.0".
+- reg : Physical base address and size of GQSPI registers map.
+- interrupts : Property with a value describing the interrupt
+	number.
+- interrupt-parent : Must be core interrupt controller.
+- clock-names : List of input clock names - "ref_clk", "pclk"
+	(See clock bindings for details).
+- clocks : Clock phandles (see clock bindings for details).
+
+Optional properties:
+- num-cs : Number of chip selects used.
+
+Example:
+	qspi: spi@ff0f0000 {
+		compatible = "xlnx,zynqmp-qspi-1.0";
+		clock-names = "ref_clk", "pclk";
+		clocks = <&misc_clk &misc_clk>;
+		interrupts = <0 15 4>;
+		interrupt-parent = <&gic>;
+		num-cs = <1>;
+		reg = <0x0 0xff0f0000 0x1000>,<0x0 0xc0000000 0x8000000>;
+	};
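
For reference (not part of the patch): the compatible string above is what the new drivers/spi/spi-zynqmp-gqspi.c added by this series matches on. The driver body is not shown in this excerpt, so the following is only a generic platform-driver skeleton illustrating how that match works; every example_* name is a placeholder, not a symbol from the real driver.

/*
 * Illustrative skeleton only, not the contents of spi-zynqmp-gqspi.c.
 * It shows how a platform driver binds to the "xlnx,zynqmp-qspi-1.0"
 * compatible documented above.  All example_* names are placeholders.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_zynqmp_qspi_probe(struct platform_device *pdev)
{
	/* map the "reg" ranges, get "ref_clk"/"pclk", register an SPI master */
	return 0;
}

static const struct of_device_id example_zynqmp_qspi_of_match[] = {
	{ .compatible = "xlnx,zynqmp-qspi-1.0" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_zynqmp_qspi_of_match);

static struct platform_driver example_zynqmp_qspi_driver = {
	.probe = example_zynqmp_qspi_probe,
	.driver = {
		.name = "example-zynqmp-qspi",
		.of_match_table = example_zynqmp_qspi_of_match,
	},
};
module_platform_driver(example_zynqmp_qspi_driver);
MODULE_LICENSE("GPL");
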
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec40a27a4b76..0cae1694014d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -610,6 +610,12 @@ config SPI_XTENSA_XTFPGA
 	  16 bit words in SPI mode 0, automatically asserting CS on transfer
 	  start and deasserting on end.
 
+config SPI_ZYNQMP_GQSPI
+	tristate "Xilinx ZynqMP GQSPI controller"
+	depends on SPI_MASTER
+	help
+	  Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
+
 config SPI_NUC900
 	tristate "Nuvoton NUC900 series SPI"
 	depends on ARCH_W90X900
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 2e7089fbc799..1154dbac8f2c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -89,3 +89,4 @@ obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
 obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
 obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
 obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
+obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index f5715c9f68b0..7072276ad354 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -26,28 +26,6 @@ | |||
26 | #include <linux/reset.h> | 26 | #include <linux/reset.h> |
27 | 27 | ||
28 | #define DRIVER_NAME "sirfsoc_spi" | 28 | #define DRIVER_NAME "sirfsoc_spi" |
29 | |||
30 | #define SIRFSOC_SPI_CTRL 0x0000 | ||
31 | #define SIRFSOC_SPI_CMD 0x0004 | ||
32 | #define SIRFSOC_SPI_TX_RX_EN 0x0008 | ||
33 | #define SIRFSOC_SPI_INT_EN 0x000C | ||
34 | #define SIRFSOC_SPI_INT_STATUS 0x0010 | ||
35 | #define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100 | ||
36 | #define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104 | ||
37 | #define SIRFSOC_SPI_TXFIFO_CTRL 0x0108 | ||
38 | #define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C | ||
39 | #define SIRFSOC_SPI_TXFIFO_OP 0x0110 | ||
40 | #define SIRFSOC_SPI_TXFIFO_STATUS 0x0114 | ||
41 | #define SIRFSOC_SPI_TXFIFO_DATA 0x0118 | ||
42 | #define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120 | ||
43 | #define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124 | ||
44 | #define SIRFSOC_SPI_RXFIFO_CTRL 0x0128 | ||
45 | #define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C | ||
46 | #define SIRFSOC_SPI_RXFIFO_OP 0x0130 | ||
47 | #define SIRFSOC_SPI_RXFIFO_STATUS 0x0134 | ||
48 | #define SIRFSOC_SPI_RXFIFO_DATA 0x0138 | ||
49 | #define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144 | ||
50 | |||
51 | /* SPI CTRL register defines */ | 29 | /* SPI CTRL register defines */ |
52 | #define SIRFSOC_SPI_SLV_MODE BIT(16) | 30 | #define SIRFSOC_SPI_SLV_MODE BIT(16) |
53 | #define SIRFSOC_SPI_CMD_MODE BIT(17) | 31 | #define SIRFSOC_SPI_CMD_MODE BIT(17) |
@@ -80,8 +58,6 @@ | |||
80 | #define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9) | 58 | #define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9) |
81 | #define SIRFSOC_SPI_FRM_END_INT_EN BIT(10) | 59 | #define SIRFSOC_SPI_FRM_END_INT_EN BIT(10) |
82 | 60 | ||
83 | #define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF | ||
84 | |||
85 | /* Interrupt status */ | 61 | /* Interrupt status */ |
86 | #define SIRFSOC_SPI_RX_DONE BIT(0) | 62 | #define SIRFSOC_SPI_RX_DONE BIT(0) |
87 | #define SIRFSOC_SPI_TX_DONE BIT(1) | 63 | #define SIRFSOC_SPI_TX_DONE BIT(1) |
@@ -110,20 +86,66 @@ | |||
110 | #define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0) | 86 | #define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0) |
111 | #define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0) | 87 | #define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0) |
112 | #define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0) | 88 | #define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0) |
113 | 89 | /* USP related */ | |
114 | /* FIFO Status */ | 90 | #define SIRFSOC_USP_SYNC_MODE BIT(0) |
115 | #define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF | 91 | #define SIRFSOC_USP_SLV_MODE BIT(1) |
116 | #define SIRFSOC_SPI_FIFO_FULL BIT(8) | 92 | #define SIRFSOC_USP_LSB BIT(4) |
117 | #define SIRFSOC_SPI_FIFO_EMPTY BIT(9) | 93 | #define SIRFSOC_USP_EN BIT(5) |
118 | 94 | #define SIRFSOC_USP_RXD_FALLING_EDGE BIT(6) | |
119 | /* 256 bytes rx/tx FIFO */ | 95 | #define SIRFSOC_USP_TXD_FALLING_EDGE BIT(7) |
120 | #define SIRFSOC_SPI_FIFO_SIZE 256 | 96 | #define SIRFSOC_USP_CS_HIGH_VALID BIT(9) |
121 | #define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024) | 97 | #define SIRFSOC_USP_SCLK_IDLE_STAT BIT(11) |
122 | 98 | #define SIRFSOC_USP_TFS_IO_MODE BIT(14) | |
123 | #define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F) | 99 | #define SIRFSOC_USP_TFS_IO_INPUT BIT(19) |
124 | #define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10) | 100 | |
125 | #define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20) | 101 | #define SIRFSOC_USP_RXD_DELAY_LEN_MASK 0xFF |
126 | #define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2) | 102 | #define SIRFSOC_USP_TXD_DELAY_LEN_MASK 0xFF |
103 | #define SIRFSOC_USP_RXD_DELAY_OFFSET 0 | ||
104 | #define SIRFSOC_USP_TXD_DELAY_OFFSET 8 | ||
105 | #define SIRFSOC_USP_RXD_DELAY_LEN 1 | ||
106 | #define SIRFSOC_USP_TXD_DELAY_LEN 1 | ||
107 | #define SIRFSOC_USP_CLK_DIVISOR_OFFSET 21 | ||
108 | #define SIRFSOC_USP_CLK_DIVISOR_MASK 0x3FF | ||
109 | #define SIRFSOC_USP_CLK_10_11_MASK 0x3 | ||
110 | #define SIRFSOC_USP_CLK_10_11_OFFSET 30 | ||
111 | #define SIRFSOC_USP_CLK_12_15_MASK 0xF | ||
112 | #define SIRFSOC_USP_CLK_12_15_OFFSET 24 | ||
113 | |||
114 | #define SIRFSOC_USP_TX_DATA_OFFSET 0 | ||
115 | #define SIRFSOC_USP_TX_SYNC_OFFSET 8 | ||
116 | #define SIRFSOC_USP_TX_FRAME_OFFSET 16 | ||
117 | #define SIRFSOC_USP_TX_SHIFTER_OFFSET 24 | ||
118 | |||
119 | #define SIRFSOC_USP_TX_DATA_MASK 0xFF | ||
120 | #define SIRFSOC_USP_TX_SYNC_MASK 0xFF | ||
121 | #define SIRFSOC_USP_TX_FRAME_MASK 0xFF | ||
122 | #define SIRFSOC_USP_TX_SHIFTER_MASK 0x1F | ||
123 | |||
124 | #define SIRFSOC_USP_RX_DATA_OFFSET 0 | ||
125 | #define SIRFSOC_USP_RX_FRAME_OFFSET 8 | ||
126 | #define SIRFSOC_USP_RX_SHIFTER_OFFSET 16 | ||
127 | |||
128 | #define SIRFSOC_USP_RX_DATA_MASK 0xFF | ||
129 | #define SIRFSOC_USP_RX_FRAME_MASK 0xFF | ||
130 | #define SIRFSOC_USP_RX_SHIFTER_MASK 0x1F | ||
131 | #define SIRFSOC_USP_CS_HIGH_VALUE BIT(1) | ||
132 | |||
133 | #define SIRFSOC_SPI_FIFO_SC_OFFSET 0 | ||
134 | #define SIRFSOC_SPI_FIFO_LC_OFFSET 10 | ||
135 | #define SIRFSOC_SPI_FIFO_HC_OFFSET 20 | ||
136 | |||
137 | #define SIRFSOC_SPI_FIFO_FULL_MASK(s) (1 << ((s)->fifo_full_offset)) | ||
138 | #define SIRFSOC_SPI_FIFO_EMPTY_MASK(s) (1 << ((s)->fifo_full_offset + 1)) | ||
139 | #define SIRFSOC_SPI_FIFO_THD_MASK(s) ((s)->fifo_size - 1) | ||
140 | #define SIRFSOC_SPI_FIFO_THD_OFFSET 2 | ||
141 | #define SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(s, val) \ | ||
142 | ((val) & (s)->fifo_level_chk_mask) | ||
143 | |||
144 | enum sirf_spi_type { | ||
145 | SIRF_REAL_SPI, | ||
146 | SIRF_USP_SPI_P2, | ||
147 | SIRF_USP_SPI_A7, | ||
148 | }; | ||
127 | 149 | ||
128 | /* | 150 | /* |
129 | * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma | 151 | * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma |
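
For reference (not part of the patch): SIRFSOC_SPI_FIFO_FULL_MASK(), SIRFSOC_SPI_FIFO_EMPTY_MASK() and SIRFSOC_SPI_FIFO_THD_MASK() above parameterize the fixed constants removed just above in this hunk (BIT(8), BIT(9) and 0xFF for the 256-byte FIFO). With fifo_full_offset computed as ilog2(fifo_size), as the probe path later in this patch does, they reduce to the old values for the prima2 SPI. A standalone check of that arithmetic, compiled outside the kernel:

/*
 * Illustrative check only.  For a 256-byte FIFO with
 * fifo_full_offset = ilog2(256) = 8, the new per-instance macros give
 * the same values as the removed fixed defines: FIFO full = BIT(8),
 * FIFO empty = BIT(9), threshold mask = 0xFF.
 */
#include <assert.h>

int main(void)
{
	unsigned int fifo_size = 256;
	unsigned int fifo_full_offset = 8;		/* ilog2(256) */

	assert((1u << fifo_full_offset) == 0x100);		/* FIFO full */
	assert((1u << (fifo_full_offset + 1)) == 0x200);	/* FIFO empty */
	assert((fifo_size - 1) == 0xff);			/* threshold mask */
	return 0;
}
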
@@ -137,6 +159,95 @@ | |||
137 | #define SIRFSOC_MAX_CMD_BYTES 4 | 159 | #define SIRFSOC_MAX_CMD_BYTES 4 |
138 | #define SIRFSOC_SPI_DEFAULT_FRQ 1000000 | 160 | #define SIRFSOC_SPI_DEFAULT_FRQ 1000000 |
139 | 161 | ||
162 | struct sirf_spi_register { | ||
163 | /*SPI and USP-SPI common*/ | ||
164 | u32 tx_rx_en; | ||
165 | u32 int_en; | ||
166 | u32 int_st; | ||
167 | u32 tx_dma_io_ctrl; | ||
168 | u32 tx_dma_io_len; | ||
169 | u32 txfifo_ctrl; | ||
170 | u32 txfifo_level_chk; | ||
171 | u32 txfifo_op; | ||
172 | u32 txfifo_st; | ||
173 | u32 txfifo_data; | ||
174 | u32 rx_dma_io_ctrl; | ||
175 | u32 rx_dma_io_len; | ||
176 | u32 rxfifo_ctrl; | ||
177 | u32 rxfifo_level_chk; | ||
178 | u32 rxfifo_op; | ||
179 | u32 rxfifo_st; | ||
180 | u32 rxfifo_data; | ||
181 | /*SPI self*/ | ||
182 | u32 spi_ctrl; | ||
183 | u32 spi_cmd; | ||
184 | u32 spi_dummy_delay_ctrl; | ||
185 | /*USP-SPI self*/ | ||
186 | u32 usp_mode1; | ||
187 | u32 usp_mode2; | ||
188 | u32 usp_tx_frame_ctrl; | ||
189 | u32 usp_rx_frame_ctrl; | ||
190 | u32 usp_pin_io_data; | ||
191 | u32 usp_risc_dsp_mode; | ||
192 | u32 usp_async_param_reg; | ||
193 | u32 usp_irda_x_mode_div; | ||
194 | u32 usp_sm_cfg; | ||
195 | u32 usp_int_en_clr; | ||
196 | }; | ||
197 | |||
198 | static const struct sirf_spi_register real_spi_register = { | ||
199 | .tx_rx_en = 0x8, | ||
200 | .int_en = 0xc, | ||
201 | .int_st = 0x10, | ||
202 | .tx_dma_io_ctrl = 0x100, | ||
203 | .tx_dma_io_len = 0x104, | ||
204 | .txfifo_ctrl = 0x108, | ||
205 | .txfifo_level_chk = 0x10c, | ||
206 | .txfifo_op = 0x110, | ||
207 | .txfifo_st = 0x114, | ||
208 | .txfifo_data = 0x118, | ||
209 | .rx_dma_io_ctrl = 0x120, | ||
210 | .rx_dma_io_len = 0x124, | ||
211 | .rxfifo_ctrl = 0x128, | ||
212 | .rxfifo_level_chk = 0x12c, | ||
213 | .rxfifo_op = 0x130, | ||
214 | .rxfifo_st = 0x134, | ||
215 | .rxfifo_data = 0x138, | ||
216 | .spi_ctrl = 0x0, | ||
217 | .spi_cmd = 0x4, | ||
218 | .spi_dummy_delay_ctrl = 0x144, | ||
219 | }; | ||
220 | |||
221 | static const struct sirf_spi_register usp_spi_register = { | ||
222 | .tx_rx_en = 0x10, | ||
223 | .int_en = 0x14, | ||
224 | .int_st = 0x18, | ||
225 | .tx_dma_io_ctrl = 0x100, | ||
226 | .tx_dma_io_len = 0x104, | ||
227 | .txfifo_ctrl = 0x108, | ||
228 | .txfifo_level_chk = 0x10c, | ||
229 | .txfifo_op = 0x110, | ||
230 | .txfifo_st = 0x114, | ||
231 | .txfifo_data = 0x118, | ||
232 | .rx_dma_io_ctrl = 0x120, | ||
233 | .rx_dma_io_len = 0x124, | ||
234 | .rxfifo_ctrl = 0x128, | ||
235 | .rxfifo_level_chk = 0x12c, | ||
236 | .rxfifo_op = 0x130, | ||
237 | .rxfifo_st = 0x134, | ||
238 | .rxfifo_data = 0x138, | ||
239 | .usp_mode1 = 0x0, | ||
240 | .usp_mode2 = 0x4, | ||
241 | .usp_tx_frame_ctrl = 0x8, | ||
242 | .usp_rx_frame_ctrl = 0xc, | ||
243 | .usp_pin_io_data = 0x1c, | ||
244 | .usp_risc_dsp_mode = 0x20, | ||
245 | .usp_async_param_reg = 0x24, | ||
246 | .usp_irda_x_mode_div = 0x28, | ||
247 | .usp_sm_cfg = 0x2c, | ||
248 | .usp_int_en_clr = 0x140, | ||
249 | }; | ||
250 | |||
140 | struct sirfsoc_spi { | 251 | struct sirfsoc_spi { |
141 | struct spi_bitbang bitbang; | 252 | struct spi_bitbang bitbang; |
142 | struct completion rx_done; | 253 | struct completion rx_done; |
@@ -164,7 +275,6 @@ struct sirfsoc_spi { | |||
164 | struct dma_chan *tx_chan; | 275 | struct dma_chan *tx_chan; |
165 | dma_addr_t src_start; | 276 | dma_addr_t src_start; |
166 | dma_addr_t dst_start; | 277 | dma_addr_t dst_start; |
167 | void *dummypage; | ||
168 | int word_width; /* in bytes */ | 278 | int word_width; /* in bytes */ |
169 | 279 | ||
170 | /* | 280 | /* |
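
For reference (not part of the patch): the two sirf_spi_register tables above are the heart of this rework. Common code stops using fixed SIRFSOC_SPI_* offsets and instead goes through sspi->regs, which probe() points at either real_spi_register or usp_spi_register via the per-variant sirf_spi_comp_data. The helper below only sketches that access pattern; the helper name is invented, while the readl() expression itself appears verbatim in the hunks that follow.

/*
 * Illustrative sketch only.  One code path serves both the real SPI
 * block and the USP block because the register offset is read from the
 * per-variant table instead of a #define.
 */
static u32 example_sirf_spi_read_rx_fifo(struct sirfsoc_spi *sspi)
{
	/* same expression as in spi_sirfsoc_rx_word_u8() below */
	return readl(sspi->base + sspi->regs->rxfifo_data);
}
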
@@ -173,14 +283,39 @@ struct sirfsoc_spi { | |||
173 | */ | 283 | */ |
174 | bool tx_by_cmd; | 284 | bool tx_by_cmd; |
175 | bool hw_cs; | 285 | bool hw_cs; |
286 | enum sirf_spi_type type; | ||
287 | const struct sirf_spi_register *regs; | ||
288 | unsigned int fifo_size; | ||
289 | /* fifo empty offset is (fifo full offset + 1)*/ | ||
290 | unsigned int fifo_full_offset; | ||
291 | /* fifo_level_chk_mask is (fifo_size/4 - 1) */ | ||
292 | unsigned int fifo_level_chk_mask; | ||
293 | unsigned int dat_max_frm_len; | ||
294 | }; | ||
295 | |||
296 | struct sirf_spi_comp_data { | ||
297 | const struct sirf_spi_register *regs; | ||
298 | enum sirf_spi_type type; | ||
299 | unsigned int dat_max_frm_len; | ||
300 | unsigned int fifo_size; | ||
301 | void (*hwinit)(struct sirfsoc_spi *sspi); | ||
176 | }; | 302 | }; |
177 | 303 | ||
304 | static void sirfsoc_usp_hwinit(struct sirfsoc_spi *sspi) | ||
305 | { | ||
306 | /* reset USP and let USP can operate */ | ||
307 | writel(readl(sspi->base + sspi->regs->usp_mode1) & | ||
308 | ~SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1); | ||
309 | writel(readl(sspi->base + sspi->regs->usp_mode1) | | ||
310 | SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1); | ||
311 | } | ||
312 | |||
178 | static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi) | 313 | static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi) |
179 | { | 314 | { |
180 | u32 data; | 315 | u32 data; |
181 | u8 *rx = sspi->rx; | 316 | u8 *rx = sspi->rx; |
182 | 317 | ||
183 | data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA); | 318 | data = readl(sspi->base + sspi->regs->rxfifo_data); |
184 | 319 | ||
185 | if (rx) { | 320 | if (rx) { |
186 | *rx++ = (u8) data; | 321 | *rx++ = (u8) data; |
@@ -199,8 +334,7 @@ static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi) | |||
199 | data = *tx++; | 334 | data = *tx++; |
200 | sspi->tx = tx; | 335 | sspi->tx = tx; |
201 | } | 336 | } |
202 | 337 | writel(data, sspi->base + sspi->regs->txfifo_data); | |
203 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); | ||
204 | sspi->left_tx_word--; | 338 | sspi->left_tx_word--; |
205 | } | 339 | } |
206 | 340 | ||
@@ -209,7 +343,7 @@ static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi) | |||
209 | u32 data; | 343 | u32 data; |
210 | u16 *rx = sspi->rx; | 344 | u16 *rx = sspi->rx; |
211 | 345 | ||
212 | data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA); | 346 | data = readl(sspi->base + sspi->regs->rxfifo_data); |
213 | 347 | ||
214 | if (rx) { | 348 | if (rx) { |
215 | *rx++ = (u16) data; | 349 | *rx++ = (u16) data; |
@@ -229,7 +363,7 @@ static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi) | |||
229 | sspi->tx = tx; | 363 | sspi->tx = tx; |
230 | } | 364 | } |
231 | 365 | ||
232 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); | 366 | writel(data, sspi->base + sspi->regs->txfifo_data); |
233 | sspi->left_tx_word--; | 367 | sspi->left_tx_word--; |
234 | } | 368 | } |
235 | 369 | ||
@@ -238,7 +372,7 @@ static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi) | |||
238 | u32 data; | 372 | u32 data; |
239 | u32 *rx = sspi->rx; | 373 | u32 *rx = sspi->rx; |
240 | 374 | ||
241 | data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA); | 375 | data = readl(sspi->base + sspi->regs->rxfifo_data); |
242 | 376 | ||
243 | if (rx) { | 377 | if (rx) { |
244 | *rx++ = (u32) data; | 378 | *rx++ = (u32) data; |
@@ -259,41 +393,59 @@ static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi) | |||
259 | sspi->tx = tx; | 393 | sspi->tx = tx; |
260 | } | 394 | } |
261 | 395 | ||
262 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); | 396 | writel(data, sspi->base + sspi->regs->txfifo_data); |
263 | sspi->left_tx_word--; | 397 | sspi->left_tx_word--; |
264 | } | 398 | } |
265 | 399 | ||
266 | static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id) | 400 | static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id) |
267 | { | 401 | { |
268 | struct sirfsoc_spi *sspi = dev_id; | 402 | struct sirfsoc_spi *sspi = dev_id; |
269 | u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS); | 403 | u32 spi_stat; |
270 | if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) { | 404 | |
405 | spi_stat = readl(sspi->base + sspi->regs->int_st); | ||
406 | if (sspi->tx_by_cmd && sspi->type == SIRF_REAL_SPI | ||
407 | && (spi_stat & SIRFSOC_SPI_FRM_END)) { | ||
271 | complete(&sspi->tx_done); | 408 | complete(&sspi->tx_done); |
272 | writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); | 409 | writel(0x0, sspi->base + sspi->regs->int_en); |
273 | writel(SIRFSOC_SPI_INT_MASK_ALL, | 410 | writel(readl(sspi->base + sspi->regs->int_st), |
274 | sspi->base + SIRFSOC_SPI_INT_STATUS); | 411 | sspi->base + sspi->regs->int_st); |
275 | return IRQ_HANDLED; | 412 | return IRQ_HANDLED; |
276 | } | 413 | } |
277 | |||
278 | /* Error Conditions */ | 414 | /* Error Conditions */ |
279 | if (spi_stat & SIRFSOC_SPI_RX_OFLOW || | 415 | if (spi_stat & SIRFSOC_SPI_RX_OFLOW || |
280 | spi_stat & SIRFSOC_SPI_TX_UFLOW) { | 416 | spi_stat & SIRFSOC_SPI_TX_UFLOW) { |
281 | complete(&sspi->tx_done); | 417 | complete(&sspi->tx_done); |
282 | complete(&sspi->rx_done); | 418 | complete(&sspi->rx_done); |
283 | writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); | 419 | switch (sspi->type) { |
284 | writel(SIRFSOC_SPI_INT_MASK_ALL, | 420 | case SIRF_REAL_SPI: |
285 | sspi->base + SIRFSOC_SPI_INT_STATUS); | 421 | case SIRF_USP_SPI_P2: |
422 | writel(0x0, sspi->base + sspi->regs->int_en); | ||
423 | break; | ||
424 | case SIRF_USP_SPI_A7: | ||
425 | writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr); | ||
426 | break; | ||
427 | } | ||
428 | writel(readl(sspi->base + sspi->regs->int_st), | ||
429 | sspi->base + sspi->regs->int_st); | ||
286 | return IRQ_HANDLED; | 430 | return IRQ_HANDLED; |
287 | } | 431 | } |
288 | if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY) | 432 | if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY) |
289 | complete(&sspi->tx_done); | 433 | complete(&sspi->tx_done); |
290 | while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) & | 434 | while (!(readl(sspi->base + sspi->regs->int_st) & |
291 | SIRFSOC_SPI_RX_IO_DMA)) | 435 | SIRFSOC_SPI_RX_IO_DMA)) |
292 | cpu_relax(); | 436 | cpu_relax(); |
293 | complete(&sspi->rx_done); | 437 | complete(&sspi->rx_done); |
294 | writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); | 438 | switch (sspi->type) { |
295 | writel(SIRFSOC_SPI_INT_MASK_ALL, | 439 | case SIRF_REAL_SPI: |
296 | sspi->base + SIRFSOC_SPI_INT_STATUS); | 440 | case SIRF_USP_SPI_P2: |
441 | writel(0x0, sspi->base + sspi->regs->int_en); | ||
442 | break; | ||
443 | case SIRF_USP_SPI_A7: | ||
444 | writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr); | ||
445 | break; | ||
446 | } | ||
447 | writel(readl(sspi->base + sspi->regs->int_st), | ||
448 | sspi->base + sspi->regs->int_st); | ||
297 | 449 | ||
298 | return IRQ_HANDLED; | 450 | return IRQ_HANDLED; |
299 | } | 451 | } |
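
For reference (not part of the patch): the reworked handler no longer writes the removed SIRFSOC_SPI_INT_MASK_ALL constant to clear interrupts; it writes back the status value it just read, which acknowledges exactly the bits that were pending, assuming the status register is write-one-to-clear (an assumption, but one consistent with the old mask-all write). Restated as a helper:

/*
 * Illustrative restatement only.  Writing the just-read status back to
 * the status register clears the interrupts that were pending, on the
 * assumption that the register is write-one-to-clear.
 */
static void example_sirf_spi_ack_pending(struct sirfsoc_spi *sspi)
{
	writel(readl(sspi->base + sspi->regs->int_st),
	       sspi->base + sspi->regs->int_st);
}
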
@@ -313,8 +465,8 @@ static void spi_sirfsoc_cmd_transfer(struct spi_device *spi, | |||
313 | u32 cmd; | 465 | u32 cmd; |
314 | 466 | ||
315 | sspi = spi_master_get_devdata(spi->master); | 467 | sspi = spi_master_get_devdata(spi->master); |
316 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 468 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op); |
317 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 469 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op); |
318 | memcpy(&cmd, sspi->tx, t->len); | 470 | memcpy(&cmd, sspi->tx, t->len); |
319 | if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST)) | 471 | if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST)) |
320 | cmd = cpu_to_be32(cmd) >> | 472 | cmd = cpu_to_be32(cmd) >> |
@@ -322,11 +474,11 @@ static void spi_sirfsoc_cmd_transfer(struct spi_device *spi, | |||
322 | if (sspi->word_width == 2 && t->len == 4 && | 474 | if (sspi->word_width == 2 && t->len == 4 && |
323 | (!(spi->mode & SPI_LSB_FIRST))) | 475 | (!(spi->mode & SPI_LSB_FIRST))) |
324 | cmd = ((cmd & 0xffff) << 16) | (cmd >> 16); | 476 | cmd = ((cmd & 0xffff) << 16) | (cmd >> 16); |
325 | writel(cmd, sspi->base + SIRFSOC_SPI_CMD); | 477 | writel(cmd, sspi->base + sspi->regs->spi_cmd); |
326 | writel(SIRFSOC_SPI_FRM_END_INT_EN, | 478 | writel(SIRFSOC_SPI_FRM_END_INT_EN, |
327 | sspi->base + SIRFSOC_SPI_INT_EN); | 479 | sspi->base + sspi->regs->int_en); |
328 | writel(SIRFSOC_SPI_CMD_TX_EN, | 480 | writel(SIRFSOC_SPI_CMD_TX_EN, |
329 | sspi->base + SIRFSOC_SPI_TX_RX_EN); | 481 | sspi->base + sspi->regs->tx_rx_en); |
330 | if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { | 482 | if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { |
331 | dev_err(&spi->dev, "cmd transfer timeout\n"); | 483 | dev_err(&spi->dev, "cmd transfer timeout\n"); |
332 | return; | 484 | return; |
@@ -342,25 +494,56 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi, | |||
342 | int timeout = t->len * 10; | 494 | int timeout = t->len * 10; |
343 | 495 | ||
344 | sspi = spi_master_get_devdata(spi->master); | 496 | sspi = spi_master_get_devdata(spi->master); |
345 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 497 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op); |
346 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 498 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op); |
347 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 499 | switch (sspi->type) { |
348 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 500 | case SIRF_REAL_SPI: |
349 | writel(0, sspi->base + SIRFSOC_SPI_INT_EN); | 501 | writel(SIRFSOC_SPI_FIFO_START, |
350 | writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); | 502 | sspi->base + sspi->regs->rxfifo_op); |
351 | if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) { | 503 | writel(SIRFSOC_SPI_FIFO_START, |
352 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | | 504 | sspi->base + sspi->regs->txfifo_op); |
353 | SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE, | 505 | writel(0, sspi->base + sspi->regs->int_en); |
354 | sspi->base + SIRFSOC_SPI_CTRL); | 506 | break; |
355 | writel(sspi->left_tx_word - 1, | 507 | case SIRF_USP_SPI_P2: |
356 | sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); | 508 | writel(0x0, sspi->base + sspi->regs->rxfifo_op); |
357 | writel(sspi->left_tx_word - 1, | 509 | writel(0x0, sspi->base + sspi->regs->txfifo_op); |
358 | sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); | 510 | writel(0, sspi->base + sspi->regs->int_en); |
511 | break; | ||
512 | case SIRF_USP_SPI_A7: | ||
513 | writel(0x0, sspi->base + sspi->regs->rxfifo_op); | ||
514 | writel(0x0, sspi->base + sspi->regs->txfifo_op); | ||
515 | writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr); | ||
516 | break; | ||
517 | } | ||
518 | writel(readl(sspi->base + sspi->regs->int_st), | ||
519 | sspi->base + sspi->regs->int_st); | ||
520 | if (sspi->left_tx_word < sspi->dat_max_frm_len) { | ||
521 | switch (sspi->type) { | ||
522 | case SIRF_REAL_SPI: | ||
523 | writel(readl(sspi->base + sspi->regs->spi_ctrl) | | ||
524 | SIRFSOC_SPI_ENA_AUTO_CLR | | ||
525 | SIRFSOC_SPI_MUL_DAT_MODE, | ||
526 | sspi->base + sspi->regs->spi_ctrl); | ||
527 | writel(sspi->left_tx_word - 1, | ||
528 | sspi->base + sspi->regs->tx_dma_io_len); | ||
529 | writel(sspi->left_tx_word - 1, | ||
530 | sspi->base + sspi->regs->rx_dma_io_len); | ||
531 | break; | ||
532 | case SIRF_USP_SPI_P2: | ||
533 | case SIRF_USP_SPI_A7: | ||
534 | /*USP simulate SPI, tx/rx_dma_io_len indicates bytes*/ | ||
535 | writel(sspi->left_tx_word * sspi->word_width, | ||
536 | sspi->base + sspi->regs->tx_dma_io_len); | ||
537 | writel(sspi->left_tx_word * sspi->word_width, | ||
538 | sspi->base + sspi->regs->rx_dma_io_len); | ||
539 | break; | ||
540 | } | ||
359 | } else { | 541 | } else { |
360 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL), | 542 | if (sspi->type == SIRF_REAL_SPI) |
361 | sspi->base + SIRFSOC_SPI_CTRL); | 543 | writel(readl(sspi->base + sspi->regs->spi_ctrl), |
362 | writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); | 544 | sspi->base + sspi->regs->spi_ctrl); |
363 | writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); | 545 | writel(0, sspi->base + sspi->regs->tx_dma_io_len); |
546 | writel(0, sspi->base + sspi->regs->rx_dma_io_len); | ||
364 | } | 547 | } |
365 | sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, | 548 | sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, |
366 | (t->tx_buf != t->rx_buf) ? | 549 | (t->tx_buf != t->rx_buf) ? |
@@ -385,7 +568,14 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi, | |||
385 | dma_async_issue_pending(sspi->tx_chan); | 568 | dma_async_issue_pending(sspi->tx_chan); |
386 | dma_async_issue_pending(sspi->rx_chan); | 569 | dma_async_issue_pending(sspi->rx_chan); |
387 | writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, | 570 | writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, |
388 | sspi->base + SIRFSOC_SPI_TX_RX_EN); | 571 | sspi->base + sspi->regs->tx_rx_en); |
572 | if (sspi->type == SIRF_USP_SPI_P2 || | ||
573 | sspi->type == SIRF_USP_SPI_A7) { | ||
574 | writel(SIRFSOC_SPI_FIFO_START, | ||
575 | sspi->base + sspi->regs->rxfifo_op); | ||
576 | writel(SIRFSOC_SPI_FIFO_START, | ||
577 | sspi->base + sspi->regs->txfifo_op); | ||
578 | } | ||
389 | if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) { | 579 | if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) { |
390 | dev_err(&spi->dev, "transfer timeout\n"); | 580 | dev_err(&spi->dev, "transfer timeout\n"); |
391 | dmaengine_terminate_all(sspi->rx_chan); | 581 | dmaengine_terminate_all(sspi->rx_chan); |
@@ -398,15 +588,21 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi, | |||
398 | */ | 588 | */ |
399 | if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { | 589 | if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { |
400 | dev_err(&spi->dev, "transfer timeout\n"); | 590 | dev_err(&spi->dev, "transfer timeout\n"); |
591 | if (sspi->type == SIRF_USP_SPI_P2 || | ||
592 | sspi->type == SIRF_USP_SPI_A7) | ||
593 | writel(0, sspi->base + sspi->regs->tx_rx_en); | ||
401 | dmaengine_terminate_all(sspi->tx_chan); | 594 | dmaengine_terminate_all(sspi->tx_chan); |
402 | } | 595 | } |
403 | dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE); | 596 | dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE); |
404 | dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE); | 597 | dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE); |
405 | /* TX, RX FIFO stop */ | 598 | /* TX, RX FIFO stop */ |
406 | writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 599 | writel(0, sspi->base + sspi->regs->rxfifo_op); |
407 | writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 600 | writel(0, sspi->base + sspi->regs->txfifo_op); |
408 | if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX) | 601 | if (sspi->left_tx_word >= sspi->dat_max_frm_len) |
409 | writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN); | 602 | writel(0, sspi->base + sspi->regs->tx_rx_en); |
603 | if (sspi->type == SIRF_USP_SPI_P2 || | ||
604 | sspi->type == SIRF_USP_SPI_A7) | ||
605 | writel(0, sspi->base + sspi->regs->tx_rx_en); | ||
410 | } | 606 | } |
411 | 607 | ||
412 | static void spi_sirfsoc_pio_transfer(struct spi_device *spi, | 608 | static void spi_sirfsoc_pio_transfer(struct spi_device *spi, |
@@ -414,57 +610,105 @@ static void spi_sirfsoc_pio_transfer(struct spi_device *spi, | |||
414 | { | 610 | { |
415 | struct sirfsoc_spi *sspi; | 611 | struct sirfsoc_spi *sspi; |
416 | int timeout = t->len * 10; | 612 | int timeout = t->len * 10; |
613 | unsigned int data_units; | ||
417 | 614 | ||
418 | sspi = spi_master_get_devdata(spi->master); | 615 | sspi = spi_master_get_devdata(spi->master); |
419 | do { | 616 | do { |
420 | writel(SIRFSOC_SPI_FIFO_RESET, | 617 | writel(SIRFSOC_SPI_FIFO_RESET, |
421 | sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 618 | sspi->base + sspi->regs->rxfifo_op); |
422 | writel(SIRFSOC_SPI_FIFO_RESET, | 619 | writel(SIRFSOC_SPI_FIFO_RESET, |
423 | sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 620 | sspi->base + sspi->regs->txfifo_op); |
424 | writel(SIRFSOC_SPI_FIFO_START, | 621 | switch (sspi->type) { |
425 | sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 622 | case SIRF_USP_SPI_P2: |
426 | writel(SIRFSOC_SPI_FIFO_START, | 623 | writel(0x0, sspi->base + sspi->regs->rxfifo_op); |
427 | sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 624 | writel(0x0, sspi->base + sspi->regs->txfifo_op); |
428 | writel(0, sspi->base + SIRFSOC_SPI_INT_EN); | 625 | writel(0, sspi->base + sspi->regs->int_en); |
429 | writel(SIRFSOC_SPI_INT_MASK_ALL, | 626 | writel(readl(sspi->base + sspi->regs->int_st), |
430 | sspi->base + SIRFSOC_SPI_INT_STATUS); | 627 | sspi->base + sspi->regs->int_st); |
431 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | | 628 | writel(min((sspi->left_tx_word * sspi->word_width), |
432 | SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR, | 629 | sspi->fifo_size), |
433 | sspi->base + SIRFSOC_SPI_CTRL); | 630 | sspi->base + sspi->regs->tx_dma_io_len); |
434 | writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width)) | 631 | writel(min((sspi->left_rx_word * sspi->word_width), |
435 | - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); | 632 | sspi->fifo_size), |
436 | writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width)) | 633 | sspi->base + sspi->regs->rx_dma_io_len); |
437 | - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); | 634 | break; |
438 | while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) | 635 | case SIRF_USP_SPI_A7: |
439 | & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word) | 636 | writel(0x0, sspi->base + sspi->regs->rxfifo_op); |
637 | writel(0x0, sspi->base + sspi->regs->txfifo_op); | ||
638 | writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr); | ||
639 | writel(readl(sspi->base + sspi->regs->int_st), | ||
640 | sspi->base + sspi->regs->int_st); | ||
641 | writel(min((sspi->left_tx_word * sspi->word_width), | ||
642 | sspi->fifo_size), | ||
643 | sspi->base + sspi->regs->tx_dma_io_len); | ||
644 | writel(min((sspi->left_rx_word * sspi->word_width), | ||
645 | sspi->fifo_size), | ||
646 | sspi->base + sspi->regs->rx_dma_io_len); | ||
647 | break; | ||
648 | case SIRF_REAL_SPI: | ||
649 | writel(SIRFSOC_SPI_FIFO_START, | ||
650 | sspi->base + sspi->regs->rxfifo_op); | ||
651 | writel(SIRFSOC_SPI_FIFO_START, | ||
652 | sspi->base + sspi->regs->txfifo_op); | ||
653 | writel(0, sspi->base + sspi->regs->int_en); | ||
654 | writel(readl(sspi->base + sspi->regs->int_st), | ||
655 | sspi->base + sspi->regs->int_st); | ||
656 | writel(readl(sspi->base + sspi->regs->spi_ctrl) | | ||
657 | SIRFSOC_SPI_MUL_DAT_MODE | | ||
658 | SIRFSOC_SPI_ENA_AUTO_CLR, | ||
659 | sspi->base + sspi->regs->spi_ctrl); | ||
660 | data_units = sspi->fifo_size / sspi->word_width; | ||
661 | writel(min(sspi->left_tx_word, data_units) - 1, | ||
662 | sspi->base + sspi->regs->tx_dma_io_len); | ||
663 | writel(min(sspi->left_rx_word, data_units) - 1, | ||
664 | sspi->base + sspi->regs->rx_dma_io_len); | ||
665 | break; | ||
666 | } | ||
667 | while (!((readl(sspi->base + sspi->regs->txfifo_st) | ||
668 | & SIRFSOC_SPI_FIFO_FULL_MASK(sspi))) && | ||
669 | sspi->left_tx_word) | ||
440 | sspi->tx_word(sspi); | 670 | sspi->tx_word(sspi); |
441 | writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN | | 671 | writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN | |
442 | SIRFSOC_SPI_TX_UFLOW_INT_EN | | 672 | SIRFSOC_SPI_TX_UFLOW_INT_EN | |
443 | SIRFSOC_SPI_RX_OFLOW_INT_EN | | 673 | SIRFSOC_SPI_RX_OFLOW_INT_EN | |
444 | SIRFSOC_SPI_RX_IO_DMA_INT_EN, | 674 | SIRFSOC_SPI_RX_IO_DMA_INT_EN, |
445 | sspi->base + SIRFSOC_SPI_INT_EN); | 675 | sspi->base + sspi->regs->int_en); |
446 | writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, | 676 | writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, |
447 | sspi->base + SIRFSOC_SPI_TX_RX_EN); | 677 | sspi->base + sspi->regs->tx_rx_en); |
678 | if (sspi->type == SIRF_USP_SPI_P2 || | ||
679 | sspi->type == SIRF_USP_SPI_A7) { | ||
680 | writel(SIRFSOC_SPI_FIFO_START, | ||
681 | sspi->base + sspi->regs->rxfifo_op); | ||
682 | writel(SIRFSOC_SPI_FIFO_START, | ||
683 | sspi->base + sspi->regs->txfifo_op); | ||
684 | } | ||
448 | if (!wait_for_completion_timeout(&sspi->tx_done, timeout) || | 685 | if (!wait_for_completion_timeout(&sspi->tx_done, timeout) || |
449 | !wait_for_completion_timeout(&sspi->rx_done, timeout)) { | 686 | !wait_for_completion_timeout(&sspi->rx_done, timeout)) { |
450 | dev_err(&spi->dev, "transfer timeout\n"); | 687 | dev_err(&spi->dev, "transfer timeout\n"); |
688 | if (sspi->type == SIRF_USP_SPI_P2 || | ||
689 | sspi->type == SIRF_USP_SPI_A7) | ||
690 | writel(0, sspi->base + sspi->regs->tx_rx_en); | ||
451 | break; | 691 | break; |
452 | } | 692 | } |
453 | while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS) | 693 | while (!((readl(sspi->base + sspi->regs->rxfifo_st) |
454 | & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word) | 694 | & SIRFSOC_SPI_FIFO_EMPTY_MASK(sspi))) && |
695 | sspi->left_rx_word) | ||
455 | sspi->rx_word(sspi); | 696 | sspi->rx_word(sspi); |
456 | writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 697 | if (sspi->type == SIRF_USP_SPI_P2 || |
457 | writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 698 | sspi->type == SIRF_USP_SPI_A7) |
699 | writel(0, sspi->base + sspi->regs->tx_rx_en); | ||
700 | writel(0, sspi->base + sspi->regs->rxfifo_op); | ||
701 | writel(0, sspi->base + sspi->regs->txfifo_op); | ||
458 | } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0); | 702 | } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0); |
459 | } | 703 | } |
460 | 704 | ||
461 | static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) | 705 | static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) |
462 | { | 706 | { |
463 | struct sirfsoc_spi *sspi; | 707 | struct sirfsoc_spi *sspi; |
464 | sspi = spi_master_get_devdata(spi->master); | ||
465 | 708 | ||
466 | sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage; | 709 | sspi = spi_master_get_devdata(spi->master); |
467 | sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage; | 710 | sspi->tx = t->tx_buf; |
711 | sspi->rx = t->rx_buf; | ||
468 | sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width; | 712 | sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width; |
469 | reinit_completion(&sspi->rx_done); | 713 | reinit_completion(&sspi->rx_done); |
470 | reinit_completion(&sspi->tx_done); | 714 | reinit_completion(&sspi->tx_done); |
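
For reference: sspi->dummypage and the t->tx_buf/t->rx_buf fallbacks above can be dropped because the probe path further down in this patch sets the core flags that make the SPI core substitute its own dummy buffers whenever a transfer lacks tx_buf or rx_buf.

	/* quoted from spi_sirfsoc_probe() later in this patch: with these
	 * flags set, the core guarantees both buffers are non-NULL, so the
	 * driver no longer needs its private dummy page. */
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
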
@@ -473,7 +717,7 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
473 | * null, just fill command data into command register and wait for its | 717 | * null, just fill command data into command register and wait for its |
474 | * completion. | 718 | * completion. |
475 | */ | 719 | */ |
476 | if (sspi->tx_by_cmd) | 720 | if (sspi->type == SIRF_REAL_SPI && sspi->tx_by_cmd) |
477 | spi_sirfsoc_cmd_transfer(spi, t); | 721 | spi_sirfsoc_cmd_transfer(spi, t); |
478 | else if (IS_DMA_VALID(t)) | 722 | else if (IS_DMA_VALID(t)) |
479 | spi_sirfsoc_dma_transfer(spi, t); | 723 | spi_sirfsoc_dma_transfer(spi, t); |
@@ -488,22 +732,49 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value) | |||
488 | struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master); | 732 | struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master); |
489 | 733 | ||
490 | if (sspi->hw_cs) { | 734 | if (sspi->hw_cs) { |
491 | u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL); | 735 | u32 regval; |
492 | switch (value) { | 736 | |
493 | case BITBANG_CS_ACTIVE: | 737 | switch (sspi->type) { |
494 | if (spi->mode & SPI_CS_HIGH) | 738 | case SIRF_REAL_SPI: |
495 | regval |= SIRFSOC_SPI_CS_IO_OUT; | 739 | regval = readl(sspi->base + sspi->regs->spi_ctrl); |
496 | else | 740 | switch (value) { |
497 | regval &= ~SIRFSOC_SPI_CS_IO_OUT; | 741 | case BITBANG_CS_ACTIVE: |
742 | if (spi->mode & SPI_CS_HIGH) | ||
743 | regval |= SIRFSOC_SPI_CS_IO_OUT; | ||
744 | else | ||
745 | regval &= ~SIRFSOC_SPI_CS_IO_OUT; | ||
746 | break; | ||
747 | case BITBANG_CS_INACTIVE: | ||
748 | if (spi->mode & SPI_CS_HIGH) | ||
749 | regval &= ~SIRFSOC_SPI_CS_IO_OUT; | ||
750 | else | ||
751 | regval |= SIRFSOC_SPI_CS_IO_OUT; | ||
752 | break; | ||
753 | } | ||
754 | writel(regval, sspi->base + sspi->regs->spi_ctrl); | ||
498 | break; | 755 | break; |
499 | case BITBANG_CS_INACTIVE: | 756 | case SIRF_USP_SPI_P2: |
500 | if (spi->mode & SPI_CS_HIGH) | 757 | case SIRF_USP_SPI_A7: |
501 | regval &= ~SIRFSOC_SPI_CS_IO_OUT; | 758 | regval = readl(sspi->base + |
502 | else | 759 | sspi->regs->usp_pin_io_data); |
503 | regval |= SIRFSOC_SPI_CS_IO_OUT; | 760 | switch (value) { |
761 | case BITBANG_CS_ACTIVE: | ||
762 | if (spi->mode & SPI_CS_HIGH) | ||
763 | regval |= SIRFSOC_USP_CS_HIGH_VALUE; | ||
764 | else | ||
765 | regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE); | ||
766 | break; | ||
767 | case BITBANG_CS_INACTIVE: | ||
768 | if (spi->mode & SPI_CS_HIGH) | ||
769 | regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE); | ||
770 | else | ||
771 | regval |= SIRFSOC_USP_CS_HIGH_VALUE; | ||
772 | break; | ||
773 | } | ||
774 | writel(regval, | ||
775 | sspi->base + sspi->regs->usp_pin_io_data); | ||
504 | break; | 776 | break; |
505 | } | 777 | } |
506 | writel(regval, sspi->base + SIRFSOC_SPI_CTRL); | ||
507 | } else { | 778 | } else { |
508 | switch (value) { | 779 | switch (value) { |
509 | case BITBANG_CS_ACTIVE: | 780 | case BITBANG_CS_ACTIVE: |
@@ -518,27 +789,102 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value) | |||
518 | } | 789 | } |
519 | } | 790 | } |
520 | 791 | ||
792 | static int spi_sirfsoc_config_mode(struct spi_device *spi) | ||
793 | { | ||
794 | struct sirfsoc_spi *sspi; | ||
795 | u32 regval, usp_mode1; | ||
796 | |||
797 | sspi = spi_master_get_devdata(spi->master); | ||
798 | regval = readl(sspi->base + sspi->regs->spi_ctrl); | ||
799 | usp_mode1 = readl(sspi->base + sspi->regs->usp_mode1); | ||
800 | if (!(spi->mode & SPI_CS_HIGH)) { | ||
801 | regval |= SIRFSOC_SPI_CS_IDLE_STAT; | ||
802 | usp_mode1 &= ~SIRFSOC_USP_CS_HIGH_VALID; | ||
803 | } else { | ||
804 | regval &= ~SIRFSOC_SPI_CS_IDLE_STAT; | ||
805 | usp_mode1 |= SIRFSOC_USP_CS_HIGH_VALID; | ||
806 | } | ||
807 | if (!(spi->mode & SPI_LSB_FIRST)) { | ||
808 | regval |= SIRFSOC_SPI_TRAN_MSB; | ||
809 | usp_mode1 &= ~SIRFSOC_USP_LSB; | ||
810 | } else { | ||
811 | regval &= ~SIRFSOC_SPI_TRAN_MSB; | ||
812 | usp_mode1 |= SIRFSOC_USP_LSB; | ||
813 | } | ||
814 | if (spi->mode & SPI_CPOL) { | ||
815 | regval |= SIRFSOC_SPI_CLK_IDLE_STAT; | ||
816 | usp_mode1 |= SIRFSOC_USP_SCLK_IDLE_STAT; | ||
817 | } else { | ||
818 | regval &= ~SIRFSOC_SPI_CLK_IDLE_STAT; | ||
819 | usp_mode1 &= ~SIRFSOC_USP_SCLK_IDLE_STAT; | ||
820 | } | ||
821 | /* | ||
822 | * Data should be driven at least 1/2 cycle before the fetch edge | ||
823 | * to make sure that data gets stable at the fetch edge. | ||
824 | */ | ||
825 | if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) || | ||
826 | (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) { | ||
827 | regval &= ~SIRFSOC_SPI_DRV_POS_EDGE; | ||
828 | usp_mode1 |= (SIRFSOC_USP_TXD_FALLING_EDGE | | ||
829 | SIRFSOC_USP_RXD_FALLING_EDGE); | ||
830 | } else { | ||
831 | regval |= SIRFSOC_SPI_DRV_POS_EDGE; | ||
832 | usp_mode1 &= ~(SIRFSOC_USP_RXD_FALLING_EDGE | | ||
833 | SIRFSOC_USP_TXD_FALLING_EDGE); | ||
834 | } | ||
835 | writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) << | ||
836 | SIRFSOC_SPI_FIFO_SC_OFFSET) | | ||
837 | (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) << | ||
838 | SIRFSOC_SPI_FIFO_LC_OFFSET) | | ||
839 | (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) << | ||
840 | SIRFSOC_SPI_FIFO_HC_OFFSET), | ||
841 | sspi->base + sspi->regs->txfifo_level_chk); | ||
842 | writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) << | ||
843 | SIRFSOC_SPI_FIFO_SC_OFFSET) | | ||
844 | (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) << | ||
845 | SIRFSOC_SPI_FIFO_LC_OFFSET) | | ||
846 | (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) << | ||
847 | SIRFSOC_SPI_FIFO_HC_OFFSET), | ||
848 | sspi->base + sspi->regs->rxfifo_level_chk); | ||
849 | /* | ||
850 | * it should never set to hardware cs mode because in hardware cs mode, | ||
851 | * cs signal can't controlled by driver. | ||
852 | */ | ||
853 | switch (sspi->type) { | ||
854 | case SIRF_REAL_SPI: | ||
855 | regval |= SIRFSOC_SPI_CS_IO_MODE; | ||
856 | writel(regval, sspi->base + sspi->regs->spi_ctrl); | ||
857 | break; | ||
858 | case SIRF_USP_SPI_P2: | ||
859 | case SIRF_USP_SPI_A7: | ||
860 | usp_mode1 |= SIRFSOC_USP_SYNC_MODE; | ||
861 | usp_mode1 |= SIRFSOC_USP_TFS_IO_MODE; | ||
862 | usp_mode1 &= ~SIRFSOC_USP_TFS_IO_INPUT; | ||
863 | writel(usp_mode1, sspi->base + sspi->regs->usp_mode1); | ||
864 | break; | ||
865 | } | ||
866 | |||
867 | return 0; | ||
868 | } | ||
869 | |||
521 | static int | 870 | static int |
522 | spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | 871 | spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) |
523 | { | 872 | { |
524 | struct sirfsoc_spi *sspi; | 873 | struct sirfsoc_spi *sspi; |
525 | u8 bits_per_word = 0; | 874 | u8 bits_per_word = 0; |
526 | int hz = 0; | 875 | int hz = 0; |
527 | u32 regval; | 876 | u32 regval, txfifo_ctrl, rxfifo_ctrl, tx_frm_ctl, rx_frm_ctl, usp_mode2; |
528 | u32 txfifo_ctrl, rxfifo_ctrl; | ||
529 | u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4; | ||
530 | 877 | ||
531 | sspi = spi_master_get_devdata(spi->master); | 878 | sspi = spi_master_get_devdata(spi->master); |
532 | 879 | ||
533 | bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; | 880 | bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; |
534 | hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz; | 881 | hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz; |
535 | 882 | ||
536 | regval = (sspi->ctrl_freq / (2 * hz)) - 1; | 883 | usp_mode2 = regval = (sspi->ctrl_freq / (2 * hz)) - 1; |
537 | if (regval > 0xFFFF || regval < 0) { | 884 | if (regval > 0xFFFF || regval < 0) { |
538 | dev_err(&spi->dev, "Speed %d not supported\n", hz); | 885 | dev_err(&spi->dev, "Speed %d not supported\n", hz); |
539 | return -EINVAL; | 886 | return -EINVAL; |
540 | } | 887 | } |
541 | |||
542 | switch (bits_per_word) { | 888 | switch (bits_per_word) { |
543 | case 8: | 889 | case 8: |
544 | regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8; | 890 | regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8; |
@@ -559,94 +905,177 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
559 | sspi->tx_word = spi_sirfsoc_tx_word_u32; | 905 | sspi->tx_word = spi_sirfsoc_tx_word_u32; |
560 | break; | 906 | break; |
561 | default: | 907 | default: |
562 | BUG(); | 908 | dev_err(&spi->dev, "bpw %d not supported\n", bits_per_word); |
909 | return -EINVAL; | ||
563 | } | 910 | } |
564 | |||
565 | sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); | 911 | sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); |
566 | txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | | 912 | txfifo_ctrl = (((sspi->fifo_size / 2) & |
567 | (sspi->word_width >> 1); | 913 | SIRFSOC_SPI_FIFO_THD_MASK(sspi)) |
568 | rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | | 914 | << SIRFSOC_SPI_FIFO_THD_OFFSET) | |
569 | (sspi->word_width >> 1); | 915 | (sspi->word_width >> 1); |
570 | 916 | rxfifo_ctrl = (((sspi->fifo_size / 2) & | |
571 | if (!(spi->mode & SPI_CS_HIGH)) | 917 | SIRFSOC_SPI_FIFO_THD_MASK(sspi)) |
572 | regval |= SIRFSOC_SPI_CS_IDLE_STAT; | 918 | << SIRFSOC_SPI_FIFO_THD_OFFSET) | |
573 | if (!(spi->mode & SPI_LSB_FIRST)) | 919 | (sspi->word_width >> 1); |
574 | regval |= SIRFSOC_SPI_TRAN_MSB; | 920 | writel(txfifo_ctrl, sspi->base + sspi->regs->txfifo_ctrl); |
575 | if (spi->mode & SPI_CPOL) | 921 | writel(rxfifo_ctrl, sspi->base + sspi->regs->rxfifo_ctrl); |
576 | regval |= SIRFSOC_SPI_CLK_IDLE_STAT; | 922 | if (sspi->type == SIRF_USP_SPI_P2 || |
577 | 923 | sspi->type == SIRF_USP_SPI_A7) { | |
578 | /* | 924 | tx_frm_ctl = 0; |
579 | * Data should be driven at least 1/2 cycle before the fetch edge | 925 | tx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_TX_DATA_MASK) |
580 | * to make sure that data gets stable at the fetch edge. | 926 | << SIRFSOC_USP_TX_DATA_OFFSET; |
581 | */ | 927 | tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN |
582 | if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) || | 928 | - 1) & SIRFSOC_USP_TX_SYNC_MASK) << |
583 | (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) | 929 | SIRFSOC_USP_TX_SYNC_OFFSET; |
584 | regval &= ~SIRFSOC_SPI_DRV_POS_EDGE; | 930 | tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN |
585 | else | 931 | + 2 - 1) & SIRFSOC_USP_TX_FRAME_MASK) << |
586 | regval |= SIRFSOC_SPI_DRV_POS_EDGE; | 932 | SIRFSOC_USP_TX_FRAME_OFFSET; |
587 | 933 | tx_frm_ctl |= ((bits_per_word - 1) & | |
588 | writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) | | 934 | SIRFSOC_USP_TX_SHIFTER_MASK) << |
589 | SIRFSOC_SPI_FIFO_LC(fifo_size / 2) | | 935 | SIRFSOC_USP_TX_SHIFTER_OFFSET; |
590 | SIRFSOC_SPI_FIFO_HC(2), | 936 | rx_frm_ctl = 0; |
591 | sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK); | 937 | rx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_RX_DATA_MASK) |
592 | writel(SIRFSOC_SPI_FIFO_SC(2) | | 938 | << SIRFSOC_USP_RX_DATA_OFFSET; |
593 | SIRFSOC_SPI_FIFO_LC(fifo_size / 2) | | 939 | rx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_RXD_DELAY_LEN |
594 | SIRFSOC_SPI_FIFO_HC(fifo_size - 2), | 940 | + 2 - 1) & SIRFSOC_USP_RX_FRAME_MASK) << |
595 | sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK); | 941 | SIRFSOC_USP_RX_FRAME_OFFSET; |
596 | writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL); | 942 | rx_frm_ctl |= ((bits_per_word - 1) |
597 | writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL); | 943 | & SIRFSOC_USP_RX_SHIFTER_MASK) << |
598 | 944 | SIRFSOC_USP_RX_SHIFTER_OFFSET; | |
599 | if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) { | 945 | writel(tx_frm_ctl | (((usp_mode2 >> 10) & |
600 | regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) | | 946 | SIRFSOC_USP_CLK_10_11_MASK) << |
601 | SIRFSOC_SPI_CMD_MODE); | 947 | SIRFSOC_USP_CLK_10_11_OFFSET), |
602 | sspi->tx_by_cmd = true; | 948 | sspi->base + sspi->regs->usp_tx_frame_ctrl); |
603 | } else { | 949 | writel(rx_frm_ctl | (((usp_mode2 >> 12) & |
604 | regval &= ~SIRFSOC_SPI_CMD_MODE; | 950 | SIRFSOC_USP_CLK_12_15_MASK) << |
605 | sspi->tx_by_cmd = false; | 951 | SIRFSOC_USP_CLK_12_15_OFFSET), |
952 | sspi->base + sspi->regs->usp_rx_frame_ctrl); | ||
953 | writel(readl(sspi->base + sspi->regs->usp_mode2) | | ||
954 | ((usp_mode2 & SIRFSOC_USP_CLK_DIVISOR_MASK) << | ||
955 | SIRFSOC_USP_CLK_DIVISOR_OFFSET) | | ||
956 | (SIRFSOC_USP_RXD_DELAY_LEN << | ||
957 | SIRFSOC_USP_RXD_DELAY_OFFSET) | | ||
958 | (SIRFSOC_USP_TXD_DELAY_LEN << | ||
959 | SIRFSOC_USP_TXD_DELAY_OFFSET), | ||
960 | sspi->base + sspi->regs->usp_mode2); | ||
961 | } | ||
962 | if (sspi->type == SIRF_REAL_SPI) | ||
963 | writel(regval, sspi->base + sspi->regs->spi_ctrl); | ||
964 | spi_sirfsoc_config_mode(spi); | ||
965 | if (sspi->type == SIRF_REAL_SPI) { | ||
966 | if (t && t->tx_buf && !t->rx_buf && | ||
967 | (t->len <= SIRFSOC_MAX_CMD_BYTES)) { | ||
968 | sspi->tx_by_cmd = true; | ||
969 | writel(readl(sspi->base + sspi->regs->spi_ctrl) | | ||
970 | (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) | | ||
971 | SIRFSOC_SPI_CMD_MODE), | ||
972 | sspi->base + sspi->regs->spi_ctrl); | ||
973 | } else { | ||
974 | sspi->tx_by_cmd = false; | ||
975 | writel(readl(sspi->base + sspi->regs->spi_ctrl) & | ||
976 | ~SIRFSOC_SPI_CMD_MODE, | ||
977 | sspi->base + sspi->regs->spi_ctrl); | ||
978 | } | ||
606 | } | 979 | } |
607 | /* | ||
608 | * it should never set to hardware cs mode because in hardware cs mode, | ||
609 | * cs signal can't controlled by driver. | ||
610 | */ | ||
611 | regval |= SIRFSOC_SPI_CS_IO_MODE; | ||
612 | writel(regval, sspi->base + SIRFSOC_SPI_CTRL); | ||
613 | |||
614 | if (IS_DMA_VALID(t)) { | 980 | if (IS_DMA_VALID(t)) { |
615 | /* Enable DMA mode for RX, TX */ | 981 | /* Enable DMA mode for RX, TX */ |
616 | writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); | 982 | writel(0, sspi->base + sspi->regs->tx_dma_io_ctrl); |
617 | writel(SIRFSOC_SPI_RX_DMA_FLUSH, | 983 | writel(SIRFSOC_SPI_RX_DMA_FLUSH, |
618 | sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); | 984 | sspi->base + sspi->regs->rx_dma_io_ctrl); |
619 | } else { | 985 | } else { |
620 | /* Enable IO mode for RX, TX */ | 986 | /* Enable IO mode for RX, TX */ |
621 | writel(SIRFSOC_SPI_IO_MODE_SEL, | 987 | writel(SIRFSOC_SPI_IO_MODE_SEL, |
622 | sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); | 988 | sspi->base + sspi->regs->tx_dma_io_ctrl); |
623 | writel(SIRFSOC_SPI_IO_MODE_SEL, | 989 | writel(SIRFSOC_SPI_IO_MODE_SEL, |
624 | sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); | 990 | sspi->base + sspi->regs->rx_dma_io_ctrl); |
625 | } | 991 | } |
626 | |||
627 | return 0; | 992 | return 0; |
628 | } | 993 | } |
629 | 994 | ||
630 | static int spi_sirfsoc_setup(struct spi_device *spi) | 995 | static int spi_sirfsoc_setup(struct spi_device *spi) |
631 | { | 996 | { |
632 | struct sirfsoc_spi *sspi; | 997 | struct sirfsoc_spi *sspi; |
998 | int ret = 0; | ||
633 | 999 | ||
634 | sspi = spi_master_get_devdata(spi->master); | 1000 | sspi = spi_master_get_devdata(spi->master); |
635 | |||
636 | if (spi->cs_gpio == -ENOENT) | 1001 | if (spi->cs_gpio == -ENOENT) |
637 | sspi->hw_cs = true; | 1002 | sspi->hw_cs = true; |
638 | else | 1003 | else { |
639 | sspi->hw_cs = false; | 1004 | sspi->hw_cs = false; |
640 | return spi_sirfsoc_setup_transfer(spi, NULL); | 1005 | if (!spi_get_ctldata(spi)) { |
1006 | void *cs = kmalloc(sizeof(int), GFP_KERNEL); | ||
1007 | if (!cs) { | ||
1008 | ret = -ENOMEM; | ||
1009 | goto exit; | ||
1010 | } | ||
1011 | ret = gpio_is_valid(spi->cs_gpio); | ||
1012 | if (!ret) { | ||
1013 | dev_err(&spi->dev, "no valid gpio\n"); | ||
1014 | ret = -ENOENT; | ||
1015 | goto exit; | ||
1016 | } | ||
1017 | ret = gpio_request(spi->cs_gpio, DRIVER_NAME); | ||
1018 | if (ret) { | ||
1019 | dev_err(&spi->dev, "failed to request gpio\n"); | ||
1020 | goto exit; | ||
1021 | } | ||
1022 | spi_set_ctldata(spi, cs); | ||
1023 | } | ||
1024 | } | ||
1025 | spi_sirfsoc_config_mode(spi); | ||
1026 | spi_sirfsoc_chipselect(spi, BITBANG_CS_INACTIVE); | ||
1027 | exit: | ||
1028 | return ret; | ||
1029 | } | ||
1030 | |||
1031 | static void spi_sirfsoc_cleanup(struct spi_device *spi) | ||
1032 | { | ||
1033 | if (spi_get_ctldata(spi)) { | ||
1034 | gpio_free(spi->cs_gpio); | ||
1035 | kfree(spi_get_ctldata(spi)); | ||
1036 | } | ||
641 | } | 1037 | } |
642 | 1038 | ||
1039 | static const struct sirf_spi_comp_data sirf_real_spi = { | ||
1040 | .regs = &real_spi_register, | ||
1041 | .type = SIRF_REAL_SPI, | ||
1042 | .dat_max_frm_len = 64 * 1024, | ||
1043 | .fifo_size = 256, | ||
1044 | }; | ||
1045 | |||
1046 | static const struct sirf_spi_comp_data sirf_usp_spi_p2 = { | ||
1047 | .regs = &usp_spi_register, | ||
1048 | .type = SIRF_USP_SPI_P2, | ||
1049 | .dat_max_frm_len = 1024 * 1024, | ||
1050 | .fifo_size = 128, | ||
1051 | .hwinit = sirfsoc_usp_hwinit, | ||
1052 | }; | ||
1053 | |||
1054 | static const struct sirf_spi_comp_data sirf_usp_spi_a7 = { | ||
1055 | .regs = &usp_spi_register, | ||
1056 | .type = SIRF_USP_SPI_A7, | ||
1057 | .dat_max_frm_len = 1024 * 1024, | ||
1058 | .fifo_size = 512, | ||
1059 | .hwinit = sirfsoc_usp_hwinit, | ||
1060 | }; | ||
1061 | |||
1062 | static const struct of_device_id spi_sirfsoc_of_match[] = { | ||
1063 | { .compatible = "sirf,prima2-spi", .data = &sirf_real_spi}, | ||
1064 | { .compatible = "sirf,prima2-usp-spi", .data = &sirf_usp_spi_p2}, | ||
1065 | { .compatible = "sirf,atlas7-usp-spi", .data = &sirf_usp_spi_a7}, | ||
1066 | {} | ||
1067 | }; | ||
1068 | MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); | ||
1069 | |||
643 | static int spi_sirfsoc_probe(struct platform_device *pdev) | 1070 | static int spi_sirfsoc_probe(struct platform_device *pdev) |
644 | { | 1071 | { |
645 | struct sirfsoc_spi *sspi; | 1072 | struct sirfsoc_spi *sspi; |
646 | struct spi_master *master; | 1073 | struct spi_master *master; |
647 | struct resource *mem_res; | 1074 | struct resource *mem_res; |
1075 | struct sirf_spi_comp_data *spi_comp_data; | ||
648 | int irq; | 1076 | int irq; |
649 | int i, ret; | 1077 | int ret; |
1078 | const struct of_device_id *match; | ||
650 | 1079 | ||
651 | ret = device_reset(&pdev->dev); | 1080 | ret = device_reset(&pdev->dev); |
652 | if (ret) { | 1081 | if (ret) { |
@@ -659,16 +1088,22 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
659 | dev_err(&pdev->dev, "Unable to allocate SPI master\n"); | 1088 | dev_err(&pdev->dev, "Unable to allocate SPI master\n"); |
660 | return -ENOMEM; | 1089 | return -ENOMEM; |
661 | } | 1090 | } |
1091 | match = of_match_node(spi_sirfsoc_of_match, pdev->dev.of_node); | ||
662 | platform_set_drvdata(pdev, master); | 1092 | platform_set_drvdata(pdev, master); |
663 | sspi = spi_master_get_devdata(master); | 1093 | sspi = spi_master_get_devdata(master); |
664 | 1094 | sspi->fifo_full_offset = ilog2(sspi->fifo_size); | |
1095 | spi_comp_data = (struct sirf_spi_comp_data *)match->data; | ||
1096 | sspi->regs = spi_comp_data->regs; | ||
1097 | sspi->type = spi_comp_data->type; | ||
1098 | sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1; | ||
1099 | sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len; | ||
1100 | sspi->fifo_size = spi_comp_data->fifo_size; | ||
665 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1101 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
666 | sspi->base = devm_ioremap_resource(&pdev->dev, mem_res); | 1102 | sspi->base = devm_ioremap_resource(&pdev->dev, mem_res); |
667 | if (IS_ERR(sspi->base)) { | 1103 | if (IS_ERR(sspi->base)) { |
668 | ret = PTR_ERR(sspi->base); | 1104 | ret = PTR_ERR(sspi->base); |
669 | goto free_master; | 1105 | goto free_master; |
670 | } | 1106 | } |
671 | |||
672 | irq = platform_get_irq(pdev, 0); | 1107 | irq = platform_get_irq(pdev, 0); |
673 | if (irq < 0) { | 1108 | if (irq < 0) { |
674 | ret = -ENXIO; | 1109 | ret = -ENXIO; |
@@ -684,11 +1119,13 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
684 | sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer; | 1119 | sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer; |
685 | sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer; | 1120 | sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer; |
686 | sspi->bitbang.master->setup = spi_sirfsoc_setup; | 1121 | sspi->bitbang.master->setup = spi_sirfsoc_setup; |
1122 | sspi->bitbang.master->cleanup = spi_sirfsoc_cleanup; | ||
687 | master->bus_num = pdev->id; | 1123 | master->bus_num = pdev->id; |
688 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH; | 1124 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH; |
689 | master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) | | 1125 | master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) | |
690 | SPI_BPW_MASK(16) | SPI_BPW_MASK(32); | 1126 | SPI_BPW_MASK(16) | SPI_BPW_MASK(32); |
691 | master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ; | 1127 | master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ; |
1128 | master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; | ||
692 | sspi->bitbang.master->dev.of_node = pdev->dev.of_node; | 1129 | sspi->bitbang.master->dev.of_node = pdev->dev.of_node; |
693 | 1130 | ||
694 | /* request DMA channels */ | 1131 | /* request DMA channels */ |
@@ -711,47 +1148,19 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
711 | goto free_tx_dma; | 1148 | goto free_tx_dma; |
712 | } | 1149 | } |
713 | clk_prepare_enable(sspi->clk); | 1150 | clk_prepare_enable(sspi->clk); |
1151 | if (spi_comp_data->hwinit) | ||
1152 | spi_comp_data->hwinit(sspi); | ||
714 | sspi->ctrl_freq = clk_get_rate(sspi->clk); | 1153 | sspi->ctrl_freq = clk_get_rate(sspi->clk); |
715 | 1154 | ||
716 | init_completion(&sspi->rx_done); | 1155 | init_completion(&sspi->rx_done); |
717 | init_completion(&sspi->tx_done); | 1156 | init_completion(&sspi->tx_done); |
718 | 1157 | ||
719 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | ||
720 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | ||
721 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | ||
722 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | ||
723 | /* We are not using dummy delay between command and data */ | ||
724 | writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL); | ||
725 | |||
726 | sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL); | ||
727 | if (!sspi->dummypage) { | ||
728 | ret = -ENOMEM; | ||
729 | goto free_clk; | ||
730 | } | ||
731 | |||
732 | ret = spi_bitbang_start(&sspi->bitbang); | 1158 | ret = spi_bitbang_start(&sspi->bitbang); |
733 | if (ret) | 1159 | if (ret) |
734 | goto free_dummypage; | 1160 | goto free_clk; |
735 | for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) { | ||
736 | if (master->cs_gpios[i] == -ENOENT) | ||
737 | continue; | ||
738 | if (!gpio_is_valid(master->cs_gpios[i])) { | ||
739 | dev_err(&pdev->dev, "no valid gpio\n"); | ||
740 | ret = -EINVAL; | ||
741 | goto free_dummypage; | ||
742 | } | ||
743 | ret = devm_gpio_request(&pdev->dev, | ||
744 | master->cs_gpios[i], DRIVER_NAME); | ||
745 | if (ret) { | ||
746 | dev_err(&pdev->dev, "failed to request gpio\n"); | ||
747 | goto free_dummypage; | ||
748 | } | ||
749 | } | ||
750 | dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num); | 1161 | dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num); |
751 | 1162 | ||
752 | return 0; | 1163 | return 0; |
753 | free_dummypage: | ||
754 | kfree(sspi->dummypage); | ||
755 | free_clk: | 1164 | free_clk: |
756 | clk_disable_unprepare(sspi->clk); | 1165 | clk_disable_unprepare(sspi->clk); |
757 | clk_put(sspi->clk); | 1166 | clk_put(sspi->clk); |
@@ -772,9 +1181,7 @@ static int spi_sirfsoc_remove(struct platform_device *pdev) | |||
772 | 1181 | ||
773 | master = platform_get_drvdata(pdev); | 1182 | master = platform_get_drvdata(pdev); |
774 | sspi = spi_master_get_devdata(master); | 1183 | sspi = spi_master_get_devdata(master); |
775 | |||
776 | spi_bitbang_stop(&sspi->bitbang); | 1184 | spi_bitbang_stop(&sspi->bitbang); |
777 | kfree(sspi->dummypage); | ||
778 | clk_disable_unprepare(sspi->clk); | 1185 | clk_disable_unprepare(sspi->clk); |
779 | clk_put(sspi->clk); | 1186 | clk_put(sspi->clk); |
780 | dma_release_channel(sspi->rx_chan); | 1187 | dma_release_channel(sspi->rx_chan); |
@@ -804,24 +1211,17 @@ static int spi_sirfsoc_resume(struct device *dev) | |||
804 | struct sirfsoc_spi *sspi = spi_master_get_devdata(master); | 1211 | struct sirfsoc_spi *sspi = spi_master_get_devdata(master); |
805 | 1212 | ||
806 | clk_enable(sspi->clk); | 1213 | clk_enable(sspi->clk); |
807 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 1214 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op); |
808 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 1215 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op); |
809 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 1216 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op); |
810 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 1217 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->rxfifo_op); |
811 | 1218 | return 0; | |
812 | return spi_master_resume(master); | ||
813 | } | 1219 | } |
814 | #endif | 1220 | #endif |
815 | 1221 | ||
816 | static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend, | 1222 | static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend, |
817 | spi_sirfsoc_resume); | 1223 | spi_sirfsoc_resume); |
818 | 1224 | ||
819 | static const struct of_device_id spi_sirfsoc_of_match[] = { | ||
820 | { .compatible = "sirf,prima2-spi", }, | ||
821 | {} | ||
822 | }; | ||
823 | MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); | ||
824 | |||
825 | static struct platform_driver spi_sirfsoc_driver = { | 1225 | static struct platform_driver spi_sirfsoc_driver = { |
826 | .driver = { | 1226 | .driver = { |
827 | .name = DRIVER_NAME, | 1227 | .name = DRIVER_NAME, |
@@ -835,4 +1235,5 @@ module_platform_driver(spi_sirfsoc_driver); | |||
835 | MODULE_DESCRIPTION("SiRF SoC SPI master driver"); | 1235 | MODULE_DESCRIPTION("SiRF SoC SPI master driver"); |
836 | MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>"); | 1236 | MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>"); |
837 | MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>"); | 1237 | MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>"); |
1238 | MODULE_AUTHOR("Qipan Li <Qipan.Li@csr.com>"); | ||
838 | MODULE_LICENSE("GPL v2"); | 1239 | MODULE_LICENSE("GPL v2"); |
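The reworked probe above no longer hard-codes register offsets and FIFO geometry; it looks them up through the .data pointer of the matched of_device_id (cast to struct sirf_spi_comp_data). A minimal, self-contained sketch of that pattern follows; every name in it is illustrative, not taken from the driver:

    #include <linux/of.h>
    #include <linux/platform_device.h>

    /* Illustrative per-SoC compatible data, mirroring sirf_spi_comp_data. */
    struct demo_comp_data {
    	unsigned int fifo_size;
    	void (*hwinit)(void __iomem *base);	/* optional SoC-specific hook */
    };

    static const struct demo_comp_data demo_soc_a = { .fifo_size = 64 };
    static const struct demo_comp_data demo_soc_b = { .fifo_size = 128 };

    static const struct of_device_id demo_of_match[] = {
    	{ .compatible = "vendor,soc-a-spi", .data = &demo_soc_a },
    	{ .compatible = "vendor,soc-b-spi", .data = &demo_soc_b },
    	{ /* sentinel */ }
    };

    static int demo_probe(struct platform_device *pdev)
    {
    	const struct of_device_id *match;
    	const struct demo_comp_data *cdata;

    	match = of_match_node(demo_of_match, pdev->dev.of_node);
    	if (!match)
    		return -EINVAL;
    	cdata = match->data;
    	/* copy cdata->fifo_size etc. into driver state, then call
    	 * cdata->hwinit() once clocks are enabled, as the probe above does */
    	return 0;
    }

Keeping the per-SoC differences in a small comp-data structure lets one probe path serve the prima2 SPI, prima2 USP and atlas7 USP variants without scattering conditionals through the transfer paths.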
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c new file mode 100644 index 000000000000..87b20a511a6b --- /dev/null +++ b/drivers/spi/spi-zynqmp-gqspi.c | |||
@@ -0,0 +1,1122 @@ | |||
1 | /* | ||
2 | * Xilinx Zynq UltraScale+ MPSoC Quad-SPI (QSPI) controller driver | ||
3 | * (master mode only) | ||
4 | * | ||
5 | * Copyright (C) 2009 - 2015 Xilinx, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/clk.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/dma-mapping.h> | ||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/of_irq.h> | ||
21 | #include <linux/of_address.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/spi/spi.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | |||
27 | /* Generic QSPI register offsets */ | ||
28 | #define GQSPI_CONFIG_OFST 0x00000100 | ||
29 | #define GQSPI_ISR_OFST 0x00000104 | ||
30 | #define GQSPI_IDR_OFST 0x0000010C | ||
31 | #define GQSPI_IER_OFST 0x00000108 | ||
32 | #define GQSPI_IMASK_OFST 0x00000110 | ||
33 | #define GQSPI_EN_OFST 0x00000114 | ||
34 | #define GQSPI_TXD_OFST 0x0000011C | ||
35 | #define GQSPI_RXD_OFST 0x00000120 | ||
36 | #define GQSPI_TX_THRESHOLD_OFST 0x00000128 | ||
37 | #define GQSPI_RX_THRESHOLD_OFST 0x0000012C | ||
38 | #define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138 | ||
39 | #define GQSPI_GEN_FIFO_OFST 0x00000140 | ||
40 | #define GQSPI_SEL_OFST 0x00000144 | ||
41 | #define GQSPI_GF_THRESHOLD_OFST 0x00000150 | ||
42 | #define GQSPI_FIFO_CTRL_OFST 0x0000014C | ||
43 | #define GQSPI_QSPIDMA_DST_CTRL_OFST 0x0000080C | ||
44 | #define GQSPI_QSPIDMA_DST_SIZE_OFST 0x00000804 | ||
45 | #define GQSPI_QSPIDMA_DST_STS_OFST 0x00000808 | ||
46 | #define GQSPI_QSPIDMA_DST_I_STS_OFST 0x00000814 | ||
47 | #define GQSPI_QSPIDMA_DST_I_EN_OFST 0x00000818 | ||
48 | #define GQSPI_QSPIDMA_DST_I_DIS_OFST 0x0000081C | ||
49 | #define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820 | ||
50 | #define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800 | ||
51 | #define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828 | ||
52 | |||
53 | /* GQSPI register bit masks */ | ||
54 | #define GQSPI_SEL_MASK 0x00000001 | ||
55 | #define GQSPI_EN_MASK 0x00000001 | ||
56 | #define GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK 0x00000020 | ||
57 | #define GQSPI_ISR_WR_TO_CLR_MASK 0x00000002 | ||
58 | #define GQSPI_IDR_ALL_MASK 0x00000FBE | ||
59 | #define GQSPI_CFG_MODE_EN_MASK 0xC0000000 | ||
60 | #define GQSPI_CFG_GEN_FIFO_START_MODE_MASK 0x20000000 | ||
61 | #define GQSPI_CFG_ENDIAN_MASK 0x04000000 | ||
62 | #define GQSPI_CFG_EN_POLL_TO_MASK 0x00100000 | ||
63 | #define GQSPI_CFG_WP_HOLD_MASK 0x00080000 | ||
64 | #define GQSPI_CFG_BAUD_RATE_DIV_MASK 0x00000038 | ||
65 | #define GQSPI_CFG_CLK_PHA_MASK 0x00000004 | ||
66 | #define GQSPI_CFG_CLK_POL_MASK 0x00000002 | ||
67 | #define GQSPI_CFG_START_GEN_FIFO_MASK 0x10000000 | ||
68 | #define GQSPI_GENFIFO_IMM_DATA_MASK 0x000000FF | ||
69 | #define GQSPI_GENFIFO_DATA_XFER 0x00000100 | ||
70 | #define GQSPI_GENFIFO_EXP 0x00000200 | ||
71 | #define GQSPI_GENFIFO_MODE_SPI 0x00000400 | ||
72 | #define GQSPI_GENFIFO_MODE_DUALSPI 0x00000800 | ||
73 | #define GQSPI_GENFIFO_MODE_QUADSPI 0x00000C00 | ||
74 | #define GQSPI_GENFIFO_MODE_MASK 0x00000C00 | ||
75 | #define GQSPI_GENFIFO_CS_LOWER 0x00001000 | ||
76 | #define GQSPI_GENFIFO_CS_UPPER 0x00002000 | ||
77 | #define GQSPI_GENFIFO_BUS_LOWER 0x00004000 | ||
78 | #define GQSPI_GENFIFO_BUS_UPPER 0x00008000 | ||
79 | #define GQSPI_GENFIFO_BUS_BOTH 0x0000C000 | ||
80 | #define GQSPI_GENFIFO_BUS_MASK 0x0000C000 | ||
81 | #define GQSPI_GENFIFO_TX 0x00010000 | ||
82 | #define GQSPI_GENFIFO_RX 0x00020000 | ||
83 | #define GQSPI_GENFIFO_STRIPE 0x00040000 | ||
84 | #define GQSPI_GENFIFO_POLL 0x00080000 | ||
85 | #define GQSPI_GENFIFO_EXP_START 0x00000100 | ||
86 | #define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004 | ||
87 | #define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002 | ||
88 | #define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001 | ||
89 | #define GQSPI_ISR_RXEMPTY_MASK 0x00000800 | ||
90 | #define GQSPI_ISR_GENFIFOFULL_MASK 0x00000400 | ||
91 | #define GQSPI_ISR_GENFIFONOT_FULL_MASK 0x00000200 | ||
92 | #define GQSPI_ISR_TXEMPTY_MASK 0x00000100 | ||
93 | #define GQSPI_ISR_GENFIFOEMPTY_MASK 0x00000080 | ||
94 | #define GQSPI_ISR_RXFULL_MASK 0x00000020 | ||
95 | #define GQSPI_ISR_RXNEMPTY_MASK 0x00000010 | ||
96 | #define GQSPI_ISR_TXFULL_MASK 0x00000008 | ||
97 | #define GQSPI_ISR_TXNOT_FULL_MASK 0x00000004 | ||
98 | #define GQSPI_ISR_POLL_TIME_EXPIRE_MASK 0x00000002 | ||
99 | #define GQSPI_IER_TXNOT_FULL_MASK 0x00000004 | ||
100 | #define GQSPI_IER_RXEMPTY_MASK 0x00000800 | ||
101 | #define GQSPI_IER_POLL_TIME_EXPIRE_MASK 0x00000002 | ||
102 | #define GQSPI_IER_RXNEMPTY_MASK 0x00000010 | ||
103 | #define GQSPI_IER_GENFIFOEMPTY_MASK 0x00000080 | ||
104 | #define GQSPI_IER_TXEMPTY_MASK 0x00000100 | ||
105 | #define GQSPI_QSPIDMA_DST_INTR_ALL_MASK 0x000000FE | ||
106 | #define GQSPI_QSPIDMA_DST_STS_WTC 0x0000E000 | ||
107 | #define GQSPI_CFG_MODE_EN_DMA_MASK 0x80000000 | ||
108 | #define GQSPI_ISR_IDR_MASK 0x00000994 | ||
109 | #define GQSPI_QSPIDMA_DST_I_EN_DONE_MASK 0x00000002 | ||
110 | #define GQSPI_QSPIDMA_DST_I_STS_DONE_MASK 0x00000002 | ||
111 | #define GQSPI_IRQ_MASK 0x00000980 | ||
112 | |||
113 | #define GQSPI_CFG_BAUD_RATE_DIV_SHIFT 3 | ||
114 | #define GQSPI_GENFIFO_CS_SETUP 0x4 | ||
115 | #define GQSPI_GENFIFO_CS_HOLD 0x3 | ||
116 | #define GQSPI_TXD_DEPTH 64 | ||
117 | #define GQSPI_RX_FIFO_THRESHOLD 32 | ||
118 | #define GQSPI_RX_FIFO_FILL (GQSPI_RX_FIFO_THRESHOLD * 4) | ||
119 | #define GQSPI_TX_FIFO_THRESHOLD_RESET_VAL 32 | ||
120 | #define GQSPI_TX_FIFO_FILL (GQSPI_TXD_DEPTH -\ | ||
121 | GQSPI_TX_FIFO_THRESHOLD_RESET_VAL) | ||
122 | #define GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL 0x10 | ||
123 | #define GQSPI_QSPIDMA_DST_CTRL_RESET_VAL 0x803FFA00 | ||
124 | #define GQSPI_SELECT_FLASH_CS_LOWER 0x1 | ||
125 | #define GQSPI_SELECT_FLASH_CS_UPPER 0x2 | ||
126 | #define GQSPI_SELECT_FLASH_CS_BOTH 0x3 | ||
127 | #define GQSPI_SELECT_FLASH_BUS_LOWER 0x1 | ||
128 | #define GQSPI_SELECT_FLASH_BUS_UPPER 0x2 | ||
129 | #define GQSPI_SELECT_FLASH_BUS_BOTH 0x3 | ||
130 | #define GQSPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */ | ||
131 | #define GQSPI_BAUD_DIV_SHIFT 2 /* Baud rate divisor shift */ | ||
132 | #define GQSPI_SELECT_MODE_SPI 0x1 | ||
133 | #define GQSPI_SELECT_MODE_DUALSPI 0x2 | ||
134 | #define GQSPI_SELECT_MODE_QUADSPI 0x4 | ||
135 | #define GQSPI_DMA_UNALIGN 0x3 | ||
136 | #define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */ | ||
137 | |||
138 | enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA}; | ||
139 | |||
140 | /** | ||
141 | * struct zynqmp_qspi - Defines qspi driver instance | ||
142 | * @regs: Virtual address of the QSPI controller registers | ||
143 | * @refclk: Pointer to the peripheral clock | ||
144 | * @pclk: Pointer to the APB clock | ||
145 | * @irq: IRQ number | ||
146 | * @dev: Pointer to struct device | ||
147 | * @txbuf: Pointer to the TX buffer | ||
148 | * @rxbuf: Pointer to the RX buffer | ||
149 | * @bytes_to_transfer: Number of bytes left to transfer | ||
150 | * @bytes_to_receive: Number of bytes left to receive | ||
151 | * @genfifocs: Used for chip select | ||
152 | * @genfifobus: Used to select the upper or lower bus | ||
153 | * @dma_rx_bytes: Remaining bytes to receive by DMA mode | ||
154 | * @dma_addr: DMA address after mapping the kernel buffer | ||
155 | * @genfifoentry: Used for storing the genfifoentry instruction. | ||
156 | * @mode: Defines the mode in which QSPI is operating | ||
157 | */ | ||
158 | struct zynqmp_qspi { | ||
159 | void __iomem *regs; | ||
160 | struct clk *refclk; | ||
161 | struct clk *pclk; | ||
162 | int irq; | ||
163 | struct device *dev; | ||
164 | const void *txbuf; | ||
165 | void *rxbuf; | ||
166 | int bytes_to_transfer; | ||
167 | int bytes_to_receive; | ||
168 | u32 genfifocs; | ||
169 | u32 genfifobus; | ||
170 | u32 dma_rx_bytes; | ||
171 | dma_addr_t dma_addr; | ||
172 | u32 genfifoentry; | ||
173 | enum mode_type mode; | ||
174 | }; | ||
175 | |||
176 | /** | ||
177 | * zynqmp_gqspi_read: For GQSPI controller read operation | ||
178 | * @xqspi: Pointer to the zynqmp_qspi structure | ||
179 | * @offset: Offset from where to read | ||
180 | */ | ||
181 | static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset) | ||
182 | { | ||
183 | return readl_relaxed(xqspi->regs + offset); | ||
184 | } | ||
185 | |||
186 | /** | ||
187 | * zynqmp_gqspi_write: For GQSPI controller write operation | ||
188 | * @xqspi: Pointer to the zynqmp_qspi structure | ||
189 | * @offset: Offset where to write | ||
190 | * @val: Value to be written | ||
191 | */ | ||
192 | static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset, | ||
193 | u32 val) | ||
194 | { | ||
195 | writel_relaxed(val, (xqspi->regs + offset)); | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * zynqmp_gqspi_selectslave: For selection of slave device | ||
200 | * @instanceptr: Pointer to the zynqmp_qspi structure | ||
201 | * @slavecs: Chip select to use | ||
202 | * @slavebus: Selects the upper or lower data bus | ||
203 | */ | ||
204 | static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr, | ||
205 | u8 slavecs, u8 slavebus) | ||
206 | { | ||
207 | /* | ||
208 | * Bus and CS lines selected here will be updated in the instance and | ||
209 | * used for subsequent GENFIFO entries during transfer. | ||
210 | */ | ||
211 | |||
212 | /* Choose slave select line */ | ||
213 | switch (slavecs) { | ||
214 | case GQSPI_SELECT_FLASH_CS_BOTH: | ||
215 | instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | | ||
216 | GQSPI_GENFIFO_CS_UPPER; | ||
break; | ||
217 | case GQSPI_SELECT_FLASH_CS_UPPER: | ||
218 | instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; | ||
219 | break; | ||
220 | case GQSPI_SELECT_FLASH_CS_LOWER: | ||
221 | instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER; | ||
222 | break; | ||
223 | default: | ||
224 | dev_warn(instanceptr->dev, "Invalid slave select\n"); | ||
225 | } | ||
226 | |||
227 | /* Choose the bus */ | ||
228 | switch (slavebus) { | ||
229 | case GQSPI_SELECT_FLASH_BUS_BOTH: | ||
230 | instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER | | ||
231 | GQSPI_GENFIFO_BUS_UPPER; | ||
232 | break; | ||
233 | case GQSPI_SELECT_FLASH_BUS_UPPER: | ||
234 | instanceptr->genfifobus = GQSPI_GENFIFO_BUS_UPPER; | ||
235 | break; | ||
236 | case GQSPI_SELECT_FLASH_BUS_LOWER: | ||
237 | instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER; | ||
238 | break; | ||
239 | default: | ||
240 | dev_warn(instanceptr->dev, "Invalid slave bus\n"); | ||
241 | } | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * zynqmp_qspi_init_hw: Initialize the hardware | ||
246 | * @xqspi: Pointer to the zynqmp_qspi structure | ||
247 | * | ||
248 | * The default settings of the QSPI controller's configurable parameters on | ||
249 | * reset are | ||
250 | * - Master mode | ||
251 | * - TX threshold set to 1 | ||
252 | * - RX threshold set to 1 | ||
253 | * - Flash memory interface mode enabled | ||
254 | * This function performs the following actions | ||
255 | * - Disable and clear all the interrupts | ||
256 | * - Enable manual slave select | ||
257 | * - Enable manual start | ||
258 | * - Deselect all the chip select lines | ||
259 | * - Set the little endian mode of TX FIFO and | ||
260 | * - Enable the QSPI controller | ||
261 | */ | ||
262 | static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi) | ||
263 | { | ||
264 | u32 config_reg; | ||
265 | |||
266 | /* Select the GQSPI mode */ | ||
267 | zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK); | ||
268 | /* Clear and disable interrupts */ | ||
269 | zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST, | ||
270 | zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST) | | ||
271 | GQSPI_ISR_WR_TO_CLR_MASK); | ||
272 | /* Clear the DMA STS */ | ||
273 | zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST, | ||
274 | zynqmp_gqspi_read(xqspi, | ||
275 | GQSPI_QSPIDMA_DST_I_STS_OFST)); | ||
276 | zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_STS_OFST, | ||
277 | zynqmp_gqspi_read(xqspi, | ||
278 | GQSPI_QSPIDMA_DST_STS_OFST) | | ||
279 | GQSPI_QSPIDMA_DST_STS_WTC); | ||
280 | zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_IDR_ALL_MASK); | ||
281 | zynqmp_gqspi_write(xqspi, | ||
282 | GQSPI_QSPIDMA_DST_I_DIS_OFST, | ||
283 | GQSPI_QSPIDMA_DST_INTR_ALL_MASK); | ||
284 | /* Disable the GQSPI */ | ||
285 | zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0); | ||
286 | config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); | ||
287 | config_reg &= ~GQSPI_CFG_MODE_EN_MASK; | ||
288 | /* Manual start */ | ||
289 | config_reg |= GQSPI_CFG_GEN_FIFO_START_MODE_MASK; | ||
290 | /* Little endian by default */ | ||
291 | config_reg &= ~GQSPI_CFG_ENDIAN_MASK; | ||
292 | /* Disable poll time out */ | ||
293 | config_reg &= ~GQSPI_CFG_EN_POLL_TO_MASK; | ||
294 | /* Set hold bit */ | ||
295 | config_reg |= GQSPI_CFG_WP_HOLD_MASK; | ||
296 | /* Clear pre-scalar by default */ | ||
297 | config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK; | ||
298 | /* CPHA 0 */ | ||
299 | config_reg &= ~GQSPI_CFG_CLK_PHA_MASK; | ||
300 | /* CPOL 0 */ | ||
301 | config_reg &= ~GQSPI_CFG_CLK_POL_MASK; | ||
302 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); | ||
303 | |||
304 | /* Clear the TX and RX FIFO */ | ||
305 | zynqmp_gqspi_write(xqspi, GQSPI_FIFO_CTRL_OFST, | ||
306 | GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK | | ||
307 | GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK | | ||
308 | GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK); | ||
309 | /* Set by default to allow for high frequencies */ | ||
310 | zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST, | ||
311 | zynqmp_gqspi_read(xqspi, GQSPI_LPBK_DLY_ADJ_OFST) | | ||
312 | GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK); | ||
313 | /* Reset thresholds */ | ||
314 | zynqmp_gqspi_write(xqspi, GQSPI_TX_THRESHOLD_OFST, | ||
315 | GQSPI_TX_FIFO_THRESHOLD_RESET_VAL); | ||
316 | zynqmp_gqspi_write(xqspi, GQSPI_RX_THRESHOLD_OFST, | ||
317 | GQSPI_RX_FIFO_THRESHOLD); | ||
318 | zynqmp_gqspi_write(xqspi, GQSPI_GF_THRESHOLD_OFST, | ||
319 | GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL); | ||
320 | zynqmp_gqspi_selectslave(xqspi, | ||
321 | GQSPI_SELECT_FLASH_CS_LOWER, | ||
322 | GQSPI_SELECT_FLASH_BUS_LOWER); | ||
323 | /* Initialize DMA */ | ||
324 | zynqmp_gqspi_write(xqspi, | ||
325 | GQSPI_QSPIDMA_DST_CTRL_OFST, | ||
326 | GQSPI_QSPIDMA_DST_CTRL_RESET_VAL); | ||
327 | |||
328 | /* Enable the GQSPI */ | ||
329 | zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK); | ||
330 | } | ||
331 | |||
332 | /** | ||
333 | * zynqmp_qspi_copy_read_data: Copy data to RX buffer | ||
334 | * @xqspi: Pointer to the zynqmp_qspi structure | ||
335 | * @data: The variable where data is stored | ||
336 | * @size: Number of bytes to be copied from data to RX buffer | ||
337 | */ | ||
338 | static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi, | ||
339 | ulong data, u8 size) | ||
340 | { | ||
341 | memcpy(xqspi->rxbuf, &data, size); | ||
342 | xqspi->rxbuf += size; | ||
343 | xqspi->bytes_to_receive -= size; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * zynqmp_prepare_transfer_hardware: Prepares hardware for transfer. | ||
348 | * @master: Pointer to the spi_master structure which provides | ||
349 | * information about the controller. | ||
350 | * | ||
351 | * This function enables SPI master controller. | ||
352 | * | ||
353 | * Return: 0 on success; error value otherwise | ||
354 | */ | ||
355 | static int zynqmp_prepare_transfer_hardware(struct spi_master *master) | ||
356 | { | ||
357 | struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); | ||
358 | int ret; | ||
359 | |||
360 | ret = clk_enable(xqspi->refclk); | ||
361 | if (ret) | ||
362 | goto clk_err; | ||
363 | |||
364 | ret = clk_enable(xqspi->pclk); | ||
365 | if (ret) | ||
366 | goto clk_err; | ||
367 | |||
368 | zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK); | ||
369 | return 0; | ||
370 | clk_err: | ||
371 | return ret; | ||
372 | } | ||
373 | |||
374 | /** | ||
375 | * zynqmp_unprepare_transfer_hardware: Relaxes hardware after transfer | ||
376 | * @master: Pointer to the spi_master structure which provides | ||
377 | * information about the controller. | ||
378 | * | ||
379 | * This function disables the SPI master controller. | ||
380 | * | ||
381 | * Return: Always 0 | ||
382 | */ | ||
383 | static int zynqmp_unprepare_transfer_hardware(struct spi_master *master) | ||
384 | { | ||
385 | struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); | ||
386 | |||
387 | zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0); | ||
388 | clk_disable(xqspi->refclk); | ||
389 | clk_disable(xqspi->pclk); | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | /** | ||
394 | * zynqmp_qspi_chipselect: Select or deselect the chip select line | ||
395 | * @qspi: Pointer to the spi_device structure | ||
396 | * @is_high: Select (0) or deselect (1) the chip select line | ||
397 | */ | ||
398 | static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high) | ||
399 | { | ||
400 | struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master); | ||
401 | ulong timeout; | ||
402 | u32 genfifoentry = 0x0, statusreg; | ||
403 | |||
404 | genfifoentry |= GQSPI_GENFIFO_MODE_SPI; | ||
405 | genfifoentry |= xqspi->genfifobus; | ||
406 | |||
407 | if (!is_high) { | ||
408 | genfifoentry |= xqspi->genfifocs; | ||
409 | genfifoentry |= GQSPI_GENFIFO_CS_SETUP; | ||
410 | } else { | ||
411 | genfifoentry |= GQSPI_GENFIFO_CS_HOLD; | ||
412 | } | ||
413 | |||
414 | zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry); | ||
415 | |||
416 | /* Dummy generic FIFO entry */ | ||
417 | zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0); | ||
418 | |||
419 | /* Manually start the generic FIFO command */ | ||
420 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, | ||
421 | zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) | | ||
422 | GQSPI_CFG_START_GEN_FIFO_MASK); | ||
423 | |||
424 | timeout = jiffies + msecs_to_jiffies(1000); | ||
425 | |||
426 | /* Wait until the generic FIFO command is empty */ | ||
427 | do { | ||
428 | statusreg = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST); | ||
429 | |||
430 | if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) && | ||
431 | (statusreg & GQSPI_ISR_TXEMPTY_MASK)) | ||
432 | break; | ||
433 | else | ||
434 | cpu_relax(); | ||
435 | } while (!time_after_eq(jiffies, timeout)); | ||
436 | |||
437 | if (time_after_eq(jiffies, timeout)) | ||
438 | dev_err(xqspi->dev, "Chip select timed out\n"); | ||
439 | } | ||
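As a concrete illustration of the entry assembled above, composing the CS-assert word from the GQSPI_GENFIFO_* definitions at the top of this file (lower bus and lower chip select, as picked by zynqmp_qspi_init_hw()) yields 0x5404. The sketch below only reproduces that arithmetic and is not driver code:

    #include <stdio.h>

    /* Values copied from the GQSPI_GENFIFO_* definitions above. */
    #define MODE_SPI	0x00000400
    #define CS_LOWER	0x00001000
    #define BUS_LOWER	0x00004000
    #define CS_SETUP	0x4	/* immediate field: CS setup time */

    int main(void)
    {
    	unsigned int entry = MODE_SPI | BUS_LOWER | CS_LOWER | CS_SETUP;

    	printf("CS-assert genfifoentry = 0x%x\n", entry);	/* prints 0x5404 */
    	return 0;
    }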
440 | |||
441 | /** | ||
442 | * zynqmp_qspi_setup_transfer: Configure QSPI controller for specified | ||
443 | * transfer | ||
444 | * @qspi: Pointer to the spi_device structure | ||
445 | * @transfer: Pointer to the spi_transfer structure which provides | ||
446 | * information about next transfer setup parameters | ||
447 | * | ||
448 | * Sets the operational mode of QSPI controller for the next QSPI transfer and | ||
449 | * sets the requested clock frequency. | ||
450 | * | ||
451 | * Return: Always 0 | ||
452 | * | ||
453 | * Note: | ||
454 | * If the requested frequency cannot be matched exactly with the | ||
455 | * available pre-scaler values, the driver picks the highest clock | ||
456 | * frequency that does not exceed the requested frequency for the | ||
457 | * transfer. | ||
458 | * | ||
459 | * If the requested frequency lies outside the range supported by the | ||
460 | * QSPI controller, the driver sets the highest or lowest supported | ||
461 | * frequency, respectively. | ||
462 | */ | ||
463 | static int zynqmp_qspi_setup_transfer(struct spi_device *qspi, | ||
464 | struct spi_transfer *transfer) | ||
465 | { | ||
466 | struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master); | ||
467 | ulong clk_rate; | ||
468 | u32 config_reg, req_hz, baud_rate_val = 0; | ||
469 | |||
470 | if (transfer) | ||
471 | req_hz = transfer->speed_hz; | ||
472 | else | ||
473 | req_hz = qspi->max_speed_hz; | ||
474 | |||
475 | /* Set the clock frequency */ | ||
476 | /* If req_hz == 0, default to lowest speed */ | ||
477 | clk_rate = clk_get_rate(xqspi->refclk); | ||
478 | |||
479 | while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) && | ||
480 | (clk_rate / | ||
481 | (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz) | ||
482 | baud_rate_val++; | ||
483 | |||
484 | config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); | ||
485 | |||
486 | /* Set the QSPI clock phase and clock polarity */ | ||
487 | config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) & (~GQSPI_CFG_CLK_POL_MASK); | ||
488 | |||
489 | if (qspi->mode & SPI_CPHA) | ||
490 | config_reg |= GQSPI_CFG_CLK_PHA_MASK; | ||
491 | if (qspi->mode & SPI_CPOL) | ||
492 | config_reg |= GQSPI_CFG_CLK_POL_MASK; | ||
493 | |||
494 | config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK; | ||
495 | config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT); | ||
496 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); | ||
497 | return 0; | ||
498 | } | ||
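A small standalone sketch of the divisor search in the function above: with GQSPI_BAUD_DIV_SHIFT equal to 2, the effective divider is 2 << baud_rate_val, so the loop settles on the smallest power-of-two divider (/2 up to /256) whose output clock does not exceed the requested rate. The clock numbers below are purely illustrative.

    #include <stdio.h>

    #define BAUD_DIV_MAX	7	/* matches GQSPI_BAUD_DIV_MAX */
    #define BAUD_DIV_SHIFT	2	/* matches GQSPI_BAUD_DIV_SHIFT */

    static unsigned int pick_baud_div(unsigned long clk_rate, unsigned long req_hz)
    {
    	unsigned int val = 0;

    	/* Same search as zynqmp_qspi_setup_transfer(): stop at the first
    	 * divider (2 << val) that brings the clock at or below req_hz. */
    	while (val < BAUD_DIV_MAX &&
    	       clk_rate / (BAUD_DIV_SHIFT << val) > req_hz)
    		val++;
    	return val;
    }

    int main(void)
    {
    	/* e.g. 300 MHz reference, 25 MHz requested:
    	 * /2=150M, /4=75M, /8=37.5M, /16=18.75M -> val = 3 */
    	printf("baud_rate_val = %u\n", pick_baud_div(300000000UL, 25000000UL));
    	return 0;
    }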
499 | |||
500 | /** | ||
501 | * zynqmp_qspi_setup: Configure the QSPI controller | ||
502 | * @qspi: Pointer to the spi_device structure | ||
503 | * | ||
504 | * Sets the operational mode of QSPI controller for the next QSPI transfer, | ||
505 | * baud rate and divisor value to setup the requested qspi clock. | ||
506 | * | ||
507 | * Return: 0 on success; error value otherwise. | ||
508 | */ | ||
509 | static int zynqmp_qspi_setup(struct spi_device *qspi) | ||
510 | { | ||
511 | if (qspi->master->busy) | ||
512 | return -EBUSY; | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | /** | ||
517 | * zynqmp_qspi_filltxfifo: Fills the TX FIFO with data from the TX buffer | ||
518 | * as long as there is room in the FIFO and | ||
519 | * bytes remain to be transmitted. | ||
520 | * @xqspi: Pointer to the zynqmp_qspi structure | ||
521 | * @size: Maximum number of 32-bit words to write to the TX FIFO | ||
522 | */ | ||
523 | static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size) | ||
524 | { | ||
525 | u32 count = 0, intermediate; | ||
526 | |||
527 | while ((xqspi->bytes_to_transfer > 0) && (count < size)) { | ||
528 | memcpy(&intermediate, xqspi->txbuf, 4); | ||
529 | zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate); | ||
530 | |||
531 | if (xqspi->bytes_to_transfer >= 4) { | ||
532 | xqspi->txbuf += 4; | ||
533 | xqspi->bytes_to_transfer -= 4; | ||
534 | } else { | ||
535 | xqspi->txbuf += xqspi->bytes_to_transfer; | ||
536 | xqspi->bytes_to_transfer = 0; | ||
537 | } | ||
538 | count++; | ||
539 | } | ||
540 | } | ||
541 | |||
542 | /** | ||
543 | * zynqmp_qspi_readrxfifo: Drains the RX FIFO into the RX buffer as long | ||
544 | * as data remains to be received. | ||
545 | * @xqspi: Pointer to the zynqmp_qspi structure | ||
546 | * @size: Maximum number of bytes to copy from the RX FIFO to the RX buffer | ||
547 | */ | ||
548 | static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size) | ||
549 | { | ||
550 | ulong data; | ||
551 | int count = 0; | ||
552 | |||
553 | while ((count < size) && (xqspi->bytes_to_receive > 0)) { | ||
554 | if (xqspi->bytes_to_receive >= 4) { | ||
555 | (*(u32 *) xqspi->rxbuf) = | ||
556 | zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST); | ||
557 | xqspi->rxbuf += 4; | ||
558 | xqspi->bytes_to_receive -= 4; | ||
559 | count += 4; | ||
560 | } else { | ||
561 | data = zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST); | ||
562 | count += xqspi->bytes_to_receive; | ||
563 | zynqmp_qspi_copy_read_data(xqspi, data, | ||
564 | xqspi->bytes_to_receive); | ||
565 | xqspi->bytes_to_receive = 0; | ||
566 | } | ||
567 | } | ||
568 | } | ||
569 | |||
570 | /** | ||
571 | * zynqmp_process_dma_irq: Handler for DMA done interrupt of QSPI | ||
572 | * controller | ||
573 | * @xqspi: zynqmp_qspi instance pointer | ||
574 | * | ||
575 | * This function handles DMA interrupt only. | ||
576 | */ | ||
577 | static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) | ||
578 | { | ||
579 | u32 config_reg, genfifoentry; | ||
580 | |||
581 | dma_unmap_single(xqspi->dev, xqspi->dma_addr, | ||
582 | xqspi->dma_rx_bytes, DMA_FROM_DEVICE); | ||
583 | xqspi->rxbuf += xqspi->dma_rx_bytes; | ||
584 | xqspi->bytes_to_receive -= xqspi->dma_rx_bytes; | ||
585 | xqspi->dma_rx_bytes = 0; | ||
586 | |||
587 | /* Disabling the DMA interrupts */ | ||
588 | zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_DIS_OFST, | ||
589 | GQSPI_QSPIDMA_DST_I_EN_DONE_MASK); | ||
590 | |||
591 | if (xqspi->bytes_to_receive > 0) { | ||
592 | /* Switch to IO mode for the remaining bytes to receive */ | ||
593 | config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); | ||
594 | config_reg &= ~GQSPI_CFG_MODE_EN_MASK; | ||
595 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); | ||
596 | |||
597 | /* Initiate the transfer of remaining bytes */ | ||
598 | genfifoentry = xqspi->genfifoentry; | ||
599 | genfifoentry |= xqspi->bytes_to_receive; | ||
600 | zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry); | ||
601 | |||
602 | /* Dummy generic FIFO entry */ | ||
603 | zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0); | ||
604 | |||
605 | /* Manual start */ | ||
606 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, | ||
607 | (zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) | | ||
608 | GQSPI_CFG_START_GEN_FIFO_MASK)); | ||
609 | |||
610 | /* Enable the RX interrupts for IO mode */ | ||
611 | zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST, | ||
612 | GQSPI_IER_GENFIFOEMPTY_MASK | | ||
613 | GQSPI_IER_RXNEMPTY_MASK | | ||
614 | GQSPI_IER_RXEMPTY_MASK); | ||
615 | } | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * zynqmp_qspi_irq: Interrupt service routine of the QSPI controller | ||
620 | * @irq: IRQ number | ||
621 | * @dev_id: Pointer to the xqspi structure | ||
622 | * | ||
623 | * This function handles the TX, RX and DMA-done interrupts: it refills the | ||
624 | * TX FIFO while data remains to be transmitted, drains received data from | ||
625 | * the RX FIFO or DMA buffer, and finalizes the transfer when done. | ||
626 | * | ||
627 | * Return: IRQ_HANDLED when interrupt is handled | ||
628 | * IRQ_NONE otherwise. | ||
629 | */ | ||
630 | static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) | ||
631 | { | ||
632 | struct spi_master *master = dev_id; | ||
633 | struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); | ||
634 | int ret = IRQ_NONE; | ||
635 | u32 status, mask, dma_status = 0; | ||
636 | |||
637 | status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST); | ||
638 | zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST, status); | ||
639 | mask = (status & ~(zynqmp_gqspi_read(xqspi, GQSPI_IMASK_OFST))); | ||
640 | |||
641 | /* Read and clear DMA status */ | ||
642 | if (xqspi->mode == GQSPI_MODE_DMA) { | ||
643 | dma_status = | ||
644 | zynqmp_gqspi_read(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST); | ||
645 | zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST, | ||
646 | dma_status); | ||
647 | } | ||
648 | |||
649 | if (mask & GQSPI_ISR_TXNOT_FULL_MASK) { | ||
650 | zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL); | ||
651 | ret = IRQ_HANDLED; | ||
652 | } | ||
653 | |||
654 | if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) { | ||
655 | zynqmp_process_dma_irq(xqspi); | ||
656 | ret = IRQ_HANDLED; | ||
657 | } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && | ||
658 | (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) { | ||
659 | zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL); | ||
660 | ret = IRQ_HANDLED; | ||
661 | } | ||
662 | |||
663 | if ((xqspi->bytes_to_receive == 0) && (xqspi->bytes_to_transfer == 0) | ||
664 | && ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) { | ||
665 | zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK); | ||
666 | spi_finalize_current_transfer(master); | ||
667 | ret = IRQ_HANDLED; | ||
668 | } | ||
669 | return ret; | ||
670 | } | ||
671 | |||
672 | /** | ||
673 | * zynqmp_qspi_selectspimode: Selects SPI mode - x1 or x2 or x4. | ||
674 | * @xqspi: Pointer to the GQSPI instance | ||
675 | * @spimode: Requested SPI mode - single, dual or quad | ||
676 | * Return: Mask to set desired SPI mode in GENFIFO entry. | ||
677 | */ | ||
678 | static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi, | ||
679 | u8 spimode) | ||
680 | { | ||
681 | u32 mask = 0; | ||
682 | |||
683 | switch (spimode) { | ||
684 | case GQSPI_SELECT_MODE_DUALSPI: | ||
685 | mask = GQSPI_GENFIFO_MODE_DUALSPI; | ||
686 | break; | ||
687 | case GQSPI_SELECT_MODE_QUADSPI: | ||
688 | mask = GQSPI_GENFIFO_MODE_QUADSPI; | ||
689 | break; | ||
690 | case GQSPI_SELECT_MODE_SPI: | ||
691 | mask = GQSPI_GENFIFO_MODE_SPI; | ||
692 | break; | ||
693 | default: | ||
694 | dev_warn(xqspi->dev, "Invalid SPI mode\n"); | ||
695 | } | ||
696 | |||
697 | return mask; | ||
698 | } | ||
699 | |||
700 | /** | ||
701 | * zynq_qspi_setuprxdma: This function sets up the RX DMA operation | ||
702 | * @xqspi: Pointer to the GQSPI instance | ||
703 | */ | ||
704 | static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi) | ||
705 | { | ||
706 | u32 rx_bytes, rx_rem, config_reg; | ||
707 | dma_addr_t addr; | ||
708 | u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf; | ||
709 | |||
710 | if ((xqspi->bytes_to_receive < 8) || | ||
711 | ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) { | ||
712 | /* Setting to IO mode */ | ||
713 | config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); | ||
714 | config_reg &= ~GQSPI_CFG_MODE_EN_MASK; | ||
715 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); | ||
716 | xqspi->mode = GQSPI_MODE_IO; | ||
717 | xqspi->dma_rx_bytes = 0; | ||
718 | return; | ||
719 | } | ||
720 | |||
721 | rx_rem = xqspi->bytes_to_receive % 4; | ||
722 | rx_bytes = (xqspi->bytes_to_receive - rx_rem); | ||
723 | |||
724 | addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf, | ||
725 | rx_bytes, DMA_FROM_DEVICE); | ||
726 | if (dma_mapping_error(xqspi->dev, addr)) | ||
727 | dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n"); | ||
728 | |||
729 | xqspi->dma_rx_bytes = rx_bytes; | ||
730 | xqspi->dma_addr = addr; | ||
731 | zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_OFST, | ||
732 | (u32)(addr & 0xffffffff)); | ||
733 | addr = ((addr >> 16) >> 16); | ||
734 | zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST, | ||
735 | ((u32)addr) & 0xfff); | ||
736 | |||
737 | /* Enabling the DMA mode */ | ||
738 | config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); | ||
739 | config_reg &= ~GQSPI_CFG_MODE_EN_MASK; | ||
740 | config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK; | ||
741 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); | ||
742 | |||
743 | /* Switch to DMA mode */ | ||
744 | xqspi->mode = GQSPI_MODE_DMA; | ||
745 | |||
746 | /* Write the number of bytes to transfer */ | ||
747 | zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes); | ||
748 | } | ||
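The receive-path decision made by zynq_qspi_setuprxdma() can be summarised in a short sketch: buffers shorter than 8 bytes, or not 32-bit aligned, stay in IO mode; otherwise the 4-byte-aligned body goes to the DMA engine and the remainder (len % 4) is finished in IO mode from zynqmp_process_dma_irq(). The addresses and lengths below are only examples, not driver code.

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_UNALIGN	0x3	/* matches GQSPI_DMA_UNALIGN */

    static void plan_rx(uintptr_t buf, unsigned int len)
    {
    	if (len < 8 || (buf & DMA_UNALIGN)) {
    		printf("IO mode: %u bytes\n", len);
    		return;
    	}
    	/* DMA moves the aligned body, IO mode mops up the tail. */
    	printf("DMA: %u bytes, IO tail: %u bytes\n", len - len % 4, len % 4);
    }

    int main(void)
    {
    	plan_rx(0x1000, 103);	/* -> DMA: 100 bytes, IO tail: 3 bytes */
    	plan_rx(0x1001, 103);	/* unaligned -> IO mode: 103 bytes */
    	plan_rx(0x1000, 6);	/* too short -> IO mode: 6 bytes */
    	return 0;
    }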
749 | |||
750 | /** | ||
751 | * zynqmp_qspi_txrxsetup: Checks the TX/RX buffers of the transfer and | ||
752 | * sets up the GENFIFO entry and the TX FIFO | ||
753 | * as required. | ||
754 | * @xqspi: Pointer to the GQSPI instance | ||
755 | * @transfer: Pointer to the structure containing the transfer data | ||
756 | * @genfifoentry: Pointer to the variable in which the GENFIFO mask is | ||
757 | * returned to the calling function | ||
758 | */ | ||
759 | static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi, | ||
760 | struct spi_transfer *transfer, | ||
761 | u32 *genfifoentry) | ||
762 | { | ||
763 | u32 config_reg; | ||
764 | |||
765 | /* Transmit */ | ||
766 | if ((xqspi->txbuf != NULL) && (xqspi->rxbuf == NULL)) { | ||
767 | /* Setup data to be TXed */ | ||
768 | *genfifoentry &= ~GQSPI_GENFIFO_RX; | ||
769 | *genfifoentry |= GQSPI_GENFIFO_DATA_XFER; | ||
770 | *genfifoentry |= GQSPI_GENFIFO_TX; | ||
771 | *genfifoentry |= | ||
772 | zynqmp_qspi_selectspimode(xqspi, transfer->tx_nbits); | ||
773 | xqspi->bytes_to_transfer = transfer->len; | ||
774 | if (xqspi->mode == GQSPI_MODE_DMA) { | ||
775 | config_reg = zynqmp_gqspi_read(xqspi, | ||
776 | GQSPI_CONFIG_OFST); | ||
777 | config_reg &= ~GQSPI_CFG_MODE_EN_MASK; | ||
778 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, | ||
779 | config_reg); | ||
780 | xqspi->mode = GQSPI_MODE_IO; | ||
781 | } | ||
782 | zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH); | ||
783 | /* Discard RX data */ | ||
784 | xqspi->bytes_to_receive = 0; | ||
785 | } else if ((xqspi->txbuf == NULL) && (xqspi->rxbuf != NULL)) { | ||
786 | /* Receive */ | ||
787 | |||
788 | /* TX auto fill */ | ||
789 | *genfifoentry &= ~GQSPI_GENFIFO_TX; | ||
790 | /* Setup RX */ | ||
791 | *genfifoentry |= GQSPI_GENFIFO_DATA_XFER; | ||
792 | *genfifoentry |= GQSPI_GENFIFO_RX; | ||
793 | *genfifoentry |= | ||
794 | zynqmp_qspi_selectspimode(xqspi, transfer->rx_nbits); | ||
795 | xqspi->bytes_to_transfer = 0; | ||
796 | xqspi->bytes_to_receive = transfer->len; | ||
797 | zynq_qspi_setuprxdma(xqspi); | ||
798 | } | ||
799 | } | ||
800 | |||
801 | /** | ||
802 | * zynqmp_qspi_start_transfer: Initiates the QSPI transfer | ||
803 | * @master: Pointer to the spi_master structure which provides | ||
804 | * information about the controller. | ||
805 | * @qspi: Pointer to the spi_device structure | ||
806 | * @transfer: Pointer to the spi_transfer structure which provide information | ||
807 | * about next transfer parameters | ||
808 | * | ||
809 | * This function fills the TX FIFO and starts the QSPI transfer; completion | ||
810 | * is signalled asynchronously from the interrupt handler. | ||
811 | * | ||
812 | * Return: Length of the transfer in bytes | ||
813 | */ | ||
814 | static int zynqmp_qspi_start_transfer(struct spi_master *master, | ||
815 | struct spi_device *qspi, | ||
816 | struct spi_transfer *transfer) | ||
817 | { | ||
818 | struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); | ||
819 | u32 genfifoentry = 0x0, transfer_len; | ||
820 | |||
821 | xqspi->txbuf = transfer->tx_buf; | ||
822 | xqspi->rxbuf = transfer->rx_buf; | ||
823 | |||
824 | zynqmp_qspi_setup_transfer(qspi, transfer); | ||
825 | |||
826 | genfifoentry |= xqspi->genfifocs; | ||
827 | genfifoentry |= xqspi->genfifobus; | ||
828 | |||
829 | zynqmp_qspi_txrxsetup(xqspi, transfer, &genfifoentry); | ||
830 | |||
831 | if (xqspi->mode == GQSPI_MODE_DMA) | ||
832 | transfer_len = xqspi->dma_rx_bytes; | ||
833 | else | ||
834 | transfer_len = transfer->len; | ||
835 | |||
836 | xqspi->genfifoentry = genfifoentry; | ||
837 | if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) { | ||
838 | genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK; | ||
839 | genfifoentry |= transfer_len; | ||
840 | zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry); | ||
841 | } else { | ||
842 | int tempcount = transfer_len; | ||
843 | u32 exponent = 8; /* 2^8 = 256 */ | ||
844 | u8 imm_data = tempcount & 0xFF; | ||
845 | |||
846 | tempcount &= ~(tempcount & 0xFF); | ||
847 | /* Immediate entry */ | ||
848 | if (tempcount != 0) { | ||
849 | /* Exponent entries */ | ||
850 | genfifoentry |= GQSPI_GENFIFO_EXP; | ||
851 | while (tempcount != 0) { | ||
852 | if (tempcount & GQSPI_GENFIFO_EXP_START) { | ||
853 | genfifoentry &= | ||
854 | ~GQSPI_GENFIFO_IMM_DATA_MASK; | ||
855 | genfifoentry |= exponent; | ||
856 | zynqmp_gqspi_write(xqspi, | ||
857 | GQSPI_GEN_FIFO_OFST, | ||
858 | genfifoentry); | ||
859 | } | ||
860 | tempcount = tempcount >> 1; | ||
861 | exponent++; | ||
862 | } | ||
863 | } | ||
864 | if (imm_data != 0) { | ||
865 | genfifoentry &= ~GQSPI_GENFIFO_EXP; | ||
866 | genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK; | ||
867 | genfifoentry |= (u8) (imm_data & 0xFF); | ||
868 | zynqmp_gqspi_write(xqspi, | ||
869 | GQSPI_GEN_FIFO_OFST, genfifoentry); | ||
870 | } | ||
871 | } | ||
872 | |||
873 | if ((xqspi->mode == GQSPI_MODE_IO) && | ||
874 | (xqspi->rxbuf != NULL)) { | ||
875 | /* Dummy generic FIFO entry */ | ||
876 | zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0); | ||
877 | } | ||
878 | |||
879 | /* Since we are using manual mode */ | ||
880 | zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, | ||
881 | zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) | | ||
882 | GQSPI_CFG_START_GEN_FIFO_MASK); | ||
883 | |||
884 | if (xqspi->txbuf != NULL) | ||
885 | /* Enable interrupts for TX */ | ||
886 | zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST, | ||
887 | GQSPI_IER_TXEMPTY_MASK | | ||
888 | GQSPI_IER_GENFIFOEMPTY_MASK | | ||
889 | GQSPI_IER_TXNOT_FULL_MASK); | ||
890 | |||
891 | if (xqspi->rxbuf != NULL) { | ||
892 | /* Enable interrupts for RX */ | ||
893 | if (xqspi->mode == GQSPI_MODE_DMA) { | ||
894 | /* Enable DMA interrupts */ | ||
895 | zynqmp_gqspi_write(xqspi, | ||
896 | GQSPI_QSPIDMA_DST_I_EN_OFST, | ||
897 | GQSPI_QSPIDMA_DST_I_EN_DONE_MASK); | ||
898 | } else { | ||
899 | zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST, | ||
900 | GQSPI_IER_GENFIFOEMPTY_MASK | | ||
901 | GQSPI_IER_RXNEMPTY_MASK | | ||
902 | GQSPI_IER_RXEMPTY_MASK); | ||
903 | } | ||
904 | } | ||
905 | |||
906 | return transfer->len; | ||
907 | } | ||
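The GENFIFO length encoding in the function above is easiest to follow with a worked example: lengths up to 255 bytes fit in a single immediate entry, while longer transfers are split into one power-of-two "exponent" entry per set bit at position 8 and above, plus an immediate entry for the low byte. The sketch below reproduces only that decomposition and is not driver code.

    #include <stdio.h>

    /* Decompose a transfer length the way zynqmp_qspi_start_transfer() does:
     * one exponent entry per set bit >= bit 8, plus an immediate low byte. */
    static void decompose_len(unsigned int len)
    {
    	unsigned int rest = len & ~0xFFu;
    	unsigned int imm = len & 0xFF;
    	unsigned int exp = 8;

    	while (rest) {
    		if (rest & 0x100)
    			printf("EXP entry: 2^%u = %u bytes\n", exp, 1u << exp);
    		rest >>= 1;
    		exp++;
    	}
    	if (imm)
    		printf("IMM entry: %u bytes\n", imm);
    }

    int main(void)
    {
    	/* 300 = 0x12C -> one EXP entry of 256 bytes + IMM entry of 44 bytes */
    	decompose_len(300);
    	return 0;
    }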
908 | |||
909 | /** | ||
910 | * zynqmp_qspi_suspend: Suspend method for the QSPI driver | ||
911 | * @dev: Address of the device structure | ||
912 | * | ||
913 | * This function stops the QSPI driver queue and disables the QSPI controller | ||
914 | * | ||
915 | * Return: Always 0 | ||
916 | */ | ||
917 | static int __maybe_unused zynqmp_qspi_suspend(struct device *dev) | ||
918 | { | ||
919 | struct platform_device *pdev = container_of(dev, | ||
920 | struct platform_device, | ||
921 | dev); | ||
922 | struct spi_master *master = platform_get_drvdata(pdev); | ||
923 | |||
924 | spi_master_suspend(master); | ||
925 | |||
926 | zynqmp_unprepare_transfer_hardware(master); | ||
927 | |||
928 | return 0; | ||
929 | } | ||
930 | |||
931 | /** | ||
932 | * zynqmp_qspi_resume: Resume method for the QSPI driver | ||
933 | * @dev: Address of the device structure | ||
934 | * | ||
935 | * The function re-enables the controller clocks and restarts the QSPI | ||
936 | * driver queue | ||
937 | * | ||
938 | * Return: 0 on success; error value otherwise | ||
939 | */ | ||
940 | static int __maybe_unused zynqmp_qspi_resume(struct device *dev) | ||
941 | { | ||
942 | struct platform_device *pdev = container_of(dev, | ||
943 | struct platform_device, | ||
944 | dev); | ||
945 | struct spi_master *master = platform_get_drvdata(pdev); | ||
946 | struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); | ||
947 | int ret = 0; | ||
948 | |||
949 | ret = clk_enable(xqspi->pclk); | ||
950 | if (ret) { | ||
951 | dev_err(dev, "Cannot enable APB clock.\n"); | ||
952 | return ret; | ||
953 | } | ||
954 | |||
955 | ret = clk_enable(xqspi->refclk); | ||
956 | if (ret) { | ||
957 | dev_err(dev, "Cannot enable device clock.\n"); | ||
958 | clk_disable(xqspi->pclk); | ||
959 | return ret; | ||
960 | } | ||
961 | |||
962 | spi_master_resume(master); | ||
963 | |||
964 | return 0; | ||
965 | } | ||
966 | |||
967 | static SIMPLE_DEV_PM_OPS(zynqmp_qspi_dev_pm_ops, zynqmp_qspi_suspend, | ||
968 | zynqmp_qspi_resume); | ||
969 | |||
970 | /** | ||
971 | * zynqmp_qspi_probe: Probe method for the QSPI driver | ||
972 | * @pdev: Pointer to the platform_device structure | ||
973 | * | ||
974 | * This function initializes the driver data structures and the hardware. | ||
975 | * | ||
976 | * Return: 0 on success; error value otherwise | ||
977 | */ | ||
978 | static int zynqmp_qspi_probe(struct platform_device *pdev) | ||
979 | { | ||
980 | int ret = 0; | ||
981 | struct spi_master *master; | ||
982 | struct zynqmp_qspi *xqspi; | ||
983 | struct resource *res; | ||
984 | struct device *dev = &pdev->dev; | ||
985 | |||
986 | master = spi_alloc_master(&pdev->dev, sizeof(*xqspi)); | ||
987 | if (!master) | ||
988 | return -ENOMEM; | ||
989 | |||
990 | xqspi = spi_master_get_devdata(master); | ||
991 | master->dev.of_node = pdev->dev.of_node; | ||
992 | platform_set_drvdata(pdev, master); | ||
993 | |||
994 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
995 | xqspi->regs = devm_ioremap_resource(&pdev->dev, res); | ||
996 | if (IS_ERR(xqspi->regs)) { | ||
997 | ret = PTR_ERR(xqspi->regs); | ||
998 | goto remove_master; | ||
999 | } | ||
1000 | |||
1001 | xqspi->dev = dev; | ||
1002 | xqspi->pclk = devm_clk_get(&pdev->dev, "pclk"); | ||
1003 | if (IS_ERR(xqspi->pclk)) { | ||
1004 | dev_err(dev, "pclk clock not found.\n"); | ||
1005 | ret = PTR_ERR(xqspi->pclk); | ||
1006 | goto remove_master; | ||
1007 | } | ||
1008 | |||
1009 | ret = clk_prepare_enable(xqspi->pclk); | ||
1010 | if (ret) { | ||
1011 | dev_err(dev, "Unable to enable APB clock.\n"); | ||
1012 | goto remove_master; | ||
1013 | } | ||
1014 | |||
1015 | xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk"); | ||
1016 | if (IS_ERR(xqspi->refclk)) { | ||
1017 | dev_err(dev, "ref_clk clock not found.\n"); | ||
1018 | ret = PTR_ERR(xqspi->refclk); | ||
1019 | goto clk_dis_pclk; | ||
1020 | } | ||
1021 | |||
1022 | ret = clk_prepare_enable(xqspi->refclk); | ||
1023 | if (ret) { | ||
1024 | dev_err(dev, "Unable to enable device clock.\n"); | ||
1025 | goto clk_dis_pclk; | ||
1026 | } | ||
1027 | |||
1028 | /* QSPI controller initializations */ | ||
1029 | zynqmp_qspi_init_hw(xqspi); | ||
1030 | |||
1031 | xqspi->irq = platform_get_irq(pdev, 0); | ||
1032 | if (xqspi->irq <= 0) { | ||
1033 | ret = -ENXIO; | ||
1034 | dev_err(dev, "irq resource not found\n"); | ||
1035 | goto clk_dis_all; | ||
1036 | } | ||
1037 | ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq, | ||
1038 | 0, pdev->name, master); | ||
1039 | if (ret != 0) { | ||
1040 | ret = -ENXIO; | ||
1041 | dev_err(dev, "request_irq failed\n"); | ||
1042 | goto clk_dis_all; | ||
1043 | } | ||
1044 | |||
1045 | master->num_chipselect = GQSPI_DEFAULT_NUM_CS; | ||
1046 | |||
1047 | master->setup = zynqmp_qspi_setup; | ||
1048 | master->set_cs = zynqmp_qspi_chipselect; | ||
1049 | master->transfer_one = zynqmp_qspi_start_transfer; | ||
1050 | master->prepare_transfer_hardware = zynqmp_prepare_transfer_hardware; | ||
1051 | master->unprepare_transfer_hardware = | ||
1052 | zynqmp_unprepare_transfer_hardware; | ||
1053 | master->max_speed_hz = clk_get_rate(xqspi->refclk) / 2; | ||
1054 | master->bits_per_word_mask = SPI_BPW_MASK(8); | ||
1055 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD | | ||
1056 | SPI_TX_DUAL | SPI_TX_QUAD; | ||
1057 | |||
1058 | if (master->dev.parent == NULL) | ||
1059 | master->dev.parent = &master->dev; | ||
1060 | |||
1061 | ret = spi_register_master(master); | ||
1062 | if (ret) | ||
1063 | goto clk_dis_all; | ||
1064 | |||
1065 | return 0; | ||
1066 | |||
1067 | clk_dis_all: | ||
1068 | clk_disable_unprepare(xqspi->refclk); | ||
1069 | clk_dis_pclk: | ||
1070 | clk_disable_unprepare(xqspi->pclk); | ||
1071 | remove_master: | ||
1072 | spi_master_put(master); | ||
1073 | |||
1074 | return ret; | ||
1075 | } | ||
1076 | |||
1077 | /** | ||
1078 | * zynqmp_qspi_remove: Remove method for the QSPI driver | ||
1079 | * @pdev: Pointer to the platform_device structure | ||
1080 | * | ||
1081 | * This function is called if a device is physically removed from the system or | ||
1082 | * if the driver module is being unloaded. It frees all resources allocated to | ||
1083 | * the device. | ||
1084 | * | ||
1085 | * Return: 0 Always | ||
1086 | */ | ||
1087 | static int zynqmp_qspi_remove(struct platform_device *pdev) | ||
1088 | { | ||
1089 | struct spi_master *master = platform_get_drvdata(pdev); | ||
1090 | struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); | ||
1091 | |||
1092 | zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0); | ||
1093 | clk_disable_unprepare(xqspi->refclk); | ||
1094 | clk_disable_unprepare(xqspi->pclk); | ||
1095 | |||
1096 | spi_unregister_master(master); | ||
1097 | |||
1098 | return 0; | ||
1099 | } | ||
1100 | |||
1101 | static const struct of_device_id zynqmp_qspi_of_match[] = { | ||
1102 | { .compatible = "xlnx,zynqmp-qspi-1.0", }, | ||
1103 | { /* End of table */ } | ||
1104 | }; | ||
1105 | |||
1106 | MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match); | ||
1107 | |||
1108 | static struct platform_driver zynqmp_qspi_driver = { | ||
1109 | .probe = zynqmp_qspi_probe, | ||
1110 | .remove = zynqmp_qspi_remove, | ||
1111 | .driver = { | ||
1112 | .name = "zynqmp-qspi", | ||
1113 | .of_match_table = zynqmp_qspi_of_match, | ||
1114 | .pm = &zynqmp_qspi_dev_pm_ops, | ||
1115 | }, | ||
1116 | }; | ||
1117 | |||
1118 | module_platform_driver(zynqmp_qspi_driver); | ||
1119 | |||
1120 | MODULE_AUTHOR("Xilinx, Inc."); | ||
1121 | MODULE_DESCRIPTION("Xilinx Zynqmp QSPI driver"); | ||
1122 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 92c909eed6b5..dd616ff0ffc5 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -95,37 +95,25 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message"); | |||
95 | 95 | ||
96 | /*-------------------------------------------------------------------------*/ | 96 | /*-------------------------------------------------------------------------*/ |
97 | 97 | ||
98 | /* | ||
99 | * We can't use the standard synchronous wrappers for file I/O; we | ||
100 | * need to protect against async removal of the underlying spi_device. | ||
101 | */ | ||
102 | static void spidev_complete(void *arg) | ||
103 | { | ||
104 | complete(arg); | ||
105 | } | ||
106 | |||
107 | static ssize_t | 98 | static ssize_t |
108 | spidev_sync(struct spidev_data *spidev, struct spi_message *message) | 99 | spidev_sync(struct spidev_data *spidev, struct spi_message *message) |
109 | { | 100 | { |
110 | DECLARE_COMPLETION_ONSTACK(done); | 101 | DECLARE_COMPLETION_ONSTACK(done); |
111 | int status; | 102 | int status; |
112 | 103 | struct spi_device *spi; | |
113 | message->complete = spidev_complete; | ||
114 | message->context = &done; | ||
115 | 104 | ||
116 | spin_lock_irq(&spidev->spi_lock); | 105 | spin_lock_irq(&spidev->spi_lock); |
117 | if (spidev->spi == NULL) | 106 | spi = spidev->spi; |
107 | spin_unlock_irq(&spidev->spi_lock); | ||
108 | |||
109 | if (spi == NULL) | ||
118 | status = -ESHUTDOWN; | 110 | status = -ESHUTDOWN; |
119 | else | 111 | else |
120 | status = spi_async(spidev->spi, message); | 112 | status = spi_sync(spi, message); |
121 | spin_unlock_irq(&spidev->spi_lock); | 113 | |
114 | if (status == 0) | ||
115 | status = message->actual_length; | ||
122 | 116 | ||
123 | if (status == 0) { | ||
124 | wait_for_completion(&done); | ||
125 | status = message->status; | ||
126 | if (status == 0) | ||
127 | status = message->actual_length; | ||
128 | } | ||
129 | return status; | 117 | return status; |
130 | } | 118 | } |
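For context, the read and write paths of spidev call this helper with a single-transfer message; the sketch below shows that calling pattern with illustrative names (the buffer handling in the real file differs in detail):

    /* Illustrative only: how a caller hands a single transfer to spidev_sync(). */
    static ssize_t demo_sync_write(struct spidev_data *spidev,
    			       const void *buf, size_t len)
    {
    	struct spi_transfer t = {
    		.tx_buf = buf,
    		.len    = len,
    	};
    	struct spi_message m;

    	spi_message_init(&m);
    	spi_message_add_tail(&t, &m);

    	/* spi_sync() now runs inside spidev_sync(); on success the number of
    	 * bytes actually transferred (message.actual_length) is returned. */
    	return spidev_sync(spidev, &m);
    }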
131 | 119 | ||
@@ -647,7 +635,6 @@ err_find_dev: | |||
647 | static int spidev_release(struct inode *inode, struct file *filp) | 635 | static int spidev_release(struct inode *inode, struct file *filp) |
648 | { | 636 | { |
649 | struct spidev_data *spidev; | 637 | struct spidev_data *spidev; |
650 | int status = 0; | ||
651 | 638 | ||
652 | mutex_lock(&device_list_lock); | 639 | mutex_lock(&device_list_lock); |
653 | spidev = filp->private_data; | 640 | spidev = filp->private_data; |
@@ -676,7 +663,7 @@ static int spidev_release(struct inode *inode, struct file *filp) | |||
676 | } | 663 | } |
677 | mutex_unlock(&device_list_lock); | 664 | mutex_unlock(&device_list_lock); |
678 | 665 | ||
679 | return status; | 666 | return 0; |
680 | } | 667 | } |
681 | 668 | ||
682 | static const struct file_operations spidev_fops = { | 669 | static const struct file_operations spidev_fops = { |