diff options
author | Grant Likely <grant.likely@secretlab.ca> | 2013-02-09 11:02:44 -0500 |
---|---|---|
committer | Grant Likely <grant.likely@secretlab.ca> | 2013-02-09 11:02:44 -0500 |
commit | 0d73299ddf1f4c3ea9f8606b49c4346871a3f139 (patch) | |
tree | 5b6a02093f8e9838346536c5805bcc5ff69dba98 /drivers/spi | |
parent | 7410e848583f9120dd5f9414629f01bb76b5ee5f (diff) | |
parent | a3496855d9f1948d1b977afe8bd922725ded05d5 (diff) |
Merge branch spi-next from git://git.kernel.org/pub/scm/linux/kernel/git/broonie/misc.git
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/spi')
-rw-r--r-- | drivers/spi/Kconfig | 13 | ||||
-rw-r--r-- | drivers/spi/Makefile | 5 | ||||
-rw-r--r-- | drivers/spi/spi-pxa2xx-dma.c | 392 | ||||
-rw-r--r-- | drivers/spi/spi-pxa2xx-pxadma.c | 490 | ||||
-rw-r--r-- | drivers/spi/spi-pxa2xx.c | 908 | ||||
-rw-r--r-- | drivers/spi/spi-pxa2xx.h | 221 |
6 files changed, 1451 insertions, 578 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index a90393d7f106..e79884e997ae 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -297,9 +297,20 @@ config SPI_PPC4xx | |||
297 | help | 297 | help |
298 | This selects a driver for the PPC4xx SPI Controller. | 298 | This selects a driver for the PPC4xx SPI Controller. |
299 | 299 | ||
300 | config SPI_PXA2XX_PXADMA | ||
301 | bool "PXA2xx SSP legacy PXA DMA API support" | ||
302 | depends on SPI_PXA2XX && ARCH_PXA | ||
303 | help | ||
304 | Enable PXA private legacy DMA API support. Note that this is | ||
305 | deprecated in favor of generic DMA engine API. | ||
306 | |||
307 | config SPI_PXA2XX_DMA | ||
308 | def_bool y | ||
309 | depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA | ||
310 | |||
300 | config SPI_PXA2XX | 311 | config SPI_PXA2XX |
301 | tristate "PXA2xx SSP SPI master" | 312 | tristate "PXA2xx SSP SPI master" |
302 | depends on ARCH_PXA || PCI | 313 | depends on ARCH_PXA || PCI || ACPI |
303 | select PXA_SSP if ARCH_PXA | 314 | select PXA_SSP if ARCH_PXA |
304 | help | 315 | help |
305 | This enables using a PXA2xx or Sodaville SSP port as a SPI master | 316 | This enables using a PXA2xx or Sodaville SSP port as a SPI master |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 64e970ba261c..e53c30941340 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -47,7 +47,10 @@ obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o | |||
47 | obj-$(CONFIG_SPI_ORION) += spi-orion.o | 47 | obj-$(CONFIG_SPI_ORION) += spi-orion.o |
48 | obj-$(CONFIG_SPI_PL022) += spi-pl022.o | 48 | obj-$(CONFIG_SPI_PL022) += spi-pl022.o |
49 | obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o | 49 | obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o |
50 | obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o | 50 | spi-pxa2xx-platform-objs := spi-pxa2xx.o |
51 | spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o | ||
52 | spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o | ||
53 | obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o | ||
51 | obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o | 54 | obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o |
52 | obj-$(CONFIG_SPI_RSPI) += spi-rspi.o | 55 | obj-$(CONFIG_SPI_RSPI) += spi-rspi.o |
53 | obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o | 56 | obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o |
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c new file mode 100644 index 000000000000..c735c5a008a2 --- /dev/null +++ b/drivers/spi/spi-pxa2xx-dma.c | |||
@@ -0,0 +1,392 @@ | |||
1 | /* | ||
2 | * PXA2xx SPI DMA engine support. | ||
3 | * | ||
4 | * Copyright (C) 2013, Intel Corporation | ||
5 | * Author: Mika Westerberg <mika.westerberg@linux.intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/dmaengine.h> | ||
16 | #include <linux/pxa2xx_ssp.h> | ||
17 | #include <linux/scatterlist.h> | ||
18 | #include <linux/sizes.h> | ||
19 | #include <linux/spi/spi.h> | ||
20 | #include <linux/spi/pxa2xx_spi.h> | ||
21 | |||
22 | #include "spi-pxa2xx.h" | ||
23 | |||
/*
 * pxa2xx_spi_map_dma_buffer - build and DMA-map a scatterlist for one
 * direction of the current transfer.
 * @drv_data: driver state for the transfer in progress
 * @dir: DMA_TO_DEVICE selects the TX side, anything else the RX side
 *
 * The transfer length is rounded to a multiple of 4 and split into SZ_2K
 * chunks; when the direction has no buffer, every entry points at the
 * shared dummy page.  Returns the number of mapped SG entries (> 0) on
 * success, or a negative errno on failure.
 */
static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
				     enum dma_data_direction dir)
{
	int i, nents, len = drv_data->len;
	struct scatterlist *sg;
	struct device *dmadev;
	struct sg_table *sgt;
	void *buf, *pbuf;

	/*
	 * Some DMA controllers have problems transferring buffers that are
	 * not multiple of 4 bytes. So we truncate the transfer so that it
	 * is suitable for such controllers, and handle the trailing bytes
	 * manually after the DMA completes.
	 *
	 * REVISIT: It would be better if this information could be
	 * retrieved directly from the DMA device in a similar way than
	 * ->copy_align etc. is done.
	 */
	len = ALIGN(drv_data->len, 4);
	/*
	 * NOTE(review): ALIGN() rounds *up*, while the comment above talks
	 * about truncating; for a length that is not a multiple of 4 this
	 * maps up to 3 bytes beyond the supplied buffer -- confirm this is
	 * intended/safe for all callers.
	 */

	if (dir == DMA_TO_DEVICE) {
		dmadev = drv_data->tx_chan->device->dev;
		sgt = &drv_data->tx_sgt;
		buf = drv_data->tx;
		drv_data->tx_map_len = len;
	} else {
		dmadev = drv_data->rx_chan->device->dev;
		sgt = &drv_data->rx_sgt;
		buf = drv_data->rx;
		drv_data->rx_map_len = len;
	}

	/* Reuse the previously allocated table when it already has the
	 * right number of entries; otherwise reallocate it. */
	nents = DIV_ROUND_UP(len, SZ_2K);
	if (nents != sgt->nents) {
		int ret;

		sg_free_table(sgt);
		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ret;
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, SZ_2K);

		/* A NULL buffer means "no data": use the dummy page */
		if (buf)
			sg_set_buf(sg, pbuf, bytes);
		else
			sg_set_buf(sg, drv_data->dummy, bytes);

		pbuf += bytes;
		len -= bytes;
	}

	nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return -ENOMEM;

	return nents;
}
86 | |||
87 | static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data, | ||
88 | enum dma_data_direction dir) | ||
89 | { | ||
90 | struct device *dmadev; | ||
91 | struct sg_table *sgt; | ||
92 | |||
93 | if (dir == DMA_TO_DEVICE) { | ||
94 | dmadev = drv_data->tx_chan->device->dev; | ||
95 | sgt = &drv_data->tx_sgt; | ||
96 | } else { | ||
97 | dmadev = drv_data->rx_chan->device->dev; | ||
98 | sgt = &drv_data->rx_sgt; | ||
99 | } | ||
100 | |||
101 | dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir); | ||
102 | } | ||
103 | |||
/*
 * Undo both DMA mappings for the current transfer, if any were made.
 * ->dma_mapped gates the work, so a second call is a no-op.
 */
static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
	if (!drv_data->dma_mapped)
		return;

	pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
	pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);

	drv_data->dma_mapped = 0;
}
114 | |||
/*
 * pxa2xx_spi_dma_transfer_complete - finish the current DMA transfer.
 * @drv_data: driver state
 * @error: true when the caller already knows the transfer failed (ROR)
 *
 * Called both from the DMA completion callback (@error == false) and from
 * the SSP interrupt handler on overrun (@error == true).  On success the
 * trailing, non-DMA-covered bytes are moved by PIO and the message state
 * advances; on error the SSP is disabled and the message is marked failed.
 * Either way the pump_transfers tasklet is scheduled.
 */
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
					     bool error)
{
	struct spi_message *msg = drv_data->cur_msg;

	/*
	 * It is possible that one CPU is handling ROR interrupt and other
	 * just gets DMA completion. Calling pump_transfers() twice for the
	 * same transfer leads to problems thus we prevent concurrent calls
	 * by using ->dma_running.
	 */
	if (atomic_dec_and_test(&drv_data->dma_running)) {
		void __iomem *reg = drv_data->ioaddr;

		/*
		 * If the other CPU is still handling the ROR interrupt we
		 * might not know about the error yet. So we re-check the
		 * ROR bit here before we clear the status register.
		 */
		if (!error) {
			u32 status = read_SSSR(reg) & drv_data->mask_sr;
			error = status & SSSR_ROR;
		}

		/* Clear status & disable interrupts */
		write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
		write_SSSR_CS(drv_data, drv_data->clear_sr);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);

		if (!error) {
			pxa2xx_spi_unmap_dma_buffers(drv_data);

			/* Handle the last bytes of unaligned transfer */
			drv_data->tx += drv_data->tx_map_len;
			drv_data->write(drv_data);

			drv_data->rx += drv_data->rx_map_len;
			drv_data->read(drv_data);

			msg->actual_length += drv_data->len;
			msg->state = pxa2xx_spi_next_transfer(drv_data);
		} else {
			/* In case we got an error we disable the SSP now */
			write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

			msg->state = ERROR_STATE;
		}

		tasklet_schedule(&drv_data->pump_transfers);
	}
}
167 | |||
168 | static void pxa2xx_spi_dma_callback(void *data) | ||
169 | { | ||
170 | pxa2xx_spi_dma_transfer_complete(data, false); | ||
171 | } | ||
172 | |||
/*
 * pxa2xx_spi_dma_prepare_one - build a slave DMA descriptor for one
 * direction of the current transfer.
 * @drv_data: driver state
 * @dir: DMA_MEM_TO_DEV for TX, DMA_DEV_TO_MEM for RX
 *
 * Programs the channel with the SSDR FIFO address, a bus width derived
 * from the per-transfer sample size (->n_bytes), the chip's burst size
 * and the platform request line, then prepares a descriptor over the
 * pre-built scatterlist.  Returns the descriptor, or NULL on failure.
 */
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
			   enum dma_transfer_direction dir)
{
	struct pxa2xx_spi_master *pdata = drv_data->master_info;
	struct chip_data *chip = drv_data->cur_chip;
	enum dma_slave_buswidth width;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	struct sg_table *sgt;
	int nents, ret;

	/* FIFO access width follows the number of bytes per sample */
	switch (drv_data->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = drv_data->ssdr_physical;
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = chip->dma_burst_size;
		cfg.slave_id = pdata->tx_slave_id;

		sgt = &drv_data->tx_sgt;
		nents = drv_data->tx_nents;
		chan = drv_data->tx_chan;
	} else {
		cfg.src_addr = drv_data->ssdr_physical;
		cfg.src_addr_width = width;
		cfg.src_maxburst = chip->dma_burst_size;
		cfg.slave_id = pdata->rx_slave_id;

		sgt = &drv_data->rx_sgt;
		nents = drv_data->rx_nents;
		chan = drv_data->rx_chan;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
		return NULL;
	}

	return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
229 | |||
230 | static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param) | ||
231 | { | ||
232 | const struct pxa2xx_spi_master *pdata = param; | ||
233 | |||
234 | return chan->chan_id == pdata->tx_chan_id || | ||
235 | chan->chan_id == pdata->rx_chan_id; | ||
236 | } | ||
237 | |||
238 | bool pxa2xx_spi_dma_is_possible(size_t len) | ||
239 | { | ||
240 | return len <= MAX_DMA_LEN; | ||
241 | } | ||
242 | |||
/*
 * pxa2xx_spi_map_dma_buffers - try to set up DMA mappings for the
 * current transfer.
 *
 * Returns 1 when both directions were mapped and DMA will be used, 0 to
 * fall back to PIO (DMA disabled for the chip, transfer shorter than one
 * burst, or a mapping failed -- mapping failures are not fatal).
 */
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	const struct chip_data *chip = drv_data->cur_chip;
	int ret;

	if (!chip->enable_dma)
		return 0;

	/* Don't bother with DMA if we can't do even a single burst */
	if (drv_data->len < chip->dma_burst_size)
		return 0;

	ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
	if (ret <= 0) {
		dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
		return 0;
	}

	drv_data->tx_nents = ret;

	ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
	if (ret <= 0) {
		/* Undo the TX mapping made just above */
		pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
		dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
		return 0;
	}

	drv_data->rx_nents = ret;
	return 1;
}
273 | |||
/*
 * SSP interrupt while a dmaengine transfer is in flight.  Only receiver
 * overrun (ROR) is of interest here: terminate both channels and complete
 * the transfer as failed.  Returns IRQ_NONE for anything else so the
 * caller can report an unexpected interrupt.
 */
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 status;

	status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
	if (status & SSSR_ROR) {
		dev_err(&drv_data->pdev->dev, "FIFO overrun\n");

		dmaengine_terminate_all(drv_data->rx_chan);
		dmaengine_terminate_all(drv_data->tx_chan);

		pxa2xx_spi_dma_transfer_complete(drv_data, true);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
291 | |||
/*
 * pxa2xx_spi_dma_prepare - prepare and submit descriptors for both
 * directions of the current transfer.
 * @drv_data: driver state
 * @dma_burst: unused here; the burst size is taken from the chip data in
 *             pxa2xx_spi_dma_prepare_one()
 *
 * Completion is signalled via the RX callback, since RX finishes last.
 * Returns 0 on success or -EBUSY when a descriptor could not be obtained.
 *
 * NOTE(review): when RX preparation fails, the already-prepared TX
 * descriptor is neither submitted nor explicitly released here -- verify
 * it cannot leak with the dmaengine provider in use.
 */
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;

	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
	if (!tx_desc) {
		dev_err(&drv_data->pdev->dev,
			"failed to get DMA TX descriptor\n");
		return -EBUSY;
	}

	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
	if (!rx_desc) {
		dev_err(&drv_data->pdev->dev,
			"failed to get DMA RX descriptor\n");
		return -EBUSY;
	}

	/* We are ready when RX completes */
	rx_desc->callback = pxa2xx_spi_dma_callback;
	rx_desc->callback_param = drv_data;

	dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);
	return 0;
}
318 | |||
319 | void pxa2xx_spi_dma_start(struct driver_data *drv_data) | ||
320 | { | ||
321 | dma_async_issue_pending(drv_data->rx_chan); | ||
322 | dma_async_issue_pending(drv_data->tx_chan); | ||
323 | |||
324 | atomic_set(&drv_data->dma_running, 1); | ||
325 | } | ||
326 | |||
327 | int pxa2xx_spi_dma_setup(struct driver_data *drv_data) | ||
328 | { | ||
329 | struct pxa2xx_spi_master *pdata = drv_data->master_info; | ||
330 | dma_cap_mask_t mask; | ||
331 | |||
332 | dma_cap_zero(mask); | ||
333 | dma_cap_set(DMA_SLAVE, mask); | ||
334 | |||
335 | drv_data->dummy = devm_kzalloc(&drv_data->pdev->dev, SZ_2K, GFP_KERNEL); | ||
336 | if (!drv_data->dummy) | ||
337 | return -ENOMEM; | ||
338 | |||
339 | drv_data->tx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter, | ||
340 | pdata); | ||
341 | if (!drv_data->tx_chan) | ||
342 | return -ENODEV; | ||
343 | |||
344 | drv_data->rx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter, | ||
345 | pdata); | ||
346 | if (!drv_data->rx_chan) { | ||
347 | dma_release_channel(drv_data->tx_chan); | ||
348 | drv_data->tx_chan = NULL; | ||
349 | return -ENODEV; | ||
350 | } | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
/*
 * Release both dmaengine channels and their scatter tables.  Any transfer
 * still in flight is terminated first.  Pointers are cleared so a second
 * call is a no-op.
 */
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	if (drv_data->rx_chan) {
		dmaengine_terminate_all(drv_data->rx_chan);
		dma_release_channel(drv_data->rx_chan);
		sg_free_table(&drv_data->rx_sgt);
		drv_data->rx_chan = NULL;
	}
	if (drv_data->tx_chan) {
		dmaengine_terminate_all(drv_data->tx_chan);
		dma_release_channel(drv_data->tx_chan);
		sg_free_table(&drv_data->tx_sgt);
		drv_data->tx_chan = NULL;
	}
}
370 | |||
/*
 * Nothing to re-program after resume: the dmaengine channels are fully
 * reconfigured for every transfer in pxa2xx_spi_dma_prepare_one().
 */
void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
}
374 | |||
375 | int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, | ||
376 | struct spi_device *spi, | ||
377 | u8 bits_per_word, u32 *burst_code, | ||
378 | u32 *threshold) | ||
379 | { | ||
380 | struct pxa2xx_spi_chip *chip_info = spi->controller_data; | ||
381 | |||
382 | /* | ||
383 | * If the DMA burst size is given in chip_info we use that, | ||
384 | * otherwise we use the default. Also we use the default FIFO | ||
385 | * thresholds for now. | ||
386 | */ | ||
387 | *burst_code = chip_info ? chip_info->dma_burst_size : 16; | ||
388 | *threshold = SSCR1_RxTresh(RX_THRESH_DFLT) | ||
389 | | SSCR1_TxTresh(TX_THRESH_DFLT); | ||
390 | |||
391 | return 0; | ||
392 | } | ||
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c new file mode 100644 index 000000000000..2916efc7cfe5 --- /dev/null +++ b/drivers/spi/spi-pxa2xx-pxadma.c | |||
@@ -0,0 +1,490 @@ | |||
1 | /* | ||
2 | * PXA2xx SPI private DMA support. | ||
3 | * | ||
4 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/device.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/pxa2xx_ssp.h> | ||
26 | #include <linux/spi/spi.h> | ||
27 | #include <linux/spi/pxa2xx_spi.h> | ||
28 | |||
29 | #include "spi-pxa2xx.h" | ||
30 | |||
31 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | ||
32 | #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) | ||
33 | |||
bool pxa2xx_spi_dma_is_possible(size_t len)
{
	/* Try to map dma buffer and do a dma transfer if successful, but
	 * only if the length is non-zero and less than MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead. Care is needed above because the transfer may
	 * have been passed with buffers that are already dma mapped.
	 * A zero-length transfer in PIO mode will not try to write/read
	 * to/from the buffers
	 *
	 * REVISIT large transfers are exactly where we most want to be
	 * using DMA. If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	return len > 0 && len <= MAX_DMA_LEN;
}
51 | |||
/*
 * pxa2xx_spi_map_dma_buffers - stream-map the TX and RX buffers of the
 * current transfer for the legacy PXA DMA path.
 *
 * Returns 1 when DMA can be used for this transfer, 0 to fall back to
 * PIO (DMA disabled, unaligned buffers, or a mapping failed).  NULL
 * buffers are redirected to the 4-byte null_dma_buf scratch word.
 * Buffers already mapped by the caller (msg->is_dma_mapped) are used
 * as-is.
 */
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;


	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		/* Undo the TX mapping before reporting failure */
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}
103 | |||
/*
 * Undo the streaming mappings made by pxa2xx_spi_map_dma_buffers().
 * Buffers mapped by the caller (msg->is_dma_mapped) are left alone.
 */
static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}
121 | |||
122 | static int wait_ssp_rx_stall(void const __iomem *ioaddr) | ||
123 | { | ||
124 | unsigned long limit = loops_per_jiffy << 1; | ||
125 | |||
126 | while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) | ||
127 | cpu_relax(); | ||
128 | |||
129 | return limit; | ||
130 | } | ||
131 | |||
132 | static int wait_dma_channel_stop(int channel) | ||
133 | { | ||
134 | unsigned long limit = loops_per_jiffy << 1; | ||
135 | |||
136 | while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) | ||
137 | cpu_relax(); | ||
138 | |||
139 | return limit; | ||
140 | } | ||
141 | |||
/*
 * pxa2xx_spi_dma_error_stop - hard-stop both DMA channels and the SSP
 * after an error.
 * @msg: human-readable reason, logged via dev_err()
 *
 * Resets the channels, clears SSP status, flushes the FIFO, disables the
 * port, unmaps the buffers, marks the message failed and schedules the
 * pump_transfers tasklet so the core can run its error path.
 */
static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
				      const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	pxa2xx_spi_flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}
164 | |||
/*
 * Normal end-of-DMA path: quiesce the SSP and both channels, drain any
 * trailing RX FIFO bytes by PIO, account the received length and advance
 * to the next transfer via the pump_transfers tasklet.
 */
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels*/
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	/* update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* read trailing data from fifo, it does not matter how many
	 * bytes are in the fifo just read until buffer is full
	 * or fifo is empty, whichever occurs first */
	drv_data->read(drv_data);

	/* return count of what was actually read */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}
209 | |||
/*
 * pxa2xx_spi_dma_handler - legacy PXA DMA channel interrupt handler.
 * @channel: the DMA channel that raised the interrupt
 * @data: the driver_data registered with pxa_request_dma()
 *
 * Bus errors abort the transfer; on PXA25x (which lacks an SSP timeout
 * interrupt) the TX end-of-chain interrupt is used to detect completion
 * after waiting for the receiver to drain.
 */
void pxa2xx_spi_dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {

		if (channel == drv_data->tx_channel)
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on tx channel");
		else
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on rx channel");
		return;
	}

	/* PXA25x SSP has no timeout interrupt, wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);
	}
}
240 | |||
/*
 * SSP interrupt during a private-DMA transfer: handle FIFO overrun,
 * ignore false timeouts raised while the TX channel is still running,
 * and complete the transfer on a genuine timeout or a full RX buffer.
 * Returns IRQ_NONE when the interrupt was not for us.
 */
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;

	irq_status = read_SSSR(reg) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		pxa2xx_spi_dma_error_stop(drv_data,
			"dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		write_SSSR(SSSR_TINT, reg);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable timeout interrupt, do the rest in
		 * dma_transfer_complete */
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);

		/* finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}
276 | |||
277 | int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst) | ||
278 | { | ||
279 | u32 dma_width; | ||
280 | |||
281 | switch (drv_data->n_bytes) { | ||
282 | case 1: | ||
283 | dma_width = DCMD_WIDTH1; | ||
284 | break; | ||
285 | case 2: | ||
286 | dma_width = DCMD_WIDTH2; | ||
287 | break; | ||
288 | default: | ||
289 | dma_width = DCMD_WIDTH4; | ||
290 | break; | ||
291 | } | ||
292 | |||
293 | /* Setup rx DMA Channel */ | ||
294 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
295 | DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; | ||
296 | DTADR(drv_data->rx_channel) = drv_data->rx_dma; | ||
297 | if (drv_data->rx == drv_data->null_dma_buf) | ||
298 | /* No target address increment */ | ||
299 | DCMD(drv_data->rx_channel) = DCMD_FLOWSRC | ||
300 | | dma_width | ||
301 | | dma_burst | ||
302 | | drv_data->len; | ||
303 | else | ||
304 | DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR | ||
305 | | DCMD_FLOWSRC | ||
306 | | dma_width | ||
307 | | dma_burst | ||
308 | | drv_data->len; | ||
309 | |||
310 | /* Setup tx DMA Channel */ | ||
311 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
312 | DSADR(drv_data->tx_channel) = drv_data->tx_dma; | ||
313 | DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; | ||
314 | if (drv_data->tx == drv_data->null_dma_buf) | ||
315 | /* No source address increment */ | ||
316 | DCMD(drv_data->tx_channel) = DCMD_FLOWTRG | ||
317 | | dma_width | ||
318 | | dma_burst | ||
319 | | drv_data->len; | ||
320 | else | ||
321 | DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR | ||
322 | | DCMD_FLOWTRG | ||
323 | | dma_width | ||
324 | | dma_burst | ||
325 | | drv_data->len; | ||
326 | |||
327 | /* Enable dma end irqs on SSP to detect end of transfer */ | ||
328 | if (drv_data->ssp_type == PXA25x_SSP) | ||
329 | DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | |||
/* Set both channels running; TX feeds the SSP FIFO, RX drains it. */
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	DCSR(drv_data->rx_channel) |= DCSR_RUN;
	DCSR(drv_data->tx_channel) |= DCSR_RUN;
}
339 | |||
/*
 * Acquire the two legacy PXA DMA channels (RX at higher priority, making
 * receiver overruns less likely) and route the SSP request lines to them
 * via DRCMR.  Returns 0 or -ENODEV when a channel is unavailable.
 */
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct device *dev = &drv_data->pdev->dev;
	struct ssp_device *ssp = drv_data->ssp;

	/* Get two DMA channels	(rx and tx) */
	drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
						DMA_PRIO_HIGH,
						pxa2xx_spi_dma_handler,
						drv_data);
	if (drv_data->rx_channel < 0) {
		dev_err(dev, "problem (%d) requesting rx channel\n",
			drv_data->rx_channel);
		return -ENODEV;
	}
	drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
						DMA_PRIO_MEDIUM,
						pxa2xx_spi_dma_handler,
						drv_data);
	if (drv_data->tx_channel < 0) {
		dev_err(dev, "problem (%d) requesting tx channel\n",
			drv_data->tx_channel);
		/* Release the rx channel acquired above */
		pxa_free_dma(drv_data->rx_channel);
		return -ENODEV;
	}

	DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
	DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;

	return 0;
}
371 | |||
372 | void pxa2xx_spi_dma_release(struct driver_data *drv_data) | ||
373 | { | ||
374 | struct ssp_device *ssp = drv_data->ssp; | ||
375 | |||
376 | DRCMR(ssp->drcmr_rx) = 0; | ||
377 | DRCMR(ssp->drcmr_tx) = 0; | ||
378 | |||
379 | if (drv_data->tx_channel != 0) | ||
380 | pxa_free_dma(drv_data->tx_channel); | ||
381 | if (drv_data->rx_channel != 0) | ||
382 | pxa_free_dma(drv_data->rx_channel); | ||
383 | } | ||
384 | |||
/*
 * Re-establish the DRCMR request-line routing that is lost over a
 * suspend/resume cycle; channel numbers of -1 mean "not allocated".
 */
void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;
}
394 | |||
/*
 * pxa2xx_spi_set_dma_burst_and_threshold - choose a DMA burst size and
 * matching SSP FIFO thresholds for @spi.
 *
 * *burst_code receives the DCMD burst bits, *threshold the combined
 * SSCR1 RFT/TFT fields.  Returns 1 when the requested burst size had to
 * be reduced to fit the FIFO constraints, otherwise 0.
 */
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
				    struct spi_device *spi,
				    u8 bits_per_word, u32 *burst_code,
				    u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by burst size (in bytes). The computation below
	 * is (burst_size rounded up to nearest 8 byte, word or long word)
	 * divided by (bytes/register); the tx threshold is the inverse of
	 * the rx, so that there will always be enough data in the rx fifo
	 * to satisfy a burst, and there will always be enough space in the
	 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
	 * there is not enough space), there must always remain enough empty
	 * space in the rx fifo for any data loaded to the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
	 * will be 8, or half the fifo;
	 * The threshold can only be set to 2, 4 or 8, but not 16, because
	 * to burst 16 to the tx fifo, the fifo would have to be empty;
	 * however, the minimum fifo trigger level is 1, and the tx will
	 * request service when the fifo is at this level, with only 15 spaces.
	 */

	/* find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* if the default burst size is not set,
			 * do it now */
			chip->dma_burst_size = DCMD_BURST8;
			/* fall through */
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);

	return retval;
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 304cf6eb50e6..90b27a3508a6 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs | 2 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs |
3 | * Copyright (C) 2013, Intel Corporation | ||
3 | * | 4 | * |
4 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -24,18 +25,20 @@ | |||
24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
25 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
26 | #include <linux/spi/pxa2xx_spi.h> | 27 | #include <linux/spi/pxa2xx_spi.h> |
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/spi/spi.h> | 28 | #include <linux/spi/spi.h> |
29 | #include <linux/workqueue.h> | 29 | #include <linux/workqueue.h> |
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include <linux/gpio.h> | 31 | #include <linux/gpio.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/clk.h> | 33 | #include <linux/clk.h> |
34 | #include <linux/pm_runtime.h> | ||
35 | #include <linux/acpi.h> | ||
34 | 36 | ||
35 | #include <asm/io.h> | 37 | #include <asm/io.h> |
36 | #include <asm/irq.h> | 38 | #include <asm/irq.h> |
37 | #include <asm/delay.h> | 39 | #include <asm/delay.h> |
38 | 40 | ||
41 | #include "spi-pxa2xx.h" | ||
39 | 42 | ||
40 | MODULE_AUTHOR("Stephen Street"); | 43 | MODULE_AUTHOR("Stephen Street"); |
41 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); | 44 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); |
@@ -46,12 +49,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
46 | 49 | ||
47 | #define TIMOUT_DFLT 1000 | 50 | #define TIMOUT_DFLT 1000 |
48 | 51 | ||
49 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | ||
50 | #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) | ||
51 | #define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) | ||
52 | #define MAX_DMA_LEN 8191 | ||
53 | #define DMA_ALIGNMENT 8 | ||
54 | |||
55 | /* | 52 | /* |
56 | * for testing SSCR1 changes that require SSP restart, basically | 53 | * for testing SSCR1 changes that require SSP restart, basically |
57 | * everything except the service and interrupt enables, the pxa270 developer | 54 | * everything except the service and interrupt enables, the pxa270 developer |
@@ -66,105 +63,97 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
66 | | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ | 63 | | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ |
67 | | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) | 64 | | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) |
68 | 65 | ||
69 | #define DEFINE_SSP_REG(reg, off) \ | 66 | #define LPSS_RX_THRESH_DFLT 64 |
70 | static inline u32 read_##reg(void const __iomem *p) \ | 67 | #define LPSS_TX_LOTHRESH_DFLT 160 |
71 | { return __raw_readl(p + (off)); } \ | 68 | #define LPSS_TX_HITHRESH_DFLT 224 |
72 | \ | ||
73 | static inline void write_##reg(u32 v, void __iomem *p) \ | ||
74 | { __raw_writel(v, p + (off)); } | ||
75 | |||
76 | DEFINE_SSP_REG(SSCR0, 0x00) | ||
77 | DEFINE_SSP_REG(SSCR1, 0x04) | ||
78 | DEFINE_SSP_REG(SSSR, 0x08) | ||
79 | DEFINE_SSP_REG(SSITR, 0x0c) | ||
80 | DEFINE_SSP_REG(SSDR, 0x10) | ||
81 | DEFINE_SSP_REG(SSTO, 0x28) | ||
82 | DEFINE_SSP_REG(SSPSP, 0x2c) | ||
83 | |||
84 | #define START_STATE ((void*)0) | ||
85 | #define RUNNING_STATE ((void*)1) | ||
86 | #define DONE_STATE ((void*)2) | ||
87 | #define ERROR_STATE ((void*)-1) | ||
88 | |||
89 | struct driver_data { | ||
90 | /* Driver model hookup */ | ||
91 | struct platform_device *pdev; | ||
92 | |||
93 | /* SSP Info */ | ||
94 | struct ssp_device *ssp; | ||
95 | 69 | ||
96 | /* SPI framework hookup */ | 70 | /* Offset from drv_data->lpss_base */ |
97 | enum pxa_ssp_type ssp_type; | 71 | #define SPI_CS_CONTROL 0x18 |
98 | struct spi_master *master; | 72 | #define SPI_CS_CONTROL_SW_MODE BIT(0) |
73 | #define SPI_CS_CONTROL_CS_HIGH BIT(1) | ||
99 | 74 | ||
100 | /* PXA hookup */ | 75 | static bool is_lpss_ssp(const struct driver_data *drv_data) |
101 | struct pxa2xx_spi_master *master_info; | 76 | { |
102 | 77 | return drv_data->ssp_type == LPSS_SSP; | |
103 | /* DMA setup stuff */ | 78 | } |
104 | int rx_channel; | ||
105 | int tx_channel; | ||
106 | u32 *null_dma_buf; | ||
107 | |||
108 | /* SSP register addresses */ | ||
109 | void __iomem *ioaddr; | ||
110 | u32 ssdr_physical; | ||
111 | |||
112 | /* SSP masks*/ | ||
113 | u32 dma_cr1; | ||
114 | u32 int_cr1; | ||
115 | u32 clear_sr; | ||
116 | u32 mask_sr; | ||
117 | |||
118 | /* Maximun clock rate */ | ||
119 | unsigned long max_clk_rate; | ||
120 | |||
121 | /* Message Transfer pump */ | ||
122 | struct tasklet_struct pump_transfers; | ||
123 | |||
124 | /* Current message transfer state info */ | ||
125 | struct spi_message* cur_msg; | ||
126 | struct spi_transfer* cur_transfer; | ||
127 | struct chip_data *cur_chip; | ||
128 | size_t len; | ||
129 | void *tx; | ||
130 | void *tx_end; | ||
131 | void *rx; | ||
132 | void *rx_end; | ||
133 | int dma_mapped; | ||
134 | dma_addr_t rx_dma; | ||
135 | dma_addr_t tx_dma; | ||
136 | size_t rx_map_len; | ||
137 | size_t tx_map_len; | ||
138 | u8 n_bytes; | ||
139 | u32 dma_width; | ||
140 | int (*write)(struct driver_data *drv_data); | ||
141 | int (*read)(struct driver_data *drv_data); | ||
142 | irqreturn_t (*transfer_handler)(struct driver_data *drv_data); | ||
143 | void (*cs_control)(u32 command); | ||
144 | }; | ||
145 | 79 | ||
146 | struct chip_data { | 80 | /* |
147 | u32 cr0; | 81 | * Read and write LPSS SSP private registers. Caller must first check that |
148 | u32 cr1; | 82 | * is_lpss_ssp() returns true before these can be called. |
149 | u32 psp; | 83 | */ |
150 | u32 timeout; | 84 | static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset) |
151 | u8 n_bytes; | 85 | { |
152 | u32 dma_width; | 86 | WARN_ON(!drv_data->lpss_base); |
153 | u32 dma_burst_size; | 87 | return readl(drv_data->lpss_base + offset); |
154 | u32 threshold; | 88 | } |
155 | u32 dma_threshold; | 89 | |
156 | u8 enable_dma; | 90 | static void __lpss_ssp_write_priv(struct driver_data *drv_data, |
157 | u8 bits_per_word; | 91 | unsigned offset, u32 value) |
158 | u32 speed_hz; | 92 | { |
159 | union { | 93 | WARN_ON(!drv_data->lpss_base); |
160 | int gpio_cs; | 94 | writel(value, drv_data->lpss_base + offset); |
161 | unsigned int frm; | 95 | } |
162 | }; | 96 | |
163 | int gpio_cs_inverted; | 97 | /* |
164 | int (*write)(struct driver_data *drv_data); | 98 | * lpss_ssp_setup - perform LPSS SSP specific setup |
165 | int (*read)(struct driver_data *drv_data); | 99 | * @drv_data: pointer to the driver private data |
166 | void (*cs_control)(u32 command); | 100 | * |
167 | }; | 101 | * Perform LPSS SSP specific setup. This function must be called first if |
102 | * one is going to use LPSS SSP private registers. | ||
103 | */ | ||
104 | static void lpss_ssp_setup(struct driver_data *drv_data) | ||
105 | { | ||
106 | unsigned offset = 0x400; | ||
107 | u32 value, orig; | ||
108 | |||
109 | if (!is_lpss_ssp(drv_data)) | ||
110 | return; | ||
111 | |||
112 | /* | ||
113 | * Perform auto-detection of the LPSS SSP private registers. They | ||
114 | * can be either at 1k or 2k offset from the base address. | ||
115 | */ | ||
116 | orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); | ||
117 | |||
118 | value = orig | SPI_CS_CONTROL_SW_MODE; | ||
119 | writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL); | ||
120 | value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); | ||
121 | if (value != (orig | SPI_CS_CONTROL_SW_MODE)) { | ||
122 | offset = 0x800; | ||
123 | goto detection_done; | ||
124 | } | ||
125 | |||
126 | value &= ~SPI_CS_CONTROL_SW_MODE; | ||
127 | writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL); | ||
128 | value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); | ||
129 | if (value != orig) { | ||
130 | offset = 0x800; | ||
131 | goto detection_done; | ||
132 | } | ||
133 | |||
134 | detection_done: | ||
135 | /* Now set the LPSS base */ | ||
136 | drv_data->lpss_base = drv_data->ioaddr + offset; | ||
137 | |||
138 | /* Enable software chip select control */ | ||
139 | value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH; | ||
140 | __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); | ||
141 | } | ||
142 | |||
143 | static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) | ||
144 | { | ||
145 | u32 value; | ||
146 | |||
147 | if (!is_lpss_ssp(drv_data)) | ||
148 | return; | ||
149 | |||
150 | value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); | ||
151 | if (enable) | ||
152 | value &= ~SPI_CS_CONTROL_CS_HIGH; | ||
153 | else | ||
154 | value |= SPI_CS_CONTROL_CS_HIGH; | ||
155 | __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); | ||
156 | } | ||
168 | 157 | ||
169 | static void cs_assert(struct driver_data *drv_data) | 158 | static void cs_assert(struct driver_data *drv_data) |
170 | { | 159 | { |
@@ -180,8 +169,12 @@ static void cs_assert(struct driver_data *drv_data) | |||
180 | return; | 169 | return; |
181 | } | 170 | } |
182 | 171 | ||
183 | if (gpio_is_valid(chip->gpio_cs)) | 172 | if (gpio_is_valid(chip->gpio_cs)) { |
184 | gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); | 173 | gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); |
174 | return; | ||
175 | } | ||
176 | |||
177 | lpss_ssp_cs_control(drv_data, true); | ||
185 | } | 178 | } |
186 | 179 | ||
187 | static void cs_deassert(struct driver_data *drv_data) | 180 | static void cs_deassert(struct driver_data *drv_data) |
@@ -196,30 +189,15 @@ static void cs_deassert(struct driver_data *drv_data) | |||
196 | return; | 189 | return; |
197 | } | 190 | } |
198 | 191 | ||
199 | if (gpio_is_valid(chip->gpio_cs)) | 192 | if (gpio_is_valid(chip->gpio_cs)) { |
200 | gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); | 193 | gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); |
201 | } | 194 | return; |
202 | 195 | } | |
203 | static void write_SSSR_CS(struct driver_data *drv_data, u32 val) | ||
204 | { | ||
205 | void __iomem *reg = drv_data->ioaddr; | ||
206 | |||
207 | if (drv_data->ssp_type == CE4100_SSP) | ||
208 | val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; | ||
209 | |||
210 | write_SSSR(val, reg); | ||
211 | } | ||
212 | 196 | ||
213 | static int pxa25x_ssp_comp(struct driver_data *drv_data) | 197 | lpss_ssp_cs_control(drv_data, false); |
214 | { | ||
215 | if (drv_data->ssp_type == PXA25x_SSP) | ||
216 | return 1; | ||
217 | if (drv_data->ssp_type == CE4100_SSP) | ||
218 | return 1; | ||
219 | return 0; | ||
220 | } | 198 | } |
221 | 199 | ||
222 | static int flush(struct driver_data *drv_data) | 200 | int pxa2xx_spi_flush(struct driver_data *drv_data) |
223 | { | 201 | { |
224 | unsigned long limit = loops_per_jiffy << 1; | 202 | unsigned long limit = loops_per_jiffy << 1; |
225 | 203 | ||
@@ -345,7 +323,7 @@ static int u32_reader(struct driver_data *drv_data) | |||
345 | return drv_data->rx == drv_data->rx_end; | 323 | return drv_data->rx == drv_data->rx_end; |
346 | } | 324 | } |
347 | 325 | ||
348 | static void *next_transfer(struct driver_data *drv_data) | 326 | void *pxa2xx_spi_next_transfer(struct driver_data *drv_data) |
349 | { | 327 | { |
350 | struct spi_message *msg = drv_data->cur_msg; | 328 | struct spi_message *msg = drv_data->cur_msg; |
351 | struct spi_transfer *trans = drv_data->cur_transfer; | 329 | struct spi_transfer *trans = drv_data->cur_transfer; |
@@ -361,76 +339,6 @@ static void *next_transfer(struct driver_data *drv_data) | |||
361 | return DONE_STATE; | 339 | return DONE_STATE; |
362 | } | 340 | } |
363 | 341 | ||
364 | static int map_dma_buffers(struct driver_data *drv_data) | ||
365 | { | ||
366 | struct spi_message *msg = drv_data->cur_msg; | ||
367 | struct device *dev = &msg->spi->dev; | ||
368 | |||
369 | if (!drv_data->cur_chip->enable_dma) | ||
370 | return 0; | ||
371 | |||
372 | if (msg->is_dma_mapped) | ||
373 | return drv_data->rx_dma && drv_data->tx_dma; | ||
374 | |||
375 | if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) | ||
376 | return 0; | ||
377 | |||
378 | /* Modify setup if rx buffer is null */ | ||
379 | if (drv_data->rx == NULL) { | ||
380 | *drv_data->null_dma_buf = 0; | ||
381 | drv_data->rx = drv_data->null_dma_buf; | ||
382 | drv_data->rx_map_len = 4; | ||
383 | } else | ||
384 | drv_data->rx_map_len = drv_data->len; | ||
385 | |||
386 | |||
387 | /* Modify setup if tx buffer is null */ | ||
388 | if (drv_data->tx == NULL) { | ||
389 | *drv_data->null_dma_buf = 0; | ||
390 | drv_data->tx = drv_data->null_dma_buf; | ||
391 | drv_data->tx_map_len = 4; | ||
392 | } else | ||
393 | drv_data->tx_map_len = drv_data->len; | ||
394 | |||
395 | /* Stream map the tx buffer. Always do DMA_TO_DEVICE first | ||
396 | * so we flush the cache *before* invalidating it, in case | ||
397 | * the tx and rx buffers overlap. | ||
398 | */ | ||
399 | drv_data->tx_dma = dma_map_single(dev, drv_data->tx, | ||
400 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
401 | if (dma_mapping_error(dev, drv_data->tx_dma)) | ||
402 | return 0; | ||
403 | |||
404 | /* Stream map the rx buffer */ | ||
405 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, | ||
406 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
407 | if (dma_mapping_error(dev, drv_data->rx_dma)) { | ||
408 | dma_unmap_single(dev, drv_data->tx_dma, | ||
409 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | return 1; | ||
414 | } | ||
415 | |||
416 | static void unmap_dma_buffers(struct driver_data *drv_data) | ||
417 | { | ||
418 | struct device *dev; | ||
419 | |||
420 | if (!drv_data->dma_mapped) | ||
421 | return; | ||
422 | |||
423 | if (!drv_data->cur_msg->is_dma_mapped) { | ||
424 | dev = &drv_data->cur_msg->spi->dev; | ||
425 | dma_unmap_single(dev, drv_data->rx_dma, | ||
426 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
427 | dma_unmap_single(dev, drv_data->tx_dma, | ||
428 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
429 | } | ||
430 | |||
431 | drv_data->dma_mapped = 0; | ||
432 | } | ||
433 | |||
434 | /* caller already set message->status; dma and pio irqs are blocked */ | 342 | /* caller already set message->status; dma and pio irqs are blocked */ |
435 | static void giveback(struct driver_data *drv_data) | 343 | static void giveback(struct driver_data *drv_data) |
436 | { | 344 | { |
@@ -483,161 +391,6 @@ static void giveback(struct driver_data *drv_data) | |||
483 | drv_data->cur_chip = NULL; | 391 | drv_data->cur_chip = NULL; |
484 | } | 392 | } |
485 | 393 | ||
486 | static int wait_ssp_rx_stall(void const __iomem *ioaddr) | ||
487 | { | ||
488 | unsigned long limit = loops_per_jiffy << 1; | ||
489 | |||
490 | while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) | ||
491 | cpu_relax(); | ||
492 | |||
493 | return limit; | ||
494 | } | ||
495 | |||
496 | static int wait_dma_channel_stop(int channel) | ||
497 | { | ||
498 | unsigned long limit = loops_per_jiffy << 1; | ||
499 | |||
500 | while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) | ||
501 | cpu_relax(); | ||
502 | |||
503 | return limit; | ||
504 | } | ||
505 | |||
506 | static void dma_error_stop(struct driver_data *drv_data, const char *msg) | ||
507 | { | ||
508 | void __iomem *reg = drv_data->ioaddr; | ||
509 | |||
510 | /* Stop and reset */ | ||
511 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
512 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
513 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
514 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
515 | if (!pxa25x_ssp_comp(drv_data)) | ||
516 | write_SSTO(0, reg); | ||
517 | flush(drv_data); | ||
518 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
519 | |||
520 | unmap_dma_buffers(drv_data); | ||
521 | |||
522 | dev_err(&drv_data->pdev->dev, "%s\n", msg); | ||
523 | |||
524 | drv_data->cur_msg->state = ERROR_STATE; | ||
525 | tasklet_schedule(&drv_data->pump_transfers); | ||
526 | } | ||
527 | |||
528 | static void dma_transfer_complete(struct driver_data *drv_data) | ||
529 | { | ||
530 | void __iomem *reg = drv_data->ioaddr; | ||
531 | struct spi_message *msg = drv_data->cur_msg; | ||
532 | |||
533 | /* Clear and disable interrupts on SSP and DMA channels*/ | ||
534 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
535 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
536 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
537 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
538 | |||
539 | if (wait_dma_channel_stop(drv_data->rx_channel) == 0) | ||
540 | dev_err(&drv_data->pdev->dev, | ||
541 | "dma_handler: dma rx channel stop failed\n"); | ||
542 | |||
543 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
544 | dev_err(&drv_data->pdev->dev, | ||
545 | "dma_transfer: ssp rx stall failed\n"); | ||
546 | |||
547 | unmap_dma_buffers(drv_data); | ||
548 | |||
549 | /* update the buffer pointer for the amount completed in dma */ | ||
550 | drv_data->rx += drv_data->len - | ||
551 | (DCMD(drv_data->rx_channel) & DCMD_LENGTH); | ||
552 | |||
553 | /* read trailing data from fifo, it does not matter how many | ||
554 | * bytes are in the fifo just read until buffer is full | ||
555 | * or fifo is empty, which ever occurs first */ | ||
556 | drv_data->read(drv_data); | ||
557 | |||
558 | /* return count of what was actually read */ | ||
559 | msg->actual_length += drv_data->len - | ||
560 | (drv_data->rx_end - drv_data->rx); | ||
561 | |||
562 | /* Transfer delays and chip select release are | ||
563 | * handled in pump_transfers or giveback | ||
564 | */ | ||
565 | |||
566 | /* Move to next transfer */ | ||
567 | msg->state = next_transfer(drv_data); | ||
568 | |||
569 | /* Schedule transfer tasklet */ | ||
570 | tasklet_schedule(&drv_data->pump_transfers); | ||
571 | } | ||
572 | |||
573 | static void dma_handler(int channel, void *data) | ||
574 | { | ||
575 | struct driver_data *drv_data = data; | ||
576 | u32 irq_status = DCSR(channel) & DMA_INT_MASK; | ||
577 | |||
578 | if (irq_status & DCSR_BUSERR) { | ||
579 | |||
580 | if (channel == drv_data->tx_channel) | ||
581 | dma_error_stop(drv_data, | ||
582 | "dma_handler: " | ||
583 | "bad bus address on tx channel"); | ||
584 | else | ||
585 | dma_error_stop(drv_data, | ||
586 | "dma_handler: " | ||
587 | "bad bus address on rx channel"); | ||
588 | return; | ||
589 | } | ||
590 | |||
591 | /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ | ||
592 | if ((channel == drv_data->tx_channel) | ||
593 | && (irq_status & DCSR_ENDINTR) | ||
594 | && (drv_data->ssp_type == PXA25x_SSP)) { | ||
595 | |||
596 | /* Wait for rx to stall */ | ||
597 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
598 | dev_err(&drv_data->pdev->dev, | ||
599 | "dma_handler: ssp rx stall failed\n"); | ||
600 | |||
601 | /* finish this transfer, start the next */ | ||
602 | dma_transfer_complete(drv_data); | ||
603 | } | ||
604 | } | ||
605 | |||
606 | static irqreturn_t dma_transfer(struct driver_data *drv_data) | ||
607 | { | ||
608 | u32 irq_status; | ||
609 | void __iomem *reg = drv_data->ioaddr; | ||
610 | |||
611 | irq_status = read_SSSR(reg) & drv_data->mask_sr; | ||
612 | if (irq_status & SSSR_ROR) { | ||
613 | dma_error_stop(drv_data, "dma_transfer: fifo overrun"); | ||
614 | return IRQ_HANDLED; | ||
615 | } | ||
616 | |||
617 | /* Check for false positive timeout */ | ||
618 | if ((irq_status & SSSR_TINT) | ||
619 | && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { | ||
620 | write_SSSR(SSSR_TINT, reg); | ||
621 | return IRQ_HANDLED; | ||
622 | } | ||
623 | |||
624 | if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { | ||
625 | |||
626 | /* Clear and disable timeout interrupt, do the rest in | ||
627 | * dma_transfer_complete */ | ||
628 | if (!pxa25x_ssp_comp(drv_data)) | ||
629 | write_SSTO(0, reg); | ||
630 | |||
631 | /* finish this transfer, start the next */ | ||
632 | dma_transfer_complete(drv_data); | ||
633 | |||
634 | return IRQ_HANDLED; | ||
635 | } | ||
636 | |||
637 | /* Opps problem detected */ | ||
638 | return IRQ_NONE; | ||
639 | } | ||
640 | |||
641 | static void reset_sccr1(struct driver_data *drv_data) | 394 | static void reset_sccr1(struct driver_data *drv_data) |
642 | { | 395 | { |
643 | void __iomem *reg = drv_data->ioaddr; | 396 | void __iomem *reg = drv_data->ioaddr; |
@@ -659,7 +412,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) | |||
659 | reset_sccr1(drv_data); | 412 | reset_sccr1(drv_data); |
660 | if (!pxa25x_ssp_comp(drv_data)) | 413 | if (!pxa25x_ssp_comp(drv_data)) |
661 | write_SSTO(0, reg); | 414 | write_SSTO(0, reg); |
662 | flush(drv_data); | 415 | pxa2xx_spi_flush(drv_data); |
663 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 416 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); |
664 | 417 | ||
665 | dev_err(&drv_data->pdev->dev, "%s\n", msg); | 418 | dev_err(&drv_data->pdev->dev, "%s\n", msg); |
@@ -687,7 +440,7 @@ static void int_transfer_complete(struct driver_data *drv_data) | |||
687 | */ | 440 | */ |
688 | 441 | ||
689 | /* Move to next transfer */ | 442 | /* Move to next transfer */ |
690 | drv_data->cur_msg->state = next_transfer(drv_data); | 443 | drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data); |
691 | 444 | ||
692 | /* Schedule transfer tasklet */ | 445 | /* Schedule transfer tasklet */ |
693 | tasklet_schedule(&drv_data->pump_transfers); | 446 | tasklet_schedule(&drv_data->pump_transfers); |
@@ -767,10 +520,20 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
767 | { | 520 | { |
768 | struct driver_data *drv_data = dev_id; | 521 | struct driver_data *drv_data = dev_id; |
769 | void __iomem *reg = drv_data->ioaddr; | 522 | void __iomem *reg = drv_data->ioaddr; |
770 | u32 sccr1_reg = read_SSCR1(reg); | 523 | u32 sccr1_reg; |
771 | u32 mask = drv_data->mask_sr; | 524 | u32 mask = drv_data->mask_sr; |
772 | u32 status; | 525 | u32 status; |
773 | 526 | ||
527 | /* | ||
528 | * The IRQ might be shared with other peripherals so we must first | ||
529 | * check that are we RPM suspended or not. If we are we assume that | ||
530 | * the IRQ was not for us (we shouldn't be RPM suspended when the | ||
531 | * interrupt is enabled). | ||
532 | */ | ||
533 | if (pm_runtime_suspended(&drv_data->pdev->dev)) | ||
534 | return IRQ_NONE; | ||
535 | |||
536 | sccr1_reg = read_SSCR1(reg); | ||
774 | status = read_SSSR(reg); | 537 | status = read_SSSR(reg); |
775 | 538 | ||
776 | /* Ignore possible writes if we don't need to write */ | 539 | /* Ignore possible writes if we don't need to write */ |
@@ -798,103 +561,6 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
798 | return drv_data->transfer_handler(drv_data); | 561 | return drv_data->transfer_handler(drv_data); |
799 | } | 562 | } |
800 | 563 | ||
801 | static int set_dma_burst_and_threshold(struct chip_data *chip, | ||
802 | struct spi_device *spi, | ||
803 | u8 bits_per_word, u32 *burst_code, | ||
804 | u32 *threshold) | ||
805 | { | ||
806 | struct pxa2xx_spi_chip *chip_info = | ||
807 | (struct pxa2xx_spi_chip *)spi->controller_data; | ||
808 | int bytes_per_word; | ||
809 | int burst_bytes; | ||
810 | int thresh_words; | ||
811 | int req_burst_size; | ||
812 | int retval = 0; | ||
813 | |||
814 | /* Set the threshold (in registers) to equal the same amount of data | ||
815 | * as represented by burst size (in bytes). The computation below | ||
816 | * is (burst_size rounded up to nearest 8 byte, word or long word) | ||
817 | * divided by (bytes/register); the tx threshold is the inverse of | ||
818 | * the rx, so that there will always be enough data in the rx fifo | ||
819 | * to satisfy a burst, and there will always be enough space in the | ||
820 | * tx fifo to accept a burst (a tx burst will overwrite the fifo if | ||
821 | * there is not enough space), there must always remain enough empty | ||
822 | * space in the rx fifo for any data loaded to the tx fifo. | ||
823 | * Whenever burst_size (in bytes) equals bits/word, the fifo threshold | ||
824 | * will be 8, or half the fifo; | ||
825 | * The threshold can only be set to 2, 4 or 8, but not 16, because | ||
826 | * to burst 16 to the tx fifo, the fifo would have to be empty; | ||
827 | * however, the minimum fifo trigger level is 1, and the tx will | ||
828 | * request service when the fifo is at this level, with only 15 spaces. | ||
829 | */ | ||
830 | |||
831 | /* find bytes/word */ | ||
832 | if (bits_per_word <= 8) | ||
833 | bytes_per_word = 1; | ||
834 | else if (bits_per_word <= 16) | ||
835 | bytes_per_word = 2; | ||
836 | else | ||
837 | bytes_per_word = 4; | ||
838 | |||
839 | /* use struct pxa2xx_spi_chip->dma_burst_size if available */ | ||
840 | if (chip_info) | ||
841 | req_burst_size = chip_info->dma_burst_size; | ||
842 | else { | ||
843 | switch (chip->dma_burst_size) { | ||
844 | default: | ||
845 | /* if the default burst size is not set, | ||
846 | * do it now */ | ||
847 | chip->dma_burst_size = DCMD_BURST8; | ||
848 | case DCMD_BURST8: | ||
849 | req_burst_size = 8; | ||
850 | break; | ||
851 | case DCMD_BURST16: | ||
852 | req_burst_size = 16; | ||
853 | break; | ||
854 | case DCMD_BURST32: | ||
855 | req_burst_size = 32; | ||
856 | break; | ||
857 | } | ||
858 | } | ||
859 | if (req_burst_size <= 8) { | ||
860 | *burst_code = DCMD_BURST8; | ||
861 | burst_bytes = 8; | ||
862 | } else if (req_burst_size <= 16) { | ||
863 | if (bytes_per_word == 1) { | ||
864 | /* don't burst more than 1/2 the fifo */ | ||
865 | *burst_code = DCMD_BURST8; | ||
866 | burst_bytes = 8; | ||
867 | retval = 1; | ||
868 | } else { | ||
869 | *burst_code = DCMD_BURST16; | ||
870 | burst_bytes = 16; | ||
871 | } | ||
872 | } else { | ||
873 | if (bytes_per_word == 1) { | ||
874 | /* don't burst more than 1/2 the fifo */ | ||
875 | *burst_code = DCMD_BURST8; | ||
876 | burst_bytes = 8; | ||
877 | retval = 1; | ||
878 | } else if (bytes_per_word == 2) { | ||
879 | /* don't burst more than 1/2 the fifo */ | ||
880 | *burst_code = DCMD_BURST16; | ||
881 | burst_bytes = 16; | ||
882 | retval = 1; | ||
883 | } else { | ||
884 | *burst_code = DCMD_BURST32; | ||
885 | burst_bytes = 32; | ||
886 | } | ||
887 | } | ||
888 | |||
889 | thresh_words = burst_bytes / bytes_per_word; | ||
890 | |||
891 | /* thresh_words will be between 2 and 8 */ | ||
892 | *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) | ||
893 | | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); | ||
894 | |||
895 | return retval; | ||
896 | } | ||
897 | |||
898 | static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) | 564 | static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) |
899 | { | 565 | { |
900 | unsigned long ssp_clk = drv_data->max_clk_rate; | 566 | unsigned long ssp_clk = drv_data->max_clk_rate; |
@@ -956,8 +622,8 @@ static void pump_transfers(unsigned long data) | |||
956 | cs_deassert(drv_data); | 622 | cs_deassert(drv_data); |
957 | } | 623 | } |
958 | 624 | ||
959 | /* Check for transfers that need multiple DMA segments */ | 625 | /* Check if we can DMA this transfer */ |
960 | if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { | 626 | if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) { |
961 | 627 | ||
962 | /* reject already-mapped transfers; PIO won't always work */ | 628 | /* reject already-mapped transfers; PIO won't always work */ |
963 | if (message->is_dma_mapped | 629 | if (message->is_dma_mapped |
@@ -980,21 +646,20 @@ static void pump_transfers(unsigned long data) | |||
980 | } | 646 | } |
981 | 647 | ||
982 | /* Setup the transfer state based on the type of transfer */ | 648 | /* Setup the transfer state based on the type of transfer */ |
983 | if (flush(drv_data) == 0) { | 649 | if (pxa2xx_spi_flush(drv_data) == 0) { |
984 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); | 650 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); |
985 | message->status = -EIO; | 651 | message->status = -EIO; |
986 | giveback(drv_data); | 652 | giveback(drv_data); |
987 | return; | 653 | return; |
988 | } | 654 | } |
989 | drv_data->n_bytes = chip->n_bytes; | 655 | drv_data->n_bytes = chip->n_bytes; |
990 | drv_data->dma_width = chip->dma_width; | ||
991 | drv_data->tx = (void *)transfer->tx_buf; | 656 | drv_data->tx = (void *)transfer->tx_buf; |
992 | drv_data->tx_end = drv_data->tx + transfer->len; | 657 | drv_data->tx_end = drv_data->tx + transfer->len; |
993 | drv_data->rx = transfer->rx_buf; | 658 | drv_data->rx = transfer->rx_buf; |
994 | drv_data->rx_end = drv_data->rx + transfer->len; | 659 | drv_data->rx_end = drv_data->rx + transfer->len; |
995 | drv_data->rx_dma = transfer->rx_dma; | 660 | drv_data->rx_dma = transfer->rx_dma; |
996 | drv_data->tx_dma = transfer->tx_dma; | 661 | drv_data->tx_dma = transfer->tx_dma; |
997 | drv_data->len = transfer->len & DCMD_LENGTH; | 662 | drv_data->len = transfer->len; |
998 | drv_data->write = drv_data->tx ? chip->write : null_writer; | 663 | drv_data->write = drv_data->tx ? chip->write : null_writer; |
999 | drv_data->read = drv_data->rx ? chip->read : null_reader; | 664 | drv_data->read = drv_data->rx ? chip->read : null_reader; |
1000 | 665 | ||
@@ -1015,21 +680,18 @@ static void pump_transfers(unsigned long data) | |||
1015 | 680 | ||
1016 | if (bits <= 8) { | 681 | if (bits <= 8) { |
1017 | drv_data->n_bytes = 1; | 682 | drv_data->n_bytes = 1; |
1018 | drv_data->dma_width = DCMD_WIDTH1; | ||
1019 | drv_data->read = drv_data->read != null_reader ? | 683 | drv_data->read = drv_data->read != null_reader ? |
1020 | u8_reader : null_reader; | 684 | u8_reader : null_reader; |
1021 | drv_data->write = drv_data->write != null_writer ? | 685 | drv_data->write = drv_data->write != null_writer ? |
1022 | u8_writer : null_writer; | 686 | u8_writer : null_writer; |
1023 | } else if (bits <= 16) { | 687 | } else if (bits <= 16) { |
1024 | drv_data->n_bytes = 2; | 688 | drv_data->n_bytes = 2; |
1025 | drv_data->dma_width = DCMD_WIDTH2; | ||
1026 | drv_data->read = drv_data->read != null_reader ? | 689 | drv_data->read = drv_data->read != null_reader ? |
1027 | u16_reader : null_reader; | 690 | u16_reader : null_reader; |
1028 | drv_data->write = drv_data->write != null_writer ? | 691 | drv_data->write = drv_data->write != null_writer ? |
1029 | u16_writer : null_writer; | 692 | u16_writer : null_writer; |
1030 | } else if (bits <= 32) { | 693 | } else if (bits <= 32) { |
1031 | drv_data->n_bytes = 4; | 694 | drv_data->n_bytes = 4; |
1032 | drv_data->dma_width = DCMD_WIDTH4; | ||
1033 | drv_data->read = drv_data->read != null_reader ? | 695 | drv_data->read = drv_data->read != null_reader ? |
1034 | u32_reader : null_reader; | 696 | u32_reader : null_reader; |
1035 | drv_data->write = drv_data->write != null_writer ? | 697 | drv_data->write = drv_data->write != null_writer ? |
@@ -1038,7 +700,8 @@ static void pump_transfers(unsigned long data) | |||
1038 | /* if bits/word is changed in dma mode, then must check the | 700 | /* if bits/word is changed in dma mode, then must check the |
1039 | * thresholds and burst also */ | 701 | * thresholds and burst also */ |
1040 | if (chip->enable_dma) { | 702 | if (chip->enable_dma) { |
1041 | if (set_dma_burst_and_threshold(chip, message->spi, | 703 | if (pxa2xx_spi_set_dma_burst_and_threshold(chip, |
704 | message->spi, | ||
1042 | bits, &dma_burst, | 705 | bits, &dma_burst, |
1043 | &dma_thresh)) | 706 | &dma_thresh)) |
1044 | if (printk_ratelimit()) | 707 | if (printk_ratelimit()) |
@@ -1057,70 +720,21 @@ static void pump_transfers(unsigned long data) | |||
1057 | 720 | ||
1058 | message->state = RUNNING_STATE; | 721 | message->state = RUNNING_STATE; |
1059 | 722 | ||
1060 | /* Try to map dma buffer and do a dma transfer if successful, but | ||
1061 | * only if the length is non-zero and less than MAX_DMA_LEN. | ||
1062 | * | ||
1063 | * Zero-length non-descriptor DMA is illegal on PXA2xx; force use | ||
1064 | * of PIO instead. Care is needed above because the transfer may | ||
1065 | * have have been passed with buffers that are already dma mapped. | ||
1066 | * A zero-length transfer in PIO mode will not try to write/read | ||
1067 | * to/from the buffers | ||
1068 | * | ||
1069 | * REVISIT large transfers are exactly where we most want to be | ||
1070 | * using DMA. If this happens much, split those transfers into | ||
1071 | * multiple DMA segments rather than forcing PIO. | ||
1072 | */ | ||
1073 | drv_data->dma_mapped = 0; | 723 | drv_data->dma_mapped = 0; |
1074 | if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) | 724 | if (pxa2xx_spi_dma_is_possible(drv_data->len)) |
1075 | drv_data->dma_mapped = map_dma_buffers(drv_data); | 725 | drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data); |
1076 | if (drv_data->dma_mapped) { | 726 | if (drv_data->dma_mapped) { |
1077 | 727 | ||
1078 | /* Ensure we have the correct interrupt handler */ | 728 | /* Ensure we have the correct interrupt handler */ |
1079 | drv_data->transfer_handler = dma_transfer; | 729 | drv_data->transfer_handler = pxa2xx_spi_dma_transfer; |
1080 | 730 | ||
1081 | /* Setup rx DMA Channel */ | 731 | pxa2xx_spi_dma_prepare(drv_data, dma_burst); |
1082 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
1083 | DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; | ||
1084 | DTADR(drv_data->rx_channel) = drv_data->rx_dma; | ||
1085 | if (drv_data->rx == drv_data->null_dma_buf) | ||
1086 | /* No target address increment */ | ||
1087 | DCMD(drv_data->rx_channel) = DCMD_FLOWSRC | ||
1088 | | drv_data->dma_width | ||
1089 | | dma_burst | ||
1090 | | drv_data->len; | ||
1091 | else | ||
1092 | DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR | ||
1093 | | DCMD_FLOWSRC | ||
1094 | | drv_data->dma_width | ||
1095 | | dma_burst | ||
1096 | | drv_data->len; | ||
1097 | |||
1098 | /* Setup tx DMA Channel */ | ||
1099 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
1100 | DSADR(drv_data->tx_channel) = drv_data->tx_dma; | ||
1101 | DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; | ||
1102 | if (drv_data->tx == drv_data->null_dma_buf) | ||
1103 | /* No source address increment */ | ||
1104 | DCMD(drv_data->tx_channel) = DCMD_FLOWTRG | ||
1105 | | drv_data->dma_width | ||
1106 | | dma_burst | ||
1107 | | drv_data->len; | ||
1108 | else | ||
1109 | DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR | ||
1110 | | DCMD_FLOWTRG | ||
1111 | | drv_data->dma_width | ||
1112 | | dma_burst | ||
1113 | | drv_data->len; | ||
1114 | |||
1115 | /* Enable dma end irqs on SSP to detect end of transfer */ | ||
1116 | if (drv_data->ssp_type == PXA25x_SSP) | ||
1117 | DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; | ||
1118 | 732 | ||
1119 | /* Clear status and start DMA engine */ | 733 | /* Clear status and start DMA engine */ |
1120 | cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; | 734 | cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; |
1121 | write_SSSR(drv_data->clear_sr, reg); | 735 | write_SSSR(drv_data->clear_sr, reg); |
1122 | DCSR(drv_data->rx_channel) |= DCSR_RUN; | 736 | |
1123 | DCSR(drv_data->tx_channel) |= DCSR_RUN; | 737 | pxa2xx_spi_dma_start(drv_data); |
1124 | } else { | 738 | } else { |
1125 | /* Ensure we have the correct interrupt handler */ | 739 | /* Ensure we have the correct interrupt handler */ |
1126 | drv_data->transfer_handler = interrupt_transfer; | 740 | drv_data->transfer_handler = interrupt_transfer; |
@@ -1130,6 +744,13 @@ static void pump_transfers(unsigned long data) | |||
1130 | write_SSSR_CS(drv_data, drv_data->clear_sr); | 744 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
1131 | } | 745 | } |
1132 | 746 | ||
747 | if (is_lpss_ssp(drv_data)) { | ||
748 | if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold) | ||
749 | write_SSIRF(chip->lpss_rx_threshold, reg); | ||
750 | if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold) | ||
751 | write_SSITF(chip->lpss_tx_threshold, reg); | ||
752 | } | ||
753 | |||
1133 | /* see if we need to reload the config registers */ | 754 | /* see if we need to reload the config registers */ |
1134 | if ((read_SSCR0(reg) != cr0) | 755 | if ((read_SSCR0(reg) != cr0) |
1135 | || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != | 756 | || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != |
@@ -1177,6 +798,27 @@ static int pxa2xx_spi_transfer_one_message(struct spi_master *master, | |||
1177 | return 0; | 798 | return 0; |
1178 | } | 799 | } |
1179 | 800 | ||
801 | static int pxa2xx_spi_prepare_transfer(struct spi_master *master) | ||
802 | { | ||
803 | struct driver_data *drv_data = spi_master_get_devdata(master); | ||
804 | |||
805 | pm_runtime_get_sync(&drv_data->pdev->dev); | ||
806 | return 0; | ||
807 | } | ||
808 | |||
809 | static int pxa2xx_spi_unprepare_transfer(struct spi_master *master) | ||
810 | { | ||
811 | struct driver_data *drv_data = spi_master_get_devdata(master); | ||
812 | |||
813 | /* Disable the SSP now */ | ||
814 | write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, | ||
815 | drv_data->ioaddr); | ||
816 | |||
817 | pm_runtime_mark_last_busy(&drv_data->pdev->dev); | ||
818 | pm_runtime_put_autosuspend(&drv_data->pdev->dev); | ||
819 | return 0; | ||
820 | } | ||
821 | |||
1180 | static int setup_cs(struct spi_device *spi, struct chip_data *chip, | 822 | static int setup_cs(struct spi_device *spi, struct chip_data *chip, |
1181 | struct pxa2xx_spi_chip *chip_info) | 823 | struct pxa2xx_spi_chip *chip_info) |
1182 | { | 824 | { |
@@ -1221,8 +863,17 @@ static int setup(struct spi_device *spi) | |||
1221 | struct chip_data *chip; | 863 | struct chip_data *chip; |
1222 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | 864 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); |
1223 | unsigned int clk_div; | 865 | unsigned int clk_div; |
1224 | uint tx_thres = TX_THRESH_DFLT; | 866 | uint tx_thres, tx_hi_thres, rx_thres; |
1225 | uint rx_thres = RX_THRESH_DFLT; | 867 | |
868 | if (is_lpss_ssp(drv_data)) { | ||
869 | tx_thres = LPSS_TX_LOTHRESH_DFLT; | ||
870 | tx_hi_thres = LPSS_TX_HITHRESH_DFLT; | ||
871 | rx_thres = LPSS_RX_THRESH_DFLT; | ||
872 | } else { | ||
873 | tx_thres = TX_THRESH_DFLT; | ||
874 | tx_hi_thres = 0; | ||
875 | rx_thres = RX_THRESH_DFLT; | ||
876 | } | ||
1226 | 877 | ||
1227 | if (!pxa25x_ssp_comp(drv_data) | 878 | if (!pxa25x_ssp_comp(drv_data) |
1228 | && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { | 879 | && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { |
@@ -1262,8 +913,6 @@ static int setup(struct spi_device *spi) | |||
1262 | chip->gpio_cs = -1; | 913 | chip->gpio_cs = -1; |
1263 | chip->enable_dma = 0; | 914 | chip->enable_dma = 0; |
1264 | chip->timeout = TIMOUT_DFLT; | 915 | chip->timeout = TIMOUT_DFLT; |
1265 | chip->dma_burst_size = drv_data->master_info->enable_dma ? | ||
1266 | DCMD_BURST8 : 0; | ||
1267 | } | 916 | } |
1268 | 917 | ||
1269 | /* protocol drivers may change the chip settings, so... | 918 | /* protocol drivers may change the chip settings, so... |
@@ -1277,23 +926,37 @@ static int setup(struct spi_device *spi) | |||
1277 | chip->timeout = chip_info->timeout; | 926 | chip->timeout = chip_info->timeout; |
1278 | if (chip_info->tx_threshold) | 927 | if (chip_info->tx_threshold) |
1279 | tx_thres = chip_info->tx_threshold; | 928 | tx_thres = chip_info->tx_threshold; |
929 | if (chip_info->tx_hi_threshold) | ||
930 | tx_hi_thres = chip_info->tx_hi_threshold; | ||
1280 | if (chip_info->rx_threshold) | 931 | if (chip_info->rx_threshold) |
1281 | rx_thres = chip_info->rx_threshold; | 932 | rx_thres = chip_info->rx_threshold; |
1282 | chip->enable_dma = drv_data->master_info->enable_dma; | 933 | chip->enable_dma = drv_data->master_info->enable_dma; |
1283 | chip->dma_threshold = 0; | 934 | chip->dma_threshold = 0; |
1284 | if (chip_info->enable_loopback) | 935 | if (chip_info->enable_loopback) |
1285 | chip->cr1 = SSCR1_LBM; | 936 | chip->cr1 = SSCR1_LBM; |
937 | } else if (ACPI_HANDLE(&spi->dev)) { | ||
938 | /* | ||
939 | * Slave devices enumerated from ACPI namespace don't | ||
940 | * usually have chip_info but we still might want to use | ||
941 | * DMA with them. | ||
942 | */ | ||
943 | chip->enable_dma = drv_data->master_info->enable_dma; | ||
1286 | } | 944 | } |
1287 | 945 | ||
1288 | chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | | 946 | chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | |
1289 | (SSCR1_TxTresh(tx_thres) & SSCR1_TFT); | 947 | (SSCR1_TxTresh(tx_thres) & SSCR1_TFT); |
1290 | 948 | ||
949 | chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres); | ||
950 | chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres) | ||
951 | | SSITF_TxHiThresh(tx_hi_thres); | ||
952 | |||
1291 | /* set dma burst and threshold outside of chip_info path so that if | 953 | /* set dma burst and threshold outside of chip_info path so that if |
1292 | * chip_info goes away after setting chip->enable_dma, the | 954 | * chip_info goes away after setting chip->enable_dma, the |
1293 | * burst and threshold can still respond to changes in bits_per_word */ | 955 | * burst and threshold can still respond to changes in bits_per_word */ |
1294 | if (chip->enable_dma) { | 956 | if (chip->enable_dma) { |
1295 | /* set up legal burst and threshold for dma */ | 957 | /* set up legal burst and threshold for dma */ |
1296 | if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, | 958 | if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi, |
959 | spi->bits_per_word, | ||
1297 | &chip->dma_burst_size, | 960 | &chip->dma_burst_size, |
1298 | &chip->dma_threshold)) { | 961 | &chip->dma_threshold)) { |
1299 | dev_warn(&spi->dev, "in setup: DMA burst size reduced " | 962 | dev_warn(&spi->dev, "in setup: DMA burst size reduced " |
@@ -1314,6 +977,9 @@ static int setup(struct spi_device *spi) | |||
1314 | chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) | 977 | chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) |
1315 | | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); | 978 | | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); |
1316 | 979 | ||
980 | if (spi->mode & SPI_LOOP) | ||
981 | chip->cr1 |= SSCR1_LBM; | ||
982 | |||
1317 | /* NOTE: PXA25x_SSP _could_ use external clocking ... */ | 983 | /* NOTE: PXA25x_SSP _could_ use external clocking ... */ |
1318 | if (!pxa25x_ssp_comp(drv_data)) | 984 | if (!pxa25x_ssp_comp(drv_data)) |
1319 | dev_dbg(&spi->dev, "%ld Hz actual, %s\n", | 985 | dev_dbg(&spi->dev, "%ld Hz actual, %s\n", |
@@ -1328,18 +994,15 @@ static int setup(struct spi_device *spi) | |||
1328 | 994 | ||
1329 | if (spi->bits_per_word <= 8) { | 995 | if (spi->bits_per_word <= 8) { |
1330 | chip->n_bytes = 1; | 996 | chip->n_bytes = 1; |
1331 | chip->dma_width = DCMD_WIDTH1; | ||
1332 | chip->read = u8_reader; | 997 | chip->read = u8_reader; |
1333 | chip->write = u8_writer; | 998 | chip->write = u8_writer; |
1334 | } else if (spi->bits_per_word <= 16) { | 999 | } else if (spi->bits_per_word <= 16) { |
1335 | chip->n_bytes = 2; | 1000 | chip->n_bytes = 2; |
1336 | chip->dma_width = DCMD_WIDTH2; | ||
1337 | chip->read = u16_reader; | 1001 | chip->read = u16_reader; |
1338 | chip->write = u16_writer; | 1002 | chip->write = u16_writer; |
1339 | } else if (spi->bits_per_word <= 32) { | 1003 | } else if (spi->bits_per_word <= 32) { |
1340 | chip->cr0 |= SSCR0_EDSS; | 1004 | chip->cr0 |= SSCR0_EDSS; |
1341 | chip->n_bytes = 4; | 1005 | chip->n_bytes = 4; |
1342 | chip->dma_width = DCMD_WIDTH4; | ||
1343 | chip->read = u32_reader; | 1006 | chip->read = u32_reader; |
1344 | chip->write = u32_writer; | 1007 | chip->write = u32_writer; |
1345 | } else { | 1008 | } else { |
@@ -1370,6 +1033,99 @@ static void cleanup(struct spi_device *spi) | |||
1370 | kfree(chip); | 1033 | kfree(chip); |
1371 | } | 1034 | } |
1372 | 1035 | ||
1036 | #ifdef CONFIG_ACPI | ||
1037 | static int pxa2xx_spi_acpi_add_dma(struct acpi_resource *res, void *data) | ||
1038 | { | ||
1039 | struct pxa2xx_spi_master *pdata = data; | ||
1040 | |||
1041 | if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) { | ||
1042 | const struct acpi_resource_fixed_dma *dma; | ||
1043 | |||
1044 | dma = &res->data.fixed_dma; | ||
1045 | if (pdata->tx_slave_id < 0) { | ||
1046 | pdata->tx_slave_id = dma->request_lines; | ||
1047 | pdata->tx_chan_id = dma->channels; | ||
1048 | } else if (pdata->rx_slave_id < 0) { | ||
1049 | pdata->rx_slave_id = dma->request_lines; | ||
1050 | pdata->rx_chan_id = dma->channels; | ||
1051 | } | ||
1052 | } | ||
1053 | |||
1054 | /* Tell the ACPI core to skip this resource */ | ||
1055 | return 1; | ||
1056 | } | ||
1057 | |||
1058 | static struct pxa2xx_spi_master * | ||
1059 | pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) | ||
1060 | { | ||
1061 | struct pxa2xx_spi_master *pdata; | ||
1062 | struct list_head resource_list; | ||
1063 | struct acpi_device *adev; | ||
1064 | struct ssp_device *ssp; | ||
1065 | struct resource *res; | ||
1066 | int devid; | ||
1067 | |||
1068 | if (!ACPI_HANDLE(&pdev->dev) || | ||
1069 | acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) | ||
1070 | return NULL; | ||
1071 | |||
1072 | pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); | ||
1073 | if (!pdata) { | ||
1074 | dev_err(&pdev->dev, | ||
1075 | "failed to allocate memory for platform data\n"); | ||
1076 | return NULL; | ||
1077 | } | ||
1078 | |||
1079 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1080 | if (!res) | ||
1081 | return NULL; | ||
1082 | |||
1083 | ssp = &pdata->ssp; | ||
1084 | |||
1085 | ssp->phys_base = res->start; | ||
1086 | ssp->mmio_base = devm_request_and_ioremap(&pdev->dev, res); | ||
1087 | if (!ssp->mmio_base) { | ||
1088 | dev_err(&pdev->dev, "failed to ioremap mmio_base\n"); | ||
1089 | return NULL; | ||
1090 | } | ||
1091 | |||
1092 | ssp->clk = devm_clk_get(&pdev->dev, NULL); | ||
1093 | ssp->irq = platform_get_irq(pdev, 0); | ||
1094 | ssp->type = LPSS_SSP; | ||
1095 | ssp->pdev = pdev; | ||
1096 | |||
1097 | ssp->port_id = -1; | ||
1098 | if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid)) | ||
1099 | ssp->port_id = devid; | ||
1100 | |||
1101 | pdata->num_chipselect = 1; | ||
1102 | pdata->rx_slave_id = -1; | ||
1103 | pdata->tx_slave_id = -1; | ||
1104 | |||
1105 | INIT_LIST_HEAD(&resource_list); | ||
1106 | acpi_dev_get_resources(adev, &resource_list, pxa2xx_spi_acpi_add_dma, | ||
1107 | pdata); | ||
1108 | acpi_dev_free_resource_list(&resource_list); | ||
1109 | |||
1110 | pdata->enable_dma = pdata->rx_slave_id >= 0 && pdata->tx_slave_id >= 0; | ||
1111 | |||
1112 | return pdata; | ||
1113 | } | ||
1114 | |||
1115 | static struct acpi_device_id pxa2xx_spi_acpi_match[] = { | ||
1116 | { "INT33C0", 0 }, | ||
1117 | { "INT33C1", 0 }, | ||
1118 | { }, | ||
1119 | }; | ||
1120 | MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match); | ||
1121 | #else | ||
1122 | static inline struct pxa2xx_spi_master * | ||
1123 | pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) | ||
1124 | { | ||
1125 | return NULL; | ||
1126 | } | ||
1127 | #endif | ||
1128 | |||
1373 | static int pxa2xx_spi_probe(struct platform_device *pdev) | 1129 | static int pxa2xx_spi_probe(struct platform_device *pdev) |
1374 | { | 1130 | { |
1375 | struct device *dev = &pdev->dev; | 1131 | struct device *dev = &pdev->dev; |
@@ -1381,8 +1137,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1381 | 1137 | ||
1382 | platform_info = dev_get_platdata(dev); | 1138 | platform_info = dev_get_platdata(dev); |
1383 | if (!platform_info) { | 1139 | if (!platform_info) { |
1384 | dev_err(&pdev->dev, "missing platform data\n"); | 1140 | platform_info = pxa2xx_spi_acpi_get_pdata(pdev); |
1385 | return -ENODEV; | 1141 | if (!platform_info) { |
1142 | dev_err(&pdev->dev, "missing platform data\n"); | ||
1143 | return -ENODEV; | ||
1144 | } | ||
1386 | } | 1145 | } |
1387 | 1146 | ||
1388 | ssp = pxa_ssp_request(pdev->id, pdev->name); | 1147 | ssp = pxa_ssp_request(pdev->id, pdev->name); |
@@ -1409,8 +1168,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1409 | 1168 | ||
1410 | master->dev.parent = &pdev->dev; | 1169 | master->dev.parent = &pdev->dev; |
1411 | master->dev.of_node = pdev->dev.of_node; | 1170 | master->dev.of_node = pdev->dev.of_node; |
1171 | ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev)); | ||
1412 | /* the spi->mode bits understood by this driver: */ | 1172 | /* the spi->mode bits understood by this driver: */ |
1413 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1173 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; |
1414 | 1174 | ||
1415 | master->bus_num = ssp->port_id; | 1175 | master->bus_num = ssp->port_id; |
1416 | master->num_chipselect = platform_info->num_chipselect; | 1176 | master->num_chipselect = platform_info->num_chipselect; |
@@ -1418,6 +1178,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1418 | master->cleanup = cleanup; | 1178 | master->cleanup = cleanup; |
1419 | master->setup = setup; | 1179 | master->setup = setup; |
1420 | master->transfer_one_message = pxa2xx_spi_transfer_one_message; | 1180 | master->transfer_one_message = pxa2xx_spi_transfer_one_message; |
1181 | master->prepare_transfer_hardware = pxa2xx_spi_prepare_transfer; | ||
1182 | master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; | ||
1421 | 1183 | ||
1422 | drv_data->ssp_type = ssp->type; | 1184 | drv_data->ssp_type = ssp->type; |
1423 | drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT); | 1185 | drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT); |
@@ -1431,7 +1193,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1431 | drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; | 1193 | drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; |
1432 | } else { | 1194 | } else { |
1433 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; | 1195 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; |
1434 | drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; | 1196 | drv_data->dma_cr1 = DEFAULT_DMA_CR1; |
1435 | drv_data->clear_sr = SSSR_ROR | SSSR_TINT; | 1197 | drv_data->clear_sr = SSSR_ROR | SSSR_TINT; |
1436 | drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; | 1198 | drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; |
1437 | } | 1199 | } |
@@ -1447,31 +1209,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1447 | drv_data->tx_channel = -1; | 1209 | drv_data->tx_channel = -1; |
1448 | drv_data->rx_channel = -1; | 1210 | drv_data->rx_channel = -1; |
1449 | if (platform_info->enable_dma) { | 1211 | if (platform_info->enable_dma) { |
1450 | 1212 | status = pxa2xx_spi_dma_setup(drv_data); | |
1451 | /* Get two DMA channels (rx and tx) */ | 1213 | if (status) { |
1452 | drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", | 1214 | dev_warn(dev, "failed to setup DMA, using PIO\n"); |
1453 | DMA_PRIO_HIGH, | 1215 | platform_info->enable_dma = false; |
1454 | dma_handler, | ||
1455 | drv_data); | ||
1456 | if (drv_data->rx_channel < 0) { | ||
1457 | dev_err(dev, "problem (%d) requesting rx channel\n", | ||
1458 | drv_data->rx_channel); | ||
1459 | status = -ENODEV; | ||
1460 | goto out_error_irq_alloc; | ||
1461 | } | ||
1462 | drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", | ||
1463 | DMA_PRIO_MEDIUM, | ||
1464 | dma_handler, | ||
1465 | drv_data); | ||
1466 | if (drv_data->tx_channel < 0) { | ||
1467 | dev_err(dev, "problem (%d) requesting tx channel\n", | ||
1468 | drv_data->tx_channel); | ||
1469 | status = -ENODEV; | ||
1470 | goto out_error_dma_alloc; | ||
1471 | } | 1216 | } |
1472 | |||
1473 | DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; | ||
1474 | DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; | ||
1475 | } | 1217 | } |
1476 | 1218 | ||
1477 | /* Enable SOC clock */ | 1219 | /* Enable SOC clock */ |
@@ -1492,6 +1234,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1492 | write_SSTO(0, drv_data->ioaddr); | 1234 | write_SSTO(0, drv_data->ioaddr); |
1493 | write_SSPSP(0, drv_data->ioaddr); | 1235 | write_SSPSP(0, drv_data->ioaddr); |
1494 | 1236 | ||
1237 | lpss_ssp_setup(drv_data); | ||
1238 | |||
1495 | tasklet_init(&drv_data->pump_transfers, pump_transfers, | 1239 | tasklet_init(&drv_data->pump_transfers, pump_transfers, |
1496 | (unsigned long)drv_data); | 1240 | (unsigned long)drv_data); |
1497 | 1241 | ||
@@ -1503,18 +1247,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1503 | goto out_error_clock_enabled; | 1247 | goto out_error_clock_enabled; |
1504 | } | 1248 | } |
1505 | 1249 | ||
1250 | pm_runtime_set_autosuspend_delay(&pdev->dev, 50); | ||
1251 | pm_runtime_use_autosuspend(&pdev->dev); | ||
1252 | pm_runtime_set_active(&pdev->dev); | ||
1253 | pm_runtime_enable(&pdev->dev); | ||
1254 | |||
1506 | return status; | 1255 | return status; |
1507 | 1256 | ||
1508 | out_error_clock_enabled: | 1257 | out_error_clock_enabled: |
1509 | clk_disable_unprepare(ssp->clk); | 1258 | clk_disable_unprepare(ssp->clk); |
1510 | 1259 | pxa2xx_spi_dma_release(drv_data); | |
1511 | out_error_dma_alloc: | ||
1512 | if (drv_data->tx_channel != -1) | ||
1513 | pxa_free_dma(drv_data->tx_channel); | ||
1514 | if (drv_data->rx_channel != -1) | ||
1515 | pxa_free_dma(drv_data->rx_channel); | ||
1516 | |||
1517 | out_error_irq_alloc: | ||
1518 | free_irq(ssp->irq, drv_data); | 1260 | free_irq(ssp->irq, drv_data); |
1519 | 1261 | ||
1520 | out_error_master_alloc: | 1262 | out_error_master_alloc: |
@@ -1532,17 +1274,18 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) | |||
1532 | return 0; | 1274 | return 0; |
1533 | ssp = drv_data->ssp; | 1275 | ssp = drv_data->ssp; |
1534 | 1276 | ||
1277 | pm_runtime_get_sync(&pdev->dev); | ||
1278 | |||
1535 | /* Disable the SSP at the peripheral and SOC level */ | 1279 | /* Disable the SSP at the peripheral and SOC level */ |
1536 | write_SSCR0(0, drv_data->ioaddr); | 1280 | write_SSCR0(0, drv_data->ioaddr); |
1537 | clk_disable_unprepare(ssp->clk); | 1281 | clk_disable_unprepare(ssp->clk); |
1538 | 1282 | ||
1539 | /* Release DMA */ | 1283 | /* Release DMA */ |
1540 | if (drv_data->master_info->enable_dma) { | 1284 | if (drv_data->master_info->enable_dma) |
1541 | DRCMR(ssp->drcmr_rx) = 0; | 1285 | pxa2xx_spi_dma_release(drv_data); |
1542 | DRCMR(ssp->drcmr_tx) = 0; | 1286 | |
1543 | pxa_free_dma(drv_data->tx_channel); | 1287 | pm_runtime_put_noidle(&pdev->dev); |
1544 | pxa_free_dma(drv_data->rx_channel); | 1288 | pm_runtime_disable(&pdev->dev); |
1545 | } | ||
1546 | 1289 | ||
1547 | /* Release IRQ */ | 1290 | /* Release IRQ */ |
1548 | free_irq(ssp->irq, drv_data); | 1291 | free_irq(ssp->irq, drv_data); |
@@ -1589,12 +1332,7 @@ static int pxa2xx_spi_resume(struct device *dev) | |||
1589 | struct ssp_device *ssp = drv_data->ssp; | 1332 | struct ssp_device *ssp = drv_data->ssp; |
1590 | int status = 0; | 1333 | int status = 0; |
1591 | 1334 | ||
1592 | if (drv_data->rx_channel != -1) | 1335 | pxa2xx_spi_dma_resume(drv_data); |
1593 | DRCMR(drv_data->ssp->drcmr_rx) = | ||
1594 | DRCMR_MAPVLD | drv_data->rx_channel; | ||
1595 | if (drv_data->tx_channel != -1) | ||
1596 | DRCMR(drv_data->ssp->drcmr_tx) = | ||
1597 | DRCMR_MAPVLD | drv_data->tx_channel; | ||
1598 | 1336 | ||
1599 | /* Enable the SSP clock */ | 1337 | /* Enable the SSP clock */ |
1600 | clk_prepare_enable(ssp->clk); | 1338 | clk_prepare_enable(ssp->clk); |
@@ -1608,20 +1346,38 @@ static int pxa2xx_spi_resume(struct device *dev) | |||
1608 | 1346 | ||
1609 | return 0; | 1347 | return 0; |
1610 | } | 1348 | } |
1349 | #endif | ||
1350 | |||
1351 | #ifdef CONFIG_PM_RUNTIME | ||
1352 | static int pxa2xx_spi_runtime_suspend(struct device *dev) | ||
1353 | { | ||
1354 | struct driver_data *drv_data = dev_get_drvdata(dev); | ||
1355 | |||
1356 | clk_disable_unprepare(drv_data->ssp->clk); | ||
1357 | return 0; | ||
1358 | } | ||
1359 | |||
1360 | static int pxa2xx_spi_runtime_resume(struct device *dev) | ||
1361 | { | ||
1362 | struct driver_data *drv_data = dev_get_drvdata(dev); | ||
1363 | |||
1364 | clk_prepare_enable(drv_data->ssp->clk); | ||
1365 | return 0; | ||
1366 | } | ||
1367 | #endif | ||
1611 | 1368 | ||
1612 | static const struct dev_pm_ops pxa2xx_spi_pm_ops = { | 1369 | static const struct dev_pm_ops pxa2xx_spi_pm_ops = { |
1613 | .suspend = pxa2xx_spi_suspend, | 1370 | SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume) |
1614 | .resume = pxa2xx_spi_resume, | 1371 | SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend, |
1372 | pxa2xx_spi_runtime_resume, NULL) | ||
1615 | }; | 1373 | }; |
1616 | #endif | ||
1617 | 1374 | ||
1618 | static struct platform_driver driver = { | 1375 | static struct platform_driver driver = { |
1619 | .driver = { | 1376 | .driver = { |
1620 | .name = "pxa2xx-spi", | 1377 | .name = "pxa2xx-spi", |
1621 | .owner = THIS_MODULE, | 1378 | .owner = THIS_MODULE, |
1622 | #ifdef CONFIG_PM | ||
1623 | .pm = &pxa2xx_spi_pm_ops, | 1379 | .pm = &pxa2xx_spi_pm_ops, |
1624 | #endif | 1380 | .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match), |
1625 | }, | 1381 | }, |
1626 | .probe = pxa2xx_spi_probe, | 1382 | .probe = pxa2xx_spi_probe, |
1627 | .remove = pxa2xx_spi_remove, | 1383 | .remove = pxa2xx_spi_remove, |
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h new file mode 100644 index 000000000000..5adc2a11c7bc --- /dev/null +++ b/drivers/spi/spi-pxa2xx.h | |||
@@ -0,0 +1,221 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs | ||
3 | * Copyright (C) 2013, Intel Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef SPI_PXA2XX_H | ||
11 | #define SPI_PXA2XX_H | ||
12 | |||
13 | #include <linux/atomic.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/pxa2xx_ssp.h> | ||
20 | #include <linux/scatterlist.h> | ||
21 | #include <linux/sizes.h> | ||
22 | #include <linux/spi/spi.h> | ||
23 | #include <linux/spi/pxa2xx_spi.h> | ||
24 | |||
25 | struct driver_data { | ||
26 | /* Driver model hookup */ | ||
27 | struct platform_device *pdev; | ||
28 | |||
29 | /* SSP Info */ | ||
30 | struct ssp_device *ssp; | ||
31 | |||
32 | /* SPI framework hookup */ | ||
33 | enum pxa_ssp_type ssp_type; | ||
34 | struct spi_master *master; | ||
35 | |||
36 | /* PXA hookup */ | ||
37 | struct pxa2xx_spi_master *master_info; | ||
38 | |||
39 | /* PXA private DMA setup stuff */ | ||
40 | int rx_channel; | ||
41 | int tx_channel; | ||
42 | u32 *null_dma_buf; | ||
43 | |||
44 | /* SSP register addresses */ | ||
45 | void __iomem *ioaddr; | ||
46 | u32 ssdr_physical; | ||
47 | |||
48 | /* SSP masks*/ | ||
49 | u32 dma_cr1; | ||
50 | u32 int_cr1; | ||
51 | u32 clear_sr; | ||
52 | u32 mask_sr; | ||
53 | |||
54 | /* Maximun clock rate */ | ||
55 | unsigned long max_clk_rate; | ||
56 | |||
57 | /* Message Transfer pump */ | ||
58 | struct tasklet_struct pump_transfers; | ||
59 | |||
60 | /* DMA engine support */ | ||
61 | struct dma_chan *rx_chan; | ||
62 | struct dma_chan *tx_chan; | ||
63 | struct sg_table rx_sgt; | ||
64 | struct sg_table tx_sgt; | ||
65 | int rx_nents; | ||
66 | int tx_nents; | ||
67 | void *dummy; | ||
68 | atomic_t dma_running; | ||
69 | |||
70 | /* Current message transfer state info */ | ||
71 | struct spi_message *cur_msg; | ||
72 | struct spi_transfer *cur_transfer; | ||
73 | struct chip_data *cur_chip; | ||
74 | size_t len; | ||
75 | void *tx; | ||
76 | void *tx_end; | ||
77 | void *rx; | ||
78 | void *rx_end; | ||
79 | int dma_mapped; | ||
80 | dma_addr_t rx_dma; | ||
81 | dma_addr_t tx_dma; | ||
82 | size_t rx_map_len; | ||
83 | size_t tx_map_len; | ||
84 | u8 n_bytes; | ||
85 | int (*write)(struct driver_data *drv_data); | ||
86 | int (*read)(struct driver_data *drv_data); | ||
87 | irqreturn_t (*transfer_handler)(struct driver_data *drv_data); | ||
88 | void (*cs_control)(u32 command); | ||
89 | |||
90 | void __iomem *lpss_base; | ||
91 | }; | ||
92 | |||
93 | struct chip_data { | ||
94 | u32 cr0; | ||
95 | u32 cr1; | ||
96 | u32 psp; | ||
97 | u32 timeout; | ||
98 | u8 n_bytes; | ||
99 | u32 dma_burst_size; | ||
100 | u32 threshold; | ||
101 | u32 dma_threshold; | ||
102 | u16 lpss_rx_threshold; | ||
103 | u16 lpss_tx_threshold; | ||
104 | u8 enable_dma; | ||
105 | u8 bits_per_word; | ||
106 | u32 speed_hz; | ||
107 | union { | ||
108 | int gpio_cs; | ||
109 | unsigned int frm; | ||
110 | }; | ||
111 | int gpio_cs_inverted; | ||
112 | int (*write)(struct driver_data *drv_data); | ||
113 | int (*read)(struct driver_data *drv_data); | ||
114 | void (*cs_control)(u32 command); | ||
115 | }; | ||
116 | |||
117 | #define DEFINE_SSP_REG(reg, off) \ | ||
118 | static inline u32 read_##reg(void const __iomem *p) \ | ||
119 | { return __raw_readl(p + (off)); } \ | ||
120 | \ | ||
121 | static inline void write_##reg(u32 v, void __iomem *p) \ | ||
122 | { __raw_writel(v, p + (off)); } | ||
123 | |||
124 | DEFINE_SSP_REG(SSCR0, 0x00) | ||
125 | DEFINE_SSP_REG(SSCR1, 0x04) | ||
126 | DEFINE_SSP_REG(SSSR, 0x08) | ||
127 | DEFINE_SSP_REG(SSITR, 0x0c) | ||
128 | DEFINE_SSP_REG(SSDR, 0x10) | ||
129 | DEFINE_SSP_REG(SSTO, 0x28) | ||
130 | DEFINE_SSP_REG(SSPSP, 0x2c) | ||
131 | DEFINE_SSP_REG(SSITF, SSITF) | ||
132 | DEFINE_SSP_REG(SSIRF, SSIRF) | ||
133 | |||
134 | #define START_STATE ((void *)0) | ||
135 | #define RUNNING_STATE ((void *)1) | ||
136 | #define DONE_STATE ((void *)2) | ||
137 | #define ERROR_STATE ((void *)-1) | ||
138 | |||
139 | #define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) | ||
140 | #define DMA_ALIGNMENT 8 | ||
141 | |||
142 | static inline int pxa25x_ssp_comp(struct driver_data *drv_data) | ||
143 | { | ||
144 | if (drv_data->ssp_type == PXA25x_SSP) | ||
145 | return 1; | ||
146 | if (drv_data->ssp_type == CE4100_SSP) | ||
147 | return 1; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) | ||
152 | { | ||
153 | void __iomem *reg = drv_data->ioaddr; | ||
154 | |||
155 | if (drv_data->ssp_type == CE4100_SSP) | ||
156 | val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; | ||
157 | |||
158 | write_SSSR(val, reg); | ||
159 | } | ||
160 | |||
161 | extern int pxa2xx_spi_flush(struct driver_data *drv_data); | ||
162 | extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data); | ||
163 | |||
164 | /* | ||
165 | * Select the right DMA implementation. | ||
166 | */ | ||
167 | #if defined(CONFIG_SPI_PXA2XX_PXADMA) | ||
168 | #define SPI_PXA2XX_USE_DMA 1 | ||
169 | #define MAX_DMA_LEN 8191 | ||
170 | #define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE) | ||
171 | #elif defined(CONFIG_SPI_PXA2XX_DMA) | ||
172 | #define SPI_PXA2XX_USE_DMA 1 | ||
173 | #define MAX_DMA_LEN SZ_64K | ||
174 | #define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL) | ||
175 | #else | ||
176 | #undef SPI_PXA2XX_USE_DMA | ||
177 | #define MAX_DMA_LEN 0 | ||
178 | #define DEFAULT_DMA_CR1 0 | ||
179 | #endif | ||
180 | |||
181 | #ifdef SPI_PXA2XX_USE_DMA | ||
182 | extern bool pxa2xx_spi_dma_is_possible(size_t len); | ||
183 | extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data); | ||
184 | extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data); | ||
185 | extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst); | ||
186 | extern void pxa2xx_spi_dma_start(struct driver_data *drv_data); | ||
187 | extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data); | ||
188 | extern void pxa2xx_spi_dma_release(struct driver_data *drv_data); | ||
189 | extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data); | ||
190 | extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, | ||
191 | struct spi_device *spi, | ||
192 | u8 bits_per_word, | ||
193 | u32 *burst_code, | ||
194 | u32 *threshold); | ||
195 | #else | ||
196 | static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; } | ||
197 | static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data) | ||
198 | { | ||
199 | return 0; | ||
200 | } | ||
201 | #define pxa2xx_spi_dma_transfer NULL | ||
202 | static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data, | ||
203 | u32 dma_burst) {} | ||
204 | static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {} | ||
205 | static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data) | ||
206 | { | ||
207 | return 0; | ||
208 | } | ||
209 | static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {} | ||
210 | static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {} | ||
211 | static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, | ||
212 | struct spi_device *spi, | ||
213 | u8 bits_per_word, | ||
214 | u32 *burst_code, | ||
215 | u32 *threshold) | ||
216 | { | ||
217 | return -ENODEV; | ||
218 | } | ||
219 | #endif | ||
220 | |||
221 | #endif /* SPI_PXA2XX_H */ | ||