author     Mika Westerberg <mika.westerberg@linux.intel.com>    2013-01-22 05:26:28 -0500
committer  Mark Brown <broonie@opensource.wolfsonmicro.com>     2013-02-08 07:15:21 -0500
commit     cd7bed00340475ee72a013a070e200e065085ef3 (patch)
tree       98f3cd20ecfaf090a772d0c0b185df7260a8336e
parent     d560040f7d6fbe0a2990b8f6edca1815e19e72f5 (diff)
spi/pxa2xx: break out the private DMA API usage into a separate file
The PXA SPI driver uses the PXA platform-specific private DMA implementation, which does not work on non-PXA platforms. In order to use this driver on other platforms, break out the private DMA implementation into a separate file that gets compiled only when CONFIG_SPI_PXA2XX_PXADMA is set. The DMA functions are stubbed out if no DMA implementation is selected (i.e. we are building on a non-PXA platform).

While at it, kill the dummy DMA bits in pxa2xx_spi.h, as they are no longer needed for CE4100.

Once this is done we can add generic DMA engine support to the driver, allowing the use of any DMA controller that implements the DMA engine API.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Lu Cao <lucao@marvell.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
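For readers skimming the large diff below, the key mechanism is the compile-time stub pattern in the new drivers/spi/spi-pxa2xx.h: the DMA helpers are real functions only when CONFIG_SPI_PXA2XX_PXADMA is enabled, and inline no-ops otherwise, so the core driver can call them unconditionally. A condensed sketch (trimmed from the full header further down, not a complete listing):

#if defined(CONFIG_SPI_PXA2XX_PXADMA)
/* Real implementations live in spi-pxa2xx-pxadma.c */
extern bool pxa2xx_spi_dma_is_possible(size_t len);
extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
#else
/* No DMA implementation selected: stub everything out so the core
 * driver still builds and silently falls back to PIO. */
static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	return 0;
}
#define pxa2xx_spi_dma_transfer NULL
#endif

The Makefile hunk follows the same idea: the optional spi-pxa2xx-pxadma.o is linked together with the core spi-pxa2xx.o into a composite spi-pxa2xx-platform.o, so the legacy DMA code is only pulled in when CONFIG_SPI_PXA2XX_PXADMA is enabled.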
-rw-r--r--drivers/spi/Kconfig6
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c490
-rw-r--r--drivers/spi/spi-pxa2xx.c595
-rw-r--r--drivers/spi/spi-pxa2xx.h185
-rw-r--r--include/linux/spi/pxa2xx_spi.h80
6 files changed, 712 insertions, 648 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a90393d7f106..f1878666e917 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -297,6 +297,12 @@ config SPI_PPC4xx
297 help 297 help
298 This selects a driver for the PPC4xx SPI Controller. 298 This selects a driver for the PPC4xx SPI Controller.
299 299
300config SPI_PXA2XX_PXADMA
301 bool "PXA2xx SSP legacy PXA DMA API support"
302 depends on SPI_PXA2XX && ARCH_PXA
303 help
304 Enable PXA private legacy DMA API support.
305
300config SPI_PXA2XX 306config SPI_PXA2XX
301 tristate "PXA2xx SSP SPI master" 307 tristate "PXA2xx SSP SPI master"
302 depends on ARCH_PXA || PCI 308 depends on ARCH_PXA || PCI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 64e970ba261c..2e3cdd3caba9 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -47,7 +47,9 @@ obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
47obj-$(CONFIG_SPI_ORION) += spi-orion.o 47obj-$(CONFIG_SPI_ORION) += spi-orion.o
48obj-$(CONFIG_SPI_PL022) += spi-pl022.o 48obj-$(CONFIG_SPI_PL022) += spi-pl022.o
49obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o 49obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
50obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o 50spi-pxa2xx-platform-objs := spi-pxa2xx.o
51spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
52obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
51obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o 53obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
52obj-$(CONFIG_SPI_RSPI) += spi-rspi.o 54obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
53obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o 55obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
new file mode 100644
index 000000000000..2916efc7cfe5
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -0,0 +1,490 @@
1/*
2 * PXA2xx SPI private DMA support.
3 *
4 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/dma-mapping.h>
25#include <linux/pxa2xx_ssp.h>
26#include <linux/spi/spi.h>
27#include <linux/spi/pxa2xx_spi.h>
28
29#include "spi-pxa2xx.h"
30
31#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
32#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
33
34bool pxa2xx_spi_dma_is_possible(size_t len)
35{
36 /* Try to map dma buffer and do a dma transfer if successful, but
37 * only if the length is non-zero and less than MAX_DMA_LEN.
38 *
39 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
40 * of PIO instead. Care is needed above because the transfer may
41 * have have been passed with buffers that are already dma mapped.
42 * A zero-length transfer in PIO mode will not try to write/read
43 * to/from the buffers
44 *
45 * REVISIT large transfers are exactly where we most want to be
46 * using DMA. If this happens much, split those transfers into
47 * multiple DMA segments rather than forcing PIO.
48 */
49 return len > 0 && len <= MAX_DMA_LEN;
50}
51
52int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
53{
54 struct spi_message *msg = drv_data->cur_msg;
55 struct device *dev = &msg->spi->dev;
56
57 if (!drv_data->cur_chip->enable_dma)
58 return 0;
59
60 if (msg->is_dma_mapped)
61 return drv_data->rx_dma && drv_data->tx_dma;
62
63 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
64 return 0;
65
66 /* Modify setup if rx buffer is null */
67 if (drv_data->rx == NULL) {
68 *drv_data->null_dma_buf = 0;
69 drv_data->rx = drv_data->null_dma_buf;
70 drv_data->rx_map_len = 4;
71 } else
72 drv_data->rx_map_len = drv_data->len;
73
74
75 /* Modify setup if tx buffer is null */
76 if (drv_data->tx == NULL) {
77 *drv_data->null_dma_buf = 0;
78 drv_data->tx = drv_data->null_dma_buf;
79 drv_data->tx_map_len = 4;
80 } else
81 drv_data->tx_map_len = drv_data->len;
82
83 /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
84 * so we flush the cache *before* invalidating it, in case
85 * the tx and rx buffers overlap.
86 */
87 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
88 drv_data->tx_map_len, DMA_TO_DEVICE);
89 if (dma_mapping_error(dev, drv_data->tx_dma))
90 return 0;
91
92 /* Stream map the rx buffer */
93 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
94 drv_data->rx_map_len, DMA_FROM_DEVICE);
95 if (dma_mapping_error(dev, drv_data->rx_dma)) {
96 dma_unmap_single(dev, drv_data->tx_dma,
97 drv_data->tx_map_len, DMA_TO_DEVICE);
98 return 0;
99 }
100
101 return 1;
102}
103
104static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
105{
106 struct device *dev;
107
108 if (!drv_data->dma_mapped)
109 return;
110
111 if (!drv_data->cur_msg->is_dma_mapped) {
112 dev = &drv_data->cur_msg->spi->dev;
113 dma_unmap_single(dev, drv_data->rx_dma,
114 drv_data->rx_map_len, DMA_FROM_DEVICE);
115 dma_unmap_single(dev, drv_data->tx_dma,
116 drv_data->tx_map_len, DMA_TO_DEVICE);
117 }
118
119 drv_data->dma_mapped = 0;
120}
121
122static int wait_ssp_rx_stall(void const __iomem *ioaddr)
123{
124 unsigned long limit = loops_per_jiffy << 1;
125
126 while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
127 cpu_relax();
128
129 return limit;
130}
131
132static int wait_dma_channel_stop(int channel)
133{
134 unsigned long limit = loops_per_jiffy << 1;
135
136 while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
137 cpu_relax();
138
139 return limit;
140}
141
142static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
143 const char *msg)
144{
145 void __iomem *reg = drv_data->ioaddr;
146
147 /* Stop and reset */
148 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
149 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
150 write_SSSR_CS(drv_data, drv_data->clear_sr);
151 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
152 if (!pxa25x_ssp_comp(drv_data))
153 write_SSTO(0, reg);
154 pxa2xx_spi_flush(drv_data);
155 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
156
157 pxa2xx_spi_unmap_dma_buffers(drv_data);
158
159 dev_err(&drv_data->pdev->dev, "%s\n", msg);
160
161 drv_data->cur_msg->state = ERROR_STATE;
162 tasklet_schedule(&drv_data->pump_transfers);
163}
164
165static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
166{
167 void __iomem *reg = drv_data->ioaddr;
168 struct spi_message *msg = drv_data->cur_msg;
169
170 /* Clear and disable interrupts on SSP and DMA channels*/
171 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
172 write_SSSR_CS(drv_data, drv_data->clear_sr);
173 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
174 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
175
176 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
177 dev_err(&drv_data->pdev->dev,
178 "dma_handler: dma rx channel stop failed\n");
179
180 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
181 dev_err(&drv_data->pdev->dev,
182 "dma_transfer: ssp rx stall failed\n");
183
184 pxa2xx_spi_unmap_dma_buffers(drv_data);
185
186 /* update the buffer pointer for the amount completed in dma */
187 drv_data->rx += drv_data->len -
188 (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
189
190 /* read trailing data from fifo, it does not matter how many
191 * bytes are in the fifo just read until buffer is full
192 * or fifo is empty, which ever occurs first */
193 drv_data->read(drv_data);
194
195 /* return count of what was actually read */
196 msg->actual_length += drv_data->len -
197 (drv_data->rx_end - drv_data->rx);
198
199 /* Transfer delays and chip select release are
200 * handled in pump_transfers or giveback
201 */
202
203 /* Move to next transfer */
204 msg->state = pxa2xx_spi_next_transfer(drv_data);
205
206 /* Schedule transfer tasklet */
207 tasklet_schedule(&drv_data->pump_transfers);
208}
209
210void pxa2xx_spi_dma_handler(int channel, void *data)
211{
212 struct driver_data *drv_data = data;
213 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
214
215 if (irq_status & DCSR_BUSERR) {
216
217 if (channel == drv_data->tx_channel)
218 pxa2xx_spi_dma_error_stop(drv_data,
219 "dma_handler: bad bus address on tx channel");
220 else
221 pxa2xx_spi_dma_error_stop(drv_data,
222 "dma_handler: bad bus address on rx channel");
223 return;
224 }
225
226 /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
227 if ((channel == drv_data->tx_channel)
228 && (irq_status & DCSR_ENDINTR)
229 && (drv_data->ssp_type == PXA25x_SSP)) {
230
231 /* Wait for rx to stall */
232 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
233 dev_err(&drv_data->pdev->dev,
234 "dma_handler: ssp rx stall failed\n");
235
236 /* finish this transfer, start the next */
237 pxa2xx_spi_dma_transfer_complete(drv_data);
238 }
239}
240
241irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
242{
243 u32 irq_status;
244 void __iomem *reg = drv_data->ioaddr;
245
246 irq_status = read_SSSR(reg) & drv_data->mask_sr;
247 if (irq_status & SSSR_ROR) {
248 pxa2xx_spi_dma_error_stop(drv_data,
249 "dma_transfer: fifo overrun");
250 return IRQ_HANDLED;
251 }
252
253 /* Check for false positive timeout */
254 if ((irq_status & SSSR_TINT)
255 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
256 write_SSSR(SSSR_TINT, reg);
257 return IRQ_HANDLED;
258 }
259
260 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
261
262 /* Clear and disable timeout interrupt, do the rest in
263 * dma_transfer_complete */
264 if (!pxa25x_ssp_comp(drv_data))
265 write_SSTO(0, reg);
266
267 /* finish this transfer, start the next */
268 pxa2xx_spi_dma_transfer_complete(drv_data);
269
270 return IRQ_HANDLED;
271 }
272
273 /* Opps problem detected */
274 return IRQ_NONE;
275}
276
277int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
278{
279 u32 dma_width;
280
281 switch (drv_data->n_bytes) {
282 case 1:
283 dma_width = DCMD_WIDTH1;
284 break;
285 case 2:
286 dma_width = DCMD_WIDTH2;
287 break;
288 default:
289 dma_width = DCMD_WIDTH4;
290 break;
291 }
292
293 /* Setup rx DMA Channel */
294 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
295 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
296 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
297 if (drv_data->rx == drv_data->null_dma_buf)
298 /* No target address increment */
299 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
300 | dma_width
301 | dma_burst
302 | drv_data->len;
303 else
304 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
305 | DCMD_FLOWSRC
306 | dma_width
307 | dma_burst
308 | drv_data->len;
309
310 /* Setup tx DMA Channel */
311 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
312 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
313 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
314 if (drv_data->tx == drv_data->null_dma_buf)
315 /* No source address increment */
316 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
317 | dma_width
318 | dma_burst
319 | drv_data->len;
320 else
321 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
322 | DCMD_FLOWTRG
323 | dma_width
324 | dma_burst
325 | drv_data->len;
326
327 /* Enable dma end irqs on SSP to detect end of transfer */
328 if (drv_data->ssp_type == PXA25x_SSP)
329 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
330
331 return 0;
332}
333
334void pxa2xx_spi_dma_start(struct driver_data *drv_data)
335{
336 DCSR(drv_data->rx_channel) |= DCSR_RUN;
337 DCSR(drv_data->tx_channel) |= DCSR_RUN;
338}
339
340int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
341{
342 struct device *dev = &drv_data->pdev->dev;
343 struct ssp_device *ssp = drv_data->ssp;
344
345 /* Get two DMA channels (rx and tx) */
346 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
347 DMA_PRIO_HIGH,
348 pxa2xx_spi_dma_handler,
349 drv_data);
350 if (drv_data->rx_channel < 0) {
351 dev_err(dev, "problem (%d) requesting rx channel\n",
352 drv_data->rx_channel);
353 return -ENODEV;
354 }
355 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
356 DMA_PRIO_MEDIUM,
357 pxa2xx_spi_dma_handler,
358 drv_data);
359 if (drv_data->tx_channel < 0) {
360 dev_err(dev, "problem (%d) requesting tx channel\n",
361 drv_data->tx_channel);
362 pxa_free_dma(drv_data->rx_channel);
363 return -ENODEV;
364 }
365
366 DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
367 DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
368
369 return 0;
370}
371
372void pxa2xx_spi_dma_release(struct driver_data *drv_data)
373{
374 struct ssp_device *ssp = drv_data->ssp;
375
376 DRCMR(ssp->drcmr_rx) = 0;
377 DRCMR(ssp->drcmr_tx) = 0;
378
379 if (drv_data->tx_channel != 0)
380 pxa_free_dma(drv_data->tx_channel);
381 if (drv_data->rx_channel != 0)
382 pxa_free_dma(drv_data->rx_channel);
383}
384
385void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
386{
387 if (drv_data->rx_channel != -1)
388 DRCMR(drv_data->ssp->drcmr_rx) =
389 DRCMR_MAPVLD | drv_data->rx_channel;
390 if (drv_data->tx_channel != -1)
391 DRCMR(drv_data->ssp->drcmr_tx) =
392 DRCMR_MAPVLD | drv_data->tx_channel;
393}
394
395int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
396 struct spi_device *spi,
397 u8 bits_per_word, u32 *burst_code,
398 u32 *threshold)
399{
400 struct pxa2xx_spi_chip *chip_info =
401 (struct pxa2xx_spi_chip *)spi->controller_data;
402 int bytes_per_word;
403 int burst_bytes;
404 int thresh_words;
405 int req_burst_size;
406 int retval = 0;
407
408 /* Set the threshold (in registers) to equal the same amount of data
409 * as represented by burst size (in bytes). The computation below
410 * is (burst_size rounded up to nearest 8 byte, word or long word)
411 * divided by (bytes/register); the tx threshold is the inverse of
412 * the rx, so that there will always be enough data in the rx fifo
413 * to satisfy a burst, and there will always be enough space in the
414 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
415 * there is not enough space), there must always remain enough empty
416 * space in the rx fifo for any data loaded to the tx fifo.
417 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
418 * will be 8, or half the fifo;
419 * The threshold can only be set to 2, 4 or 8, but not 16, because
420 * to burst 16 to the tx fifo, the fifo would have to be empty;
421 * however, the minimum fifo trigger level is 1, and the tx will
422 * request service when the fifo is at this level, with only 15 spaces.
423 */
424
425 /* find bytes/word */
426 if (bits_per_word <= 8)
427 bytes_per_word = 1;
428 else if (bits_per_word <= 16)
429 bytes_per_word = 2;
430 else
431 bytes_per_word = 4;
432
433 /* use struct pxa2xx_spi_chip->dma_burst_size if available */
434 if (chip_info)
435 req_burst_size = chip_info->dma_burst_size;
436 else {
437 switch (chip->dma_burst_size) {
438 default:
439 /* if the default burst size is not set,
440 * do it now */
441 chip->dma_burst_size = DCMD_BURST8;
442 case DCMD_BURST8:
443 req_burst_size = 8;
444 break;
445 case DCMD_BURST16:
446 req_burst_size = 16;
447 break;
448 case DCMD_BURST32:
449 req_burst_size = 32;
450 break;
451 }
452 }
453 if (req_burst_size <= 8) {
454 *burst_code = DCMD_BURST8;
455 burst_bytes = 8;
456 } else if (req_burst_size <= 16) {
457 if (bytes_per_word == 1) {
458 /* don't burst more than 1/2 the fifo */
459 *burst_code = DCMD_BURST8;
460 burst_bytes = 8;
461 retval = 1;
462 } else {
463 *burst_code = DCMD_BURST16;
464 burst_bytes = 16;
465 }
466 } else {
467 if (bytes_per_word == 1) {
468 /* don't burst more than 1/2 the fifo */
469 *burst_code = DCMD_BURST8;
470 burst_bytes = 8;
471 retval = 1;
472 } else if (bytes_per_word == 2) {
473 /* don't burst more than 1/2 the fifo */
474 *burst_code = DCMD_BURST16;
475 burst_bytes = 16;
476 retval = 1;
477 } else {
478 *burst_code = DCMD_BURST32;
479 burst_bytes = 32;
480 }
481 }
482
483 thresh_words = burst_bytes / bytes_per_word;
484
485 /* thresh_words will be between 2 and 8 */
486 *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
487 | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
488
489 return retval;
490}
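A worked example of the burst/threshold selection above, for the common 8-bit-per-word, default-burst case (an illustrative sketch; chip and spi stand in for the caller's struct chip_data and struct spi_device, as in the setup() path):

	u32 burst_code, threshold;
	int reduced;

	/* bits_per_word = 8 -> bytes_per_word = 1; with no chip_info the
	 * default burst is applied: req_burst_size = 8 -> DCMD_BURST8. */
	reduced = pxa2xx_spi_set_dma_burst_and_threshold(chip, spi, 8,
							 &burst_code, &threshold);
	/* thresh_words = 8 / 1 = 8, so:
	 *   threshold = SSCR1_RxTresh(8) | SSCR1_TxTresh(16 - 8)
	 * i.e. both FIFO triggers sit at half of the 16-entry FIFO, and
	 * reduced == 0 because the requested burst needed no clamping. */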
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 304cf6eb50e6..5b7c2a4ba828 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -24,7 +24,6 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/spi/pxa2xx_spi.h> 26#include <linux/spi/pxa2xx_spi.h>
27#include <linux/dma-mapping.h>
28#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
29#include <linux/workqueue.h> 28#include <linux/workqueue.h>
30#include <linux/delay.h> 29#include <linux/delay.h>
@@ -36,6 +35,7 @@
36#include <asm/irq.h> 35#include <asm/irq.h>
37#include <asm/delay.h> 36#include <asm/delay.h>
38 37
38#include "spi-pxa2xx.h"
39 39
40MODULE_AUTHOR("Stephen Street"); 40MODULE_AUTHOR("Stephen Street");
41MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); 41MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
@@ -46,12 +46,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
46 46
47#define TIMOUT_DFLT 1000 47#define TIMOUT_DFLT 1000
48 48
49#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
50#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
51#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
52#define MAX_DMA_LEN 8191
53#define DMA_ALIGNMENT 8
54
55/* 49/*
56 * for testing SSCR1 changes that require SSP restart, basically 50 * for testing SSCR1 changes that require SSP restart, basically
57 * everything except the service and interrupt enables, the pxa270 developer 51 * everything except the service and interrupt enables, the pxa270 developer
@@ -66,106 +60,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
66 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ 60 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
67 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) 61 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
68 62
69#define DEFINE_SSP_REG(reg, off) \
70static inline u32 read_##reg(void const __iomem *p) \
71{ return __raw_readl(p + (off)); } \
72\
73static inline void write_##reg(u32 v, void __iomem *p) \
74{ __raw_writel(v, p + (off)); }
75
76DEFINE_SSP_REG(SSCR0, 0x00)
77DEFINE_SSP_REG(SSCR1, 0x04)
78DEFINE_SSP_REG(SSSR, 0x08)
79DEFINE_SSP_REG(SSITR, 0x0c)
80DEFINE_SSP_REG(SSDR, 0x10)
81DEFINE_SSP_REG(SSTO, 0x28)
82DEFINE_SSP_REG(SSPSP, 0x2c)
83
84#define START_STATE ((void*)0)
85#define RUNNING_STATE ((void*)1)
86#define DONE_STATE ((void*)2)
87#define ERROR_STATE ((void*)-1)
88
89struct driver_data {
90 /* Driver model hookup */
91 struct platform_device *pdev;
92
93 /* SSP Info */
94 struct ssp_device *ssp;
95
96 /* SPI framework hookup */
97 enum pxa_ssp_type ssp_type;
98 struct spi_master *master;
99
100 /* PXA hookup */
101 struct pxa2xx_spi_master *master_info;
102
103 /* DMA setup stuff */
104 int rx_channel;
105 int tx_channel;
106 u32 *null_dma_buf;
107
108 /* SSP register addresses */
109 void __iomem *ioaddr;
110 u32 ssdr_physical;
111
112 /* SSP masks*/
113 u32 dma_cr1;
114 u32 int_cr1;
115 u32 clear_sr;
116 u32 mask_sr;
117
118 /* Maximun clock rate */
119 unsigned long max_clk_rate;
120
121 /* Message Transfer pump */
122 struct tasklet_struct pump_transfers;
123
124 /* Current message transfer state info */
125 struct spi_message* cur_msg;
126 struct spi_transfer* cur_transfer;
127 struct chip_data *cur_chip;
128 size_t len;
129 void *tx;
130 void *tx_end;
131 void *rx;
132 void *rx_end;
133 int dma_mapped;
134 dma_addr_t rx_dma;
135 dma_addr_t tx_dma;
136 size_t rx_map_len;
137 size_t tx_map_len;
138 u8 n_bytes;
139 u32 dma_width;
140 int (*write)(struct driver_data *drv_data);
141 int (*read)(struct driver_data *drv_data);
142 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
143 void (*cs_control)(u32 command);
144};
145
146struct chip_data {
147 u32 cr0;
148 u32 cr1;
149 u32 psp;
150 u32 timeout;
151 u8 n_bytes;
152 u32 dma_width;
153 u32 dma_burst_size;
154 u32 threshold;
155 u32 dma_threshold;
156 u8 enable_dma;
157 u8 bits_per_word;
158 u32 speed_hz;
159 union {
160 int gpio_cs;
161 unsigned int frm;
162 };
163 int gpio_cs_inverted;
164 int (*write)(struct driver_data *drv_data);
165 int (*read)(struct driver_data *drv_data);
166 void (*cs_control)(u32 command);
167};
168
169static void cs_assert(struct driver_data *drv_data) 63static void cs_assert(struct driver_data *drv_data)
170{ 64{
171 struct chip_data *chip = drv_data->cur_chip; 65 struct chip_data *chip = drv_data->cur_chip;
@@ -200,26 +94,7 @@ static void cs_deassert(struct driver_data *drv_data)
200 gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); 94 gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
201} 95}
202 96
203static void write_SSSR_CS(struct driver_data *drv_data, u32 val) 97int pxa2xx_spi_flush(struct driver_data *drv_data)
204{
205 void __iomem *reg = drv_data->ioaddr;
206
207 if (drv_data->ssp_type == CE4100_SSP)
208 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
209
210 write_SSSR(val, reg);
211}
212
213static int pxa25x_ssp_comp(struct driver_data *drv_data)
214{
215 if (drv_data->ssp_type == PXA25x_SSP)
216 return 1;
217 if (drv_data->ssp_type == CE4100_SSP)
218 return 1;
219 return 0;
220}
221
222static int flush(struct driver_data *drv_data)
223{ 98{
224 unsigned long limit = loops_per_jiffy << 1; 99 unsigned long limit = loops_per_jiffy << 1;
225 100
@@ -345,7 +220,7 @@ static int u32_reader(struct driver_data *drv_data)
345 return drv_data->rx == drv_data->rx_end; 220 return drv_data->rx == drv_data->rx_end;
346} 221}
347 222
348static void *next_transfer(struct driver_data *drv_data) 223void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
349{ 224{
350 struct spi_message *msg = drv_data->cur_msg; 225 struct spi_message *msg = drv_data->cur_msg;
351 struct spi_transfer *trans = drv_data->cur_transfer; 226 struct spi_transfer *trans = drv_data->cur_transfer;
@@ -361,76 +236,6 @@ static void *next_transfer(struct driver_data *drv_data)
361 return DONE_STATE; 236 return DONE_STATE;
362} 237}
363 238
364static int map_dma_buffers(struct driver_data *drv_data)
365{
366 struct spi_message *msg = drv_data->cur_msg;
367 struct device *dev = &msg->spi->dev;
368
369 if (!drv_data->cur_chip->enable_dma)
370 return 0;
371
372 if (msg->is_dma_mapped)
373 return drv_data->rx_dma && drv_data->tx_dma;
374
375 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
376 return 0;
377
378 /* Modify setup if rx buffer is null */
379 if (drv_data->rx == NULL) {
380 *drv_data->null_dma_buf = 0;
381 drv_data->rx = drv_data->null_dma_buf;
382 drv_data->rx_map_len = 4;
383 } else
384 drv_data->rx_map_len = drv_data->len;
385
386
387 /* Modify setup if tx buffer is null */
388 if (drv_data->tx == NULL) {
389 *drv_data->null_dma_buf = 0;
390 drv_data->tx = drv_data->null_dma_buf;
391 drv_data->tx_map_len = 4;
392 } else
393 drv_data->tx_map_len = drv_data->len;
394
395 /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
396 * so we flush the cache *before* invalidating it, in case
397 * the tx and rx buffers overlap.
398 */
399 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
400 drv_data->tx_map_len, DMA_TO_DEVICE);
401 if (dma_mapping_error(dev, drv_data->tx_dma))
402 return 0;
403
404 /* Stream map the rx buffer */
405 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
406 drv_data->rx_map_len, DMA_FROM_DEVICE);
407 if (dma_mapping_error(dev, drv_data->rx_dma)) {
408 dma_unmap_single(dev, drv_data->tx_dma,
409 drv_data->tx_map_len, DMA_TO_DEVICE);
410 return 0;
411 }
412
413 return 1;
414}
415
416static void unmap_dma_buffers(struct driver_data *drv_data)
417{
418 struct device *dev;
419
420 if (!drv_data->dma_mapped)
421 return;
422
423 if (!drv_data->cur_msg->is_dma_mapped) {
424 dev = &drv_data->cur_msg->spi->dev;
425 dma_unmap_single(dev, drv_data->rx_dma,
426 drv_data->rx_map_len, DMA_FROM_DEVICE);
427 dma_unmap_single(dev, drv_data->tx_dma,
428 drv_data->tx_map_len, DMA_TO_DEVICE);
429 }
430
431 drv_data->dma_mapped = 0;
432}
433
434/* caller already set message->status; dma and pio irqs are blocked */ 239/* caller already set message->status; dma and pio irqs are blocked */
435static void giveback(struct driver_data *drv_data) 240static void giveback(struct driver_data *drv_data)
436{ 241{
@@ -483,161 +288,6 @@ static void giveback(struct driver_data *drv_data)
483 drv_data->cur_chip = NULL; 288 drv_data->cur_chip = NULL;
484} 289}
485 290
486static int wait_ssp_rx_stall(void const __iomem *ioaddr)
487{
488 unsigned long limit = loops_per_jiffy << 1;
489
490 while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
491 cpu_relax();
492
493 return limit;
494}
495
496static int wait_dma_channel_stop(int channel)
497{
498 unsigned long limit = loops_per_jiffy << 1;
499
500 while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
501 cpu_relax();
502
503 return limit;
504}
505
506static void dma_error_stop(struct driver_data *drv_data, const char *msg)
507{
508 void __iomem *reg = drv_data->ioaddr;
509
510 /* Stop and reset */
511 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
512 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
513 write_SSSR_CS(drv_data, drv_data->clear_sr);
514 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
515 if (!pxa25x_ssp_comp(drv_data))
516 write_SSTO(0, reg);
517 flush(drv_data);
518 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
519
520 unmap_dma_buffers(drv_data);
521
522 dev_err(&drv_data->pdev->dev, "%s\n", msg);
523
524 drv_data->cur_msg->state = ERROR_STATE;
525 tasklet_schedule(&drv_data->pump_transfers);
526}
527
528static void dma_transfer_complete(struct driver_data *drv_data)
529{
530 void __iomem *reg = drv_data->ioaddr;
531 struct spi_message *msg = drv_data->cur_msg;
532
533 /* Clear and disable interrupts on SSP and DMA channels*/
534 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
535 write_SSSR_CS(drv_data, drv_data->clear_sr);
536 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
537 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
538
539 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
540 dev_err(&drv_data->pdev->dev,
541 "dma_handler: dma rx channel stop failed\n");
542
543 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
544 dev_err(&drv_data->pdev->dev,
545 "dma_transfer: ssp rx stall failed\n");
546
547 unmap_dma_buffers(drv_data);
548
549 /* update the buffer pointer for the amount completed in dma */
550 drv_data->rx += drv_data->len -
551 (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
552
553 /* read trailing data from fifo, it does not matter how many
554 * bytes are in the fifo just read until buffer is full
555 * or fifo is empty, which ever occurs first */
556 drv_data->read(drv_data);
557
558 /* return count of what was actually read */
559 msg->actual_length += drv_data->len -
560 (drv_data->rx_end - drv_data->rx);
561
562 /* Transfer delays and chip select release are
563 * handled in pump_transfers or giveback
564 */
565
566 /* Move to next transfer */
567 msg->state = next_transfer(drv_data);
568
569 /* Schedule transfer tasklet */
570 tasklet_schedule(&drv_data->pump_transfers);
571}
572
573static void dma_handler(int channel, void *data)
574{
575 struct driver_data *drv_data = data;
576 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
577
578 if (irq_status & DCSR_BUSERR) {
579
580 if (channel == drv_data->tx_channel)
581 dma_error_stop(drv_data,
582 "dma_handler: "
583 "bad bus address on tx channel");
584 else
585 dma_error_stop(drv_data,
586 "dma_handler: "
587 "bad bus address on rx channel");
588 return;
589 }
590
591 /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
592 if ((channel == drv_data->tx_channel)
593 && (irq_status & DCSR_ENDINTR)
594 && (drv_data->ssp_type == PXA25x_SSP)) {
595
596 /* Wait for rx to stall */
597 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
598 dev_err(&drv_data->pdev->dev,
599 "dma_handler: ssp rx stall failed\n");
600
601 /* finish this transfer, start the next */
602 dma_transfer_complete(drv_data);
603 }
604}
605
606static irqreturn_t dma_transfer(struct driver_data *drv_data)
607{
608 u32 irq_status;
609 void __iomem *reg = drv_data->ioaddr;
610
611 irq_status = read_SSSR(reg) & drv_data->mask_sr;
612 if (irq_status & SSSR_ROR) {
613 dma_error_stop(drv_data, "dma_transfer: fifo overrun");
614 return IRQ_HANDLED;
615 }
616
617 /* Check for false positive timeout */
618 if ((irq_status & SSSR_TINT)
619 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
620 write_SSSR(SSSR_TINT, reg);
621 return IRQ_HANDLED;
622 }
623
624 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
625
626 /* Clear and disable timeout interrupt, do the rest in
627 * dma_transfer_complete */
628 if (!pxa25x_ssp_comp(drv_data))
629 write_SSTO(0, reg);
630
631 /* finish this transfer, start the next */
632 dma_transfer_complete(drv_data);
633
634 return IRQ_HANDLED;
635 }
636
637 /* Opps problem detected */
638 return IRQ_NONE;
639}
640
641static void reset_sccr1(struct driver_data *drv_data) 291static void reset_sccr1(struct driver_data *drv_data)
642{ 292{
643 void __iomem *reg = drv_data->ioaddr; 293 void __iomem *reg = drv_data->ioaddr;
@@ -659,7 +309,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
659 reset_sccr1(drv_data); 309 reset_sccr1(drv_data);
660 if (!pxa25x_ssp_comp(drv_data)) 310 if (!pxa25x_ssp_comp(drv_data))
661 write_SSTO(0, reg); 311 write_SSTO(0, reg);
662 flush(drv_data); 312 pxa2xx_spi_flush(drv_data);
663 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 313 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
664 314
665 dev_err(&drv_data->pdev->dev, "%s\n", msg); 315 dev_err(&drv_data->pdev->dev, "%s\n", msg);
@@ -687,7 +337,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
687 */ 337 */
688 338
689 /* Move to next transfer */ 339 /* Move to next transfer */
690 drv_data->cur_msg->state = next_transfer(drv_data); 340 drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
691 341
692 /* Schedule transfer tasklet */ 342 /* Schedule transfer tasklet */
693 tasklet_schedule(&drv_data->pump_transfers); 343 tasklet_schedule(&drv_data->pump_transfers);
@@ -798,103 +448,6 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
798 return drv_data->transfer_handler(drv_data); 448 return drv_data->transfer_handler(drv_data);
799} 449}
800 450
801static int set_dma_burst_and_threshold(struct chip_data *chip,
802 struct spi_device *spi,
803 u8 bits_per_word, u32 *burst_code,
804 u32 *threshold)
805{
806 struct pxa2xx_spi_chip *chip_info =
807 (struct pxa2xx_spi_chip *)spi->controller_data;
808 int bytes_per_word;
809 int burst_bytes;
810 int thresh_words;
811 int req_burst_size;
812 int retval = 0;
813
814 /* Set the threshold (in registers) to equal the same amount of data
815 * as represented by burst size (in bytes). The computation below
816 * is (burst_size rounded up to nearest 8 byte, word or long word)
817 * divided by (bytes/register); the tx threshold is the inverse of
818 * the rx, so that there will always be enough data in the rx fifo
819 * to satisfy a burst, and there will always be enough space in the
820 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
821 * there is not enough space), there must always remain enough empty
822 * space in the rx fifo for any data loaded to the tx fifo.
823 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
824 * will be 8, or half the fifo;
825 * The threshold can only be set to 2, 4 or 8, but not 16, because
826 * to burst 16 to the tx fifo, the fifo would have to be empty;
827 * however, the minimum fifo trigger level is 1, and the tx will
828 * request service when the fifo is at this level, with only 15 spaces.
829 */
830
831 /* find bytes/word */
832 if (bits_per_word <= 8)
833 bytes_per_word = 1;
834 else if (bits_per_word <= 16)
835 bytes_per_word = 2;
836 else
837 bytes_per_word = 4;
838
839 /* use struct pxa2xx_spi_chip->dma_burst_size if available */
840 if (chip_info)
841 req_burst_size = chip_info->dma_burst_size;
842 else {
843 switch (chip->dma_burst_size) {
844 default:
845 /* if the default burst size is not set,
846 * do it now */
847 chip->dma_burst_size = DCMD_BURST8;
848 case DCMD_BURST8:
849 req_burst_size = 8;
850 break;
851 case DCMD_BURST16:
852 req_burst_size = 16;
853 break;
854 case DCMD_BURST32:
855 req_burst_size = 32;
856 break;
857 }
858 }
859 if (req_burst_size <= 8) {
860 *burst_code = DCMD_BURST8;
861 burst_bytes = 8;
862 } else if (req_burst_size <= 16) {
863 if (bytes_per_word == 1) {
864 /* don't burst more than 1/2 the fifo */
865 *burst_code = DCMD_BURST8;
866 burst_bytes = 8;
867 retval = 1;
868 } else {
869 *burst_code = DCMD_BURST16;
870 burst_bytes = 16;
871 }
872 } else {
873 if (bytes_per_word == 1) {
874 /* don't burst more than 1/2 the fifo */
875 *burst_code = DCMD_BURST8;
876 burst_bytes = 8;
877 retval = 1;
878 } else if (bytes_per_word == 2) {
879 /* don't burst more than 1/2 the fifo */
880 *burst_code = DCMD_BURST16;
881 burst_bytes = 16;
882 retval = 1;
883 } else {
884 *burst_code = DCMD_BURST32;
885 burst_bytes = 32;
886 }
887 }
888
889 thresh_words = burst_bytes / bytes_per_word;
890
891 /* thresh_words will be between 2 and 8 */
892 *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
893 | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
894
895 return retval;
896}
897
898static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) 451static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
899{ 452{
900 unsigned long ssp_clk = drv_data->max_clk_rate; 453 unsigned long ssp_clk = drv_data->max_clk_rate;
@@ -956,8 +509,8 @@ static void pump_transfers(unsigned long data)
956 cs_deassert(drv_data); 509 cs_deassert(drv_data);
957 } 510 }
958 511
959 /* Check for transfers that need multiple DMA segments */ 512 /* Check if we can DMA this transfer */
960 if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { 513 if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {
961 514
962 /* reject already-mapped transfers; PIO won't always work */ 515 /* reject already-mapped transfers; PIO won't always work */
963 if (message->is_dma_mapped 516 if (message->is_dma_mapped
@@ -980,21 +533,20 @@ static void pump_transfers(unsigned long data)
980 } 533 }
981 534
982 /* Setup the transfer state based on the type of transfer */ 535 /* Setup the transfer state based on the type of transfer */
983 if (flush(drv_data) == 0) { 536 if (pxa2xx_spi_flush(drv_data) == 0) {
984 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 537 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
985 message->status = -EIO; 538 message->status = -EIO;
986 giveback(drv_data); 539 giveback(drv_data);
987 return; 540 return;
988 } 541 }
989 drv_data->n_bytes = chip->n_bytes; 542 drv_data->n_bytes = chip->n_bytes;
990 drv_data->dma_width = chip->dma_width;
991 drv_data->tx = (void *)transfer->tx_buf; 543 drv_data->tx = (void *)transfer->tx_buf;
992 drv_data->tx_end = drv_data->tx + transfer->len; 544 drv_data->tx_end = drv_data->tx + transfer->len;
993 drv_data->rx = transfer->rx_buf; 545 drv_data->rx = transfer->rx_buf;
994 drv_data->rx_end = drv_data->rx + transfer->len; 546 drv_data->rx_end = drv_data->rx + transfer->len;
995 drv_data->rx_dma = transfer->rx_dma; 547 drv_data->rx_dma = transfer->rx_dma;
996 drv_data->tx_dma = transfer->tx_dma; 548 drv_data->tx_dma = transfer->tx_dma;
997 drv_data->len = transfer->len & DCMD_LENGTH; 549 drv_data->len = transfer->len;
998 drv_data->write = drv_data->tx ? chip->write : null_writer; 550 drv_data->write = drv_data->tx ? chip->write : null_writer;
999 drv_data->read = drv_data->rx ? chip->read : null_reader; 551 drv_data->read = drv_data->rx ? chip->read : null_reader;
1000 552
@@ -1015,21 +567,18 @@ static void pump_transfers(unsigned long data)
1015 567
1016 if (bits <= 8) { 568 if (bits <= 8) {
1017 drv_data->n_bytes = 1; 569 drv_data->n_bytes = 1;
1018 drv_data->dma_width = DCMD_WIDTH1;
1019 drv_data->read = drv_data->read != null_reader ? 570 drv_data->read = drv_data->read != null_reader ?
1020 u8_reader : null_reader; 571 u8_reader : null_reader;
1021 drv_data->write = drv_data->write != null_writer ? 572 drv_data->write = drv_data->write != null_writer ?
1022 u8_writer : null_writer; 573 u8_writer : null_writer;
1023 } else if (bits <= 16) { 574 } else if (bits <= 16) {
1024 drv_data->n_bytes = 2; 575 drv_data->n_bytes = 2;
1025 drv_data->dma_width = DCMD_WIDTH2;
1026 drv_data->read = drv_data->read != null_reader ? 576 drv_data->read = drv_data->read != null_reader ?
1027 u16_reader : null_reader; 577 u16_reader : null_reader;
1028 drv_data->write = drv_data->write != null_writer ? 578 drv_data->write = drv_data->write != null_writer ?
1029 u16_writer : null_writer; 579 u16_writer : null_writer;
1030 } else if (bits <= 32) { 580 } else if (bits <= 32) {
1031 drv_data->n_bytes = 4; 581 drv_data->n_bytes = 4;
1032 drv_data->dma_width = DCMD_WIDTH4;
1033 drv_data->read = drv_data->read != null_reader ? 582 drv_data->read = drv_data->read != null_reader ?
1034 u32_reader : null_reader; 583 u32_reader : null_reader;
1035 drv_data->write = drv_data->write != null_writer ? 584 drv_data->write = drv_data->write != null_writer ?
@@ -1038,7 +587,8 @@ static void pump_transfers(unsigned long data)
1038 /* if bits/word is changed in dma mode, then must check the 587 /* if bits/word is changed in dma mode, then must check the
1039 * thresholds and burst also */ 588 * thresholds and burst also */
1040 if (chip->enable_dma) { 589 if (chip->enable_dma) {
1041 if (set_dma_burst_and_threshold(chip, message->spi, 590 if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
591 message->spi,
1042 bits, &dma_burst, 592 bits, &dma_burst,
1043 &dma_thresh)) 593 &dma_thresh))
1044 if (printk_ratelimit()) 594 if (printk_ratelimit())
@@ -1057,70 +607,21 @@ static void pump_transfers(unsigned long data)
1057 607
1058 message->state = RUNNING_STATE; 608 message->state = RUNNING_STATE;
1059 609
1060 /* Try to map dma buffer and do a dma transfer if successful, but
1061 * only if the length is non-zero and less than MAX_DMA_LEN.
1062 *
1063 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
1064 * of PIO instead. Care is needed above because the transfer may
1065 * have have been passed with buffers that are already dma mapped.
1066 * A zero-length transfer in PIO mode will not try to write/read
1067 * to/from the buffers
1068 *
1069 * REVISIT large transfers are exactly where we most want to be
1070 * using DMA. If this happens much, split those transfers into
1071 * multiple DMA segments rather than forcing PIO.
1072 */
1073 drv_data->dma_mapped = 0; 610 drv_data->dma_mapped = 0;
1074 if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) 611 if (pxa2xx_spi_dma_is_possible(drv_data->len))
1075 drv_data->dma_mapped = map_dma_buffers(drv_data); 612 drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
1076 if (drv_data->dma_mapped) { 613 if (drv_data->dma_mapped) {
1077 614
1078 /* Ensure we have the correct interrupt handler */ 615 /* Ensure we have the correct interrupt handler */
1079 drv_data->transfer_handler = dma_transfer; 616 drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
1080 617
1081 /* Setup rx DMA Channel */ 618 pxa2xx_spi_dma_prepare(drv_data, dma_burst);
1082 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
1083 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
1084 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
1085 if (drv_data->rx == drv_data->null_dma_buf)
1086 /* No target address increment */
1087 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
1088 | drv_data->dma_width
1089 | dma_burst
1090 | drv_data->len;
1091 else
1092 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
1093 | DCMD_FLOWSRC
1094 | drv_data->dma_width
1095 | dma_burst
1096 | drv_data->len;
1097
1098 /* Setup tx DMA Channel */
1099 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
1100 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
1101 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
1102 if (drv_data->tx == drv_data->null_dma_buf)
1103 /* No source address increment */
1104 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
1105 | drv_data->dma_width
1106 | dma_burst
1107 | drv_data->len;
1108 else
1109 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
1110 | DCMD_FLOWTRG
1111 | drv_data->dma_width
1112 | dma_burst
1113 | drv_data->len;
1114
1115 /* Enable dma end irqs on SSP to detect end of transfer */
1116 if (drv_data->ssp_type == PXA25x_SSP)
1117 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
1118 619
1119 /* Clear status and start DMA engine */ 620 /* Clear status and start DMA engine */
1120 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; 621 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
1121 write_SSSR(drv_data->clear_sr, reg); 622 write_SSSR(drv_data->clear_sr, reg);
1122 DCSR(drv_data->rx_channel) |= DCSR_RUN; 623
1123 DCSR(drv_data->tx_channel) |= DCSR_RUN; 624 pxa2xx_spi_dma_start(drv_data);
1124 } else { 625 } else {
1125 /* Ensure we have the correct interrupt handler */ 626 /* Ensure we have the correct interrupt handler */
1126 drv_data->transfer_handler = interrupt_transfer; 627 drv_data->transfer_handler = interrupt_transfer;
@@ -1262,8 +763,6 @@ static int setup(struct spi_device *spi)
1262 chip->gpio_cs = -1; 763 chip->gpio_cs = -1;
1263 chip->enable_dma = 0; 764 chip->enable_dma = 0;
1264 chip->timeout = TIMOUT_DFLT; 765 chip->timeout = TIMOUT_DFLT;
1265 chip->dma_burst_size = drv_data->master_info->enable_dma ?
1266 DCMD_BURST8 : 0;
1267 } 766 }
1268 767
1269 /* protocol drivers may change the chip settings, so... 768 /* protocol drivers may change the chip settings, so...
@@ -1293,7 +792,8 @@ static int setup(struct spi_device *spi)
1293 * burst and threshold can still respond to changes in bits_per_word */ 792 * burst and threshold can still respond to changes in bits_per_word */
1294 if (chip->enable_dma) { 793 if (chip->enable_dma) {
1295 /* set up legal burst and threshold for dma */ 794 /* set up legal burst and threshold for dma */
1296 if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, 795 if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
796 spi->bits_per_word,
1297 &chip->dma_burst_size, 797 &chip->dma_burst_size,
1298 &chip->dma_threshold)) { 798 &chip->dma_threshold)) {
1299 dev_warn(&spi->dev, "in setup: DMA burst size reduced " 799 dev_warn(&spi->dev, "in setup: DMA burst size reduced "
@@ -1328,18 +828,15 @@ static int setup(struct spi_device *spi)
1328 828
1329 if (spi->bits_per_word <= 8) { 829 if (spi->bits_per_word <= 8) {
1330 chip->n_bytes = 1; 830 chip->n_bytes = 1;
1331 chip->dma_width = DCMD_WIDTH1;
1332 chip->read = u8_reader; 831 chip->read = u8_reader;
1333 chip->write = u8_writer; 832 chip->write = u8_writer;
1334 } else if (spi->bits_per_word <= 16) { 833 } else if (spi->bits_per_word <= 16) {
1335 chip->n_bytes = 2; 834 chip->n_bytes = 2;
1336 chip->dma_width = DCMD_WIDTH2;
1337 chip->read = u16_reader; 835 chip->read = u16_reader;
1338 chip->write = u16_writer; 836 chip->write = u16_writer;
1339 } else if (spi->bits_per_word <= 32) { 837 } else if (spi->bits_per_word <= 32) {
1340 chip->cr0 |= SSCR0_EDSS; 838 chip->cr0 |= SSCR0_EDSS;
1341 chip->n_bytes = 4; 839 chip->n_bytes = 4;
1342 chip->dma_width = DCMD_WIDTH4;
1343 chip->read = u32_reader; 840 chip->read = u32_reader;
1344 chip->write = u32_writer; 841 chip->write = u32_writer;
1345 } else { 842 } else {
@@ -1447,31 +944,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1447 drv_data->tx_channel = -1; 944 drv_data->tx_channel = -1;
1448 drv_data->rx_channel = -1; 945 drv_data->rx_channel = -1;
1449 if (platform_info->enable_dma) { 946 if (platform_info->enable_dma) {
1450 947 status = pxa2xx_spi_dma_setup(drv_data);
1451 /* Get two DMA channels (rx and tx) */ 948 if (status) {
1452 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", 949 dev_warn(dev, "failed to setup DMA, using PIO\n");
1453 DMA_PRIO_HIGH, 950 platform_info->enable_dma = false;
1454 dma_handler,
1455 drv_data);
1456 if (drv_data->rx_channel < 0) {
1457 dev_err(dev, "problem (%d) requesting rx channel\n",
1458 drv_data->rx_channel);
1459 status = -ENODEV;
1460 goto out_error_irq_alloc;
1461 } 951 }
1462 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
1463 DMA_PRIO_MEDIUM,
1464 dma_handler,
1465 drv_data);
1466 if (drv_data->tx_channel < 0) {
1467 dev_err(dev, "problem (%d) requesting tx channel\n",
1468 drv_data->tx_channel);
1469 status = -ENODEV;
1470 goto out_error_dma_alloc;
1471 }
1472
1473 DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
1474 DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
1475 } 952 }
1476 953
1477 /* Enable SOC clock */ 954 /* Enable SOC clock */
@@ -1507,14 +984,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1507 984
1508out_error_clock_enabled: 985out_error_clock_enabled:
1509 clk_disable_unprepare(ssp->clk); 986 clk_disable_unprepare(ssp->clk);
1510 987 pxa2xx_spi_dma_release(drv_data);
1511out_error_dma_alloc:
1512 if (drv_data->tx_channel != -1)
1513 pxa_free_dma(drv_data->tx_channel);
1514 if (drv_data->rx_channel != -1)
1515 pxa_free_dma(drv_data->rx_channel);
1516
1517out_error_irq_alloc:
1518 free_irq(ssp->irq, drv_data); 988 free_irq(ssp->irq, drv_data);
1519 989
1520out_error_master_alloc: 990out_error_master_alloc:
@@ -1537,12 +1007,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1537 clk_disable_unprepare(ssp->clk); 1007 clk_disable_unprepare(ssp->clk);
1538 1008
1539 /* Release DMA */ 1009 /* Release DMA */
1540 if (drv_data->master_info->enable_dma) { 1010 if (drv_data->master_info->enable_dma)
1541 DRCMR(ssp->drcmr_rx) = 0; 1011 pxa2xx_spi_dma_release(drv_data);
1542 DRCMR(ssp->drcmr_tx) = 0;
1543 pxa_free_dma(drv_data->tx_channel);
1544 pxa_free_dma(drv_data->rx_channel);
1545 }
1546 1012
1547 /* Release IRQ */ 1013 /* Release IRQ */
1548 free_irq(ssp->irq, drv_data); 1014 free_irq(ssp->irq, drv_data);
@@ -1589,12 +1055,7 @@ static int pxa2xx_spi_resume(struct device *dev)
1589 struct ssp_device *ssp = drv_data->ssp; 1055 struct ssp_device *ssp = drv_data->ssp;
1590 int status = 0; 1056 int status = 0;
1591 1057
1592 if (drv_data->rx_channel != -1) 1058 pxa2xx_spi_dma_resume(drv_data);
1593 DRCMR(drv_data->ssp->drcmr_rx) =
1594 DRCMR_MAPVLD | drv_data->rx_channel;
1595 if (drv_data->tx_channel != -1)
1596 DRCMR(drv_data->ssp->drcmr_tx) =
1597 DRCMR_MAPVLD | drv_data->tx_channel;
1598 1059
1599 /* Enable the SSP clock */ 1060 /* Enable the SSP clock */
1600 clk_prepare_enable(ssp->clk); 1061 clk_prepare_enable(ssp->clk);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
new file mode 100644
index 000000000000..0a98905c916e
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.h
@@ -0,0 +1,185 @@
1/*
2 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3 * Copyright (C) 2013, Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef SPI_PXA2XX_H
11#define SPI_PXA2XX_H
12
13#include <linux/errno.h>
14#include <linux/io.h>
15#include <linux/interrupt.h>
16#include <linux/platform_device.h>
17#include <linux/pxa2xx_ssp.h>
18#include <linux/spi/spi.h>
19#include <linux/spi/pxa2xx_spi.h>
20
21struct driver_data {
22 /* Driver model hookup */
23 struct platform_device *pdev;
24
25 /* SSP Info */
26 struct ssp_device *ssp;
27
28 /* SPI framework hookup */
29 enum pxa_ssp_type ssp_type;
30 struct spi_master *master;
31
32 /* PXA hookup */
33 struct pxa2xx_spi_master *master_info;
34
35 /* PXA private DMA setup stuff */
36 int rx_channel;
37 int tx_channel;
38 u32 *null_dma_buf;
39
40 /* SSP register addresses */
41 void __iomem *ioaddr;
42 u32 ssdr_physical;
43
44 /* SSP masks*/
45 u32 dma_cr1;
46 u32 int_cr1;
47 u32 clear_sr;
48 u32 mask_sr;
49
50 /* Maximun clock rate */
51 unsigned long max_clk_rate;
52
53 /* Message Transfer pump */
54 struct tasklet_struct pump_transfers;
55
56 /* Current message transfer state info */
57 struct spi_message *cur_msg;
58 struct spi_transfer *cur_transfer;
59 struct chip_data *cur_chip;
60 size_t len;
61 void *tx;
62 void *tx_end;
63 void *rx;
64 void *rx_end;
65 int dma_mapped;
66 dma_addr_t rx_dma;
67 dma_addr_t tx_dma;
68 size_t rx_map_len;
69 size_t tx_map_len;
70 u8 n_bytes;
71 int (*write)(struct driver_data *drv_data);
72 int (*read)(struct driver_data *drv_data);
73 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
74 void (*cs_control)(u32 command);
75};
76
77struct chip_data {
78 u32 cr0;
79 u32 cr1;
80 u32 psp;
81 u32 timeout;
82 u8 n_bytes;
83 u32 dma_burst_size;
84 u32 threshold;
85 u32 dma_threshold;
86 u8 enable_dma;
87 u8 bits_per_word;
88 u32 speed_hz;
89 union {
90 int gpio_cs;
91 unsigned int frm;
92 };
93 int gpio_cs_inverted;
94 int (*write)(struct driver_data *drv_data);
95 int (*read)(struct driver_data *drv_data);
96 void (*cs_control)(u32 command);
97};
98
99#define DEFINE_SSP_REG(reg, off) \
100static inline u32 read_##reg(void const __iomem *p) \
101{ return __raw_readl(p + (off)); } \
102\
103static inline void write_##reg(u32 v, void __iomem *p) \
104{ __raw_writel(v, p + (off)); }
105
106DEFINE_SSP_REG(SSCR0, 0x00)
107DEFINE_SSP_REG(SSCR1, 0x04)
108DEFINE_SSP_REG(SSSR, 0x08)
109DEFINE_SSP_REG(SSITR, 0x0c)
110DEFINE_SSP_REG(SSDR, 0x10)
111DEFINE_SSP_REG(SSTO, 0x28)
112DEFINE_SSP_REG(SSPSP, 0x2c)
113
114#define START_STATE ((void *)0)
115#define RUNNING_STATE ((void *)1)
116#define DONE_STATE ((void *)2)
117#define ERROR_STATE ((void *)-1)
118
119#define MAX_DMA_LEN 8191
120#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
121#define DMA_ALIGNMENT 8
122
123static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
124{
125 if (drv_data->ssp_type == PXA25x_SSP)
126 return 1;
127 if (drv_data->ssp_type == CE4100_SSP)
128 return 1;
129 return 0;
130}
131
132static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
133{
134 void __iomem *reg = drv_data->ioaddr;
135
136 if (drv_data->ssp_type == CE4100_SSP)
137 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
138
139 write_SSSR(val, reg);
140}
141
142extern int pxa2xx_spi_flush(struct driver_data *drv_data);
143extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
144
145#if defined(CONFIG_SPI_PXA2XX_PXADMA)
146extern bool pxa2xx_spi_dma_is_possible(size_t len);
147extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
148extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
149extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
150extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
151extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
152extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
153extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
154extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
155 struct spi_device *spi,
156 u8 bits_per_word,
157 u32 *burst_code,
158 u32 *threshold);
159#else
160static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
161static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
162{
163 return 0;
164}
165#define pxa2xx_spi_dma_transfer NULL
166static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
167 u32 dma_burst) {}
168static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
169static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
170{
171 return 0;
172}
173static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
174static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {}
175static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
176 struct spi_device *spi,
177 u8 bits_per_word,
178 u32 *burst_code,
179 u32 *threshold)
180{
181 return -ENODEV;
182}
183#endif
184
185#endif /* SPI_PXA2XX_H */
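With these stubs in place the core driver needs no #ifdefs of its own; the probe path (see the spi-pxa2xx.c hunk above) simply attempts DMA setup and drops back to PIO on failure:

	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_warn(dev, "failed to setup DMA, using PIO\n");
			platform_info->enable_dma = false;
		}
	}

When CONFIG_SPI_PXA2XX_PXADMA is not set, the stubbed pxa2xx_spi_dma_setup() returns 0 and pxa2xx_spi_dma_is_possible() always returns false, so every transfer stays on the PIO path.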
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 053b5ba51b25..d6d2b4d557f8 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -53,85 +53,5 @@ struct pxa2xx_spi_chip {
53 53
54extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); 54extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
55 55
56#else
57/*
58 * This is the implemtation for CE4100 on x86. ARM defines them in mach/ or
59 * plat/ include path.
60 * The CE4100 does not provide DMA support. This bits are here to let the driver
61 * compile and will never be used. Maybe we get DMA support at a later point in
62 * time.
63 */
64
65#define DCSR(n) (n)
66#define DSADR(n) (n)
67#define DTADR(n) (n)
68#define DCMD(n) (n)
69#define DRCMR(n) (n)
70
71#define DCSR_RUN (1 << 31) /* Run Bit */
72#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch */
73#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable */
74#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
75#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
76#define DCSR_ENDINTR (1 << 2) /* End Interrupt */
77#define DCSR_STARTINTR (1 << 1) /* Start Interrupt */
78#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt */
79
80#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable */
81#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
82#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
83#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
84#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
85#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
86#define DCSR_EORINTR (1 << 9) /* The end of Receive */
87
88#define DRCMR_MAPVLD (1 << 7) /* Map Valid */
89#define DRCMR_CHLNUM 0x1f /* mask for Channel Number */
90
91#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor */
92#define DDADR_STOP (1 << 0) /* Stop */
93
94#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
95#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
96#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
97#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
98#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
99#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
100#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
101#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
102#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
103#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
104#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
105#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
106#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
107#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
108
109/*
110 * Descriptor structure for PXA's DMA engine
111 * Note: this structure must always be aligned to a 16-byte boundary.
112 */
113
114typedef enum {
115 DMA_PRIO_HIGH = 0,
116 DMA_PRIO_MEDIUM = 1,
117 DMA_PRIO_LOW = 2
118} pxa_dma_prio;
119
120/*
121 * DMA registration
122 */
123
124static inline int pxa_request_dma(char *name,
125 pxa_dma_prio prio,
126 void (*irq_handler)(int, void *),
127 void *data)
128{
129 return -ENODEV;
130}
131
132static inline void pxa_free_dma(int dma_ch)
133{
134}
135
136#endif 56#endif
137#endif 57#endif