diff options
39 files changed, 1539 insertions, 2729 deletions
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt index aad527b357a0..523341a0e113 100644 --- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt +++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt | |||
@@ -2,11 +2,21 @@ | |||
2 | (CSPI/eCSPI) for i.MX | 2 | (CSPI/eCSPI) for i.MX |
3 | 3 | ||
4 | Required properties: | 4 | Required properties: |
5 | - compatible : Should be "fsl,<soc>-cspi" or "fsl,<soc>-ecspi" | 5 | - compatible : |
6 | - "fsl,imx1-cspi" for SPI compatible with the one integrated on i.MX1 | ||
7 | - "fsl,imx21-cspi" for SPI compatible with the one integrated on i.MX21 | ||
8 | - "fsl,imx27-cspi" for SPI compatible with the one integrated on i.MX27 | ||
9 | - "fsl,imx31-cspi" for SPI compatible with the one integrated on i.MX31 | ||
10 | - "fsl,imx35-cspi" for SPI compatible with the one integrated on i.MX35 | ||
11 | - "fsl,imx51-ecspi" for SPI compatible with the one integrated on i.MX51 | ||
6 | - reg : Offset and length of the register set for the device | 12 | - reg : Offset and length of the register set for the device |
7 | - interrupts : Should contain CSPI/eCSPI interrupt | 13 | - interrupts : Should contain CSPI/eCSPI interrupt |
8 | - fsl,spi-num-chipselects : Contains the number of the chipselect | 14 | - fsl,spi-num-chipselects : Contains the number of the chipselect |
9 | - cs-gpios : Specifies the gpio pins to be used for chipselects. | 15 | - cs-gpios : Specifies the gpio pins to be used for chipselects. |
16 | - clocks : Clock specifiers for both ipg and per clocks. | ||
17 | - clock-names : Clock names should include both "ipg" and "per" | ||
18 | See the clock consumer binding, | ||
19 | Documentation/devicetree/bindings/clock/clock-bindings.txt | ||
10 | - dmas: DMA specifiers for tx and rx dma. See the DMA client binding, | 20 | - dmas: DMA specifiers for tx and rx dma. See the DMA client binding, |
11 | Documentation/devicetree/bindings/dma/dma.txt | 21 | Documentation/devicetree/bindings/dma/dma.txt |
12 | - dma-names: DMA request names should include "tx" and "rx" if present. | 22 | - dma-names: DMA request names should include "tx" and "rx" if present. |
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt index e2c88df2cc15..5c090771c016 100644 --- a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt +++ b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt | |||
@@ -33,6 +33,11 @@ Optional properties: | |||
33 | nodes. If unspecified, a single SPI device without a chip | 33 | nodes. If unspecified, a single SPI device without a chip |
34 | select can be used. | 34 | select can be used. |
35 | 35 | ||
36 | - dmas: Two DMA channel specifiers following the convention outlined | ||
37 | in bindings/dma/dma.txt | ||
38 | - dma-names: Names for the dma channels, if present. There must be at | ||
39 | least one channel named "tx" for transmit and named "rx" for | ||
40 | receive. | ||
36 | 41 | ||
37 | SPI slave nodes must be children of the SPI master node and can contain | 42 | SPI slave nodes must be children of the SPI master node and can contain |
38 | properties described in Documentation/devicetree/bindings/spi/spi-bus.txt | 43 | properties described in Documentation/devicetree/bindings/spi/spi-bus.txt |
@@ -51,6 +56,9 @@ Example: | |||
51 | clocks = <&gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; | 56 | clocks = <&gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; |
52 | clock-names = "core", "iface"; | 57 | clock-names = "core", "iface"; |
53 | 58 | ||
59 | dmas = <&blsp1_bam 13>, <&blsp1_bam 12>; | ||
60 | dma-names = "rx", "tx"; | ||
61 | |||
54 | pinctrl-names = "default"; | 62 | pinctrl-names = "default"; |
55 | pinctrl-0 = <&spi8_default>; | 63 | pinctrl-0 = <&spi8_default>; |
56 | 64 | ||
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt index cbbe16ed3874..70af78a9185e 100644 --- a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt +++ b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt | |||
@@ -16,6 +16,12 @@ Optional property: | |||
16 | in big endian mode, otherwise in native mode(same with CPU), for more | 16 | in big endian mode, otherwise in native mode(same with CPU), for more |
17 | detail please see: Documentation/devicetree/bindings/regmap/regmap.txt. | 17 | detail please see: Documentation/devicetree/bindings/regmap/regmap.txt. |
18 | 18 | ||
19 | Optional SPI slave node properties: | ||
20 | - fsl,spi-cs-sck-delay: a delay in nanoseconds between activating chip | ||
21 | select and the start of clock signal, at the start of a transfer. | ||
22 | - fsl,spi-sck-cs-delay: a delay in nanoseconds between stopping the clock | ||
23 | signal and deactivating chip select, at the end of a transfer. | ||
24 | |||
19 | Example: | 25 | Example: |
20 | 26 | ||
21 | dspi0@4002c000 { | 27 | dspi0@4002c000 { |
@@ -43,6 +49,8 @@ dspi0@4002c000 { | |||
43 | reg = <0>; | 49 | reg = <0>; |
44 | linux,modalias = "m25p80"; | 50 | linux,modalias = "m25p80"; |
45 | modal = "at26df081a"; | 51 | modal = "at26df081a"; |
52 | fsl,spi-cs-sck-delay = <100>; | ||
53 | fsl,spi-sck-cs-delay = <50>; | ||
46 | }; | 54 | }; |
47 | }; | 55 | }; |
48 | 56 | ||
diff --git a/Documentation/devicetree/bindings/spi/spi-img-spfi.txt b/Documentation/devicetree/bindings/spi/spi-img-spfi.txt index c7dd50fb8eb2..e02fbf18c82c 100644 --- a/Documentation/devicetree/bindings/spi/spi-img-spfi.txt +++ b/Documentation/devicetree/bindings/spi/spi-img-spfi.txt | |||
@@ -14,6 +14,7 @@ Required properties: | |||
14 | - dma-names: Must include the following entries: | 14 | - dma-names: Must include the following entries: |
15 | - rx | 15 | - rx |
16 | - tx | 16 | - tx |
17 | - cs-gpios: Must specify the GPIOs used for chipselect lines. | ||
17 | - #address-cells: Must be 1. | 18 | - #address-cells: Must be 1. |
18 | - #size-cells: Must be 0. | 19 | - #size-cells: Must be 0. |
19 | 20 | ||
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.txt b/Documentation/devicetree/bindings/spi/spi-rockchip.txt index 467dec441c62..0c491bda4c65 100644 --- a/Documentation/devicetree/bindings/spi/spi-rockchip.txt +++ b/Documentation/devicetree/bindings/spi/spi-rockchip.txt | |||
@@ -24,6 +24,9 @@ Optional Properties: | |||
24 | - dmas: DMA specifiers for tx and rx dma. See the DMA client binding, | 24 | - dmas: DMA specifiers for tx and rx dma. See the DMA client binding, |
25 | Documentation/devicetree/bindings/dma/dma.txt | 25 | Documentation/devicetree/bindings/dma/dma.txt |
26 | - dma-names: DMA request names should include "tx" and "rx" if present. | 26 | - dma-names: DMA request names should include "tx" and "rx" if present. |
27 | - rx-sample-delay-ns: nanoseconds to delay after the SCLK edge before sampling | ||
28 | Rx data (may need to be fine tuned for high capacitance lines). | ||
29 | No delay (0) by default. | ||
27 | 30 | ||
28 | 31 | ||
29 | Example: | 32 | Example: |
@@ -33,6 +36,7 @@ Example: | |||
33 | reg = <0xff110000 0x1000>; | 36 | reg = <0xff110000 0x1000>; |
34 | dmas = <&pdma1 11>, <&pdma1 12>; | 37 | dmas = <&pdma1 11>, <&pdma1 12>; |
35 | dma-names = "tx", "rx"; | 38 | dma-names = "tx", "rx"; |
39 | rx-sample-delay-ns = <10>; | ||
36 | #address-cells = <1>; | 40 | #address-cells = <1>; |
37 | #size-cells = <0>; | 41 | #size-cells = <0>; |
38 | interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; | 42 | interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; |
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary index d29734bff28c..d1824b399b2d 100644 --- a/Documentation/spi/spi-summary +++ b/Documentation/spi/spi-summary | |||
@@ -342,12 +342,11 @@ SPI protocol drivers somewhat resemble platform device drivers: | |||
342 | .driver = { | 342 | .driver = { |
343 | .name = "CHIP", | 343 | .name = "CHIP", |
344 | .owner = THIS_MODULE, | 344 | .owner = THIS_MODULE, |
345 | .pm = &CHIP_pm_ops, | ||
345 | }, | 346 | }, |
346 | 347 | ||
347 | .probe = CHIP_probe, | 348 | .probe = CHIP_probe, |
348 | .remove = CHIP_remove, | 349 | .remove = CHIP_remove, |
349 | .suspend = CHIP_suspend, | ||
350 | .resume = CHIP_resume, | ||
351 | }; | 350 | }; |
352 | 351 | ||
353 | The driver core will automatically attempt to bind this driver to any SPI | 352 | The driver core will automatically attempt to bind this driver to any SPI |
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c index 3a2f9d59edab..94f574b0fdb2 100644 --- a/Documentation/spi/spidev_test.c +++ b/Documentation/spi/spidev_test.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <unistd.h> | 15 | #include <unistd.h> |
16 | #include <stdio.h> | 16 | #include <stdio.h> |
17 | #include <stdlib.h> | 17 | #include <stdlib.h> |
18 | #include <string.h> | ||
18 | #include <getopt.h> | 19 | #include <getopt.h> |
19 | #include <fcntl.h> | 20 | #include <fcntl.h> |
20 | #include <sys/ioctl.h> | 21 | #include <sys/ioctl.h> |
@@ -34,24 +35,79 @@ static uint32_t mode; | |||
34 | static uint8_t bits = 8; | 35 | static uint8_t bits = 8; |
35 | static uint32_t speed = 500000; | 36 | static uint32_t speed = 500000; |
36 | static uint16_t delay; | 37 | static uint16_t delay; |
38 | static int verbose; | ||
37 | 39 | ||
38 | static void transfer(int fd) | 40 | uint8_t default_tx[] = { |
41 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
42 | 0x40, 0x00, 0x00, 0x00, 0x00, 0x95, | ||
43 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
44 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
45 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
46 | 0xF0, 0x0D, | ||
47 | }; | ||
48 | |||
49 | uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, }; | ||
50 | char *input_tx; | ||
51 | |||
52 | static void hex_dump(const void *src, size_t length, size_t line_size, char *prefix) | ||
53 | { | ||
54 | int i = 0; | ||
55 | const unsigned char *address = src; | ||
56 | const unsigned char *line = address; | ||
57 | unsigned char c; | ||
58 | |||
59 | printf("%s | ", prefix); | ||
60 | while (length-- > 0) { | ||
61 | printf("%02X ", *address++); | ||
62 | if (!(++i % line_size) || (length == 0 && i % line_size)) { | ||
63 | if (length == 0) { | ||
64 | while (i++ % line_size) | ||
65 | printf("__ "); | ||
66 | } | ||
67 | printf(" | "); /* right close */ | ||
68 | while (line < address) { | ||
69 | c = *line++; | ||
70 | printf("%c", (c < 33 || c == 255) ? 0x2E : c); | ||
71 | } | ||
72 | printf("\n"); | ||
73 | if (length > 0) | ||
74 | printf("%s | ", prefix); | ||
75 | } | ||
76 | } | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Unescape - process hexadecimal escape character | ||
81 | * converts shell input "\x23" -> 0x23 | ||
82 | */ | ||
83 | int unespcape(char *_dst, char *_src, size_t len) | ||
84 | { | ||
85 | int ret = 0; | ||
86 | char *src = _src; | ||
87 | char *dst = _dst; | ||
88 | unsigned int ch; | ||
89 | |||
90 | while (*src) { | ||
91 | if (*src == '\\' && *(src+1) == 'x') { | ||
92 | sscanf(src + 2, "%2x", &ch); | ||
93 | src += 4; | ||
94 | *dst++ = (unsigned char)ch; | ||
95 | } else { | ||
96 | *dst++ = *src++; | ||
97 | } | ||
98 | ret++; | ||
99 | } | ||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | static void transfer(int fd, uint8_t const *tx, uint8_t const *rx, size_t len) | ||
39 | { | 104 | { |
40 | int ret; | 105 | int ret; |
41 | uint8_t tx[] = { | 106 | |
42 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
43 | 0x40, 0x00, 0x00, 0x00, 0x00, 0x95, | ||
44 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
45 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
46 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
47 | 0xDE, 0xAD, 0xBE, 0xEF, 0xBA, 0xAD, | ||
48 | 0xF0, 0x0D, | ||
49 | }; | ||
50 | uint8_t rx[ARRAY_SIZE(tx)] = {0, }; | ||
51 | struct spi_ioc_transfer tr = { | 107 | struct spi_ioc_transfer tr = { |
52 | .tx_buf = (unsigned long)tx, | 108 | .tx_buf = (unsigned long)tx, |
53 | .rx_buf = (unsigned long)rx, | 109 | .rx_buf = (unsigned long)rx, |
54 | .len = ARRAY_SIZE(tx), | 110 | .len = len, |
55 | .delay_usecs = delay, | 111 | .delay_usecs = delay, |
56 | .speed_hz = speed, | 112 | .speed_hz = speed, |
57 | .bits_per_word = bits, | 113 | .bits_per_word = bits, |
@@ -76,12 +132,9 @@ static void transfer(int fd) | |||
76 | if (ret < 1) | 132 | if (ret < 1) |
77 | pabort("can't send spi message"); | 133 | pabort("can't send spi message"); |
78 | 134 | ||
79 | for (ret = 0; ret < ARRAY_SIZE(tx); ret++) { | 135 | if (verbose) |
80 | if (!(ret % 6)) | 136 | hex_dump(tx, len, 32, "TX"); |
81 | puts(""); | 137 | hex_dump(rx, len, 32, "RX"); |
82 | printf("%.2X ", rx[ret]); | ||
83 | } | ||
84 | puts(""); | ||
85 | } | 138 | } |
86 | 139 | ||
87 | static void print_usage(const char *prog) | 140 | static void print_usage(const char *prog) |
@@ -97,6 +150,8 @@ static void print_usage(const char *prog) | |||
97 | " -L --lsb least significant bit first\n" | 150 | " -L --lsb least significant bit first\n" |
98 | " -C --cs-high chip select active high\n" | 151 | " -C --cs-high chip select active high\n" |
99 | " -3 --3wire SI/SO signals shared\n" | 152 | " -3 --3wire SI/SO signals shared\n" |
153 | " -v --verbose Verbose (show tx buffer)\n" | ||
154 | " -p Send data (e.g. \"1234\\xde\\xad\")\n" | ||
100 | " -N --no-cs no chip select\n" | 155 | " -N --no-cs no chip select\n" |
101 | " -R --ready slave pulls low to pause\n" | 156 | " -R --ready slave pulls low to pause\n" |
102 | " -2 --dual dual transfer\n" | 157 | " -2 --dual dual transfer\n" |
@@ -121,12 +176,13 @@ static void parse_opts(int argc, char *argv[]) | |||
121 | { "no-cs", 0, 0, 'N' }, | 176 | { "no-cs", 0, 0, 'N' }, |
122 | { "ready", 0, 0, 'R' }, | 177 | { "ready", 0, 0, 'R' }, |
123 | { "dual", 0, 0, '2' }, | 178 | { "dual", 0, 0, '2' }, |
179 | { "verbose", 0, 0, 'v' }, | ||
124 | { "quad", 0, 0, '4' }, | 180 | { "quad", 0, 0, '4' }, |
125 | { NULL, 0, 0, 0 }, | 181 | { NULL, 0, 0, 0 }, |
126 | }; | 182 | }; |
127 | int c; | 183 | int c; |
128 | 184 | ||
129 | c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR24", lopts, NULL); | 185 | c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR24p:v", lopts, NULL); |
130 | 186 | ||
131 | if (c == -1) | 187 | if (c == -1) |
132 | break; | 188 | break; |
@@ -165,9 +221,15 @@ static void parse_opts(int argc, char *argv[]) | |||
165 | case 'N': | 221 | case 'N': |
166 | mode |= SPI_NO_CS; | 222 | mode |= SPI_NO_CS; |
167 | break; | 223 | break; |
224 | case 'v': | ||
225 | verbose = 1; | ||
226 | break; | ||
168 | case 'R': | 227 | case 'R': |
169 | mode |= SPI_READY; | 228 | mode |= SPI_READY; |
170 | break; | 229 | break; |
230 | case 'p': | ||
231 | input_tx = optarg; | ||
232 | break; | ||
171 | case '2': | 233 | case '2': |
172 | mode |= SPI_TX_DUAL; | 234 | mode |= SPI_TX_DUAL; |
173 | break; | 235 | break; |
@@ -191,6 +253,9 @@ int main(int argc, char *argv[]) | |||
191 | { | 253 | { |
192 | int ret = 0; | 254 | int ret = 0; |
193 | int fd; | 255 | int fd; |
256 | uint8_t *tx; | ||
257 | uint8_t *rx; | ||
258 | int size; | ||
194 | 259 | ||
195 | parse_opts(argc, argv); | 260 | parse_opts(argc, argv); |
196 | 261 | ||
@@ -235,7 +300,17 @@ int main(int argc, char *argv[]) | |||
235 | printf("bits per word: %d\n", bits); | 300 | printf("bits per word: %d\n", bits); |
236 | printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000); | 301 | printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000); |
237 | 302 | ||
238 | transfer(fd); | 303 | if (input_tx) { |
304 | size = strlen(input_tx+1); | ||
305 | tx = malloc(size); | ||
306 | rx = malloc(size); | ||
307 | size = unespcape((char *)tx, input_tx, size); | ||
308 | transfer(fd, tx, rx, size); | ||
309 | free(rx); | ||
310 | free(tx); | ||
311 | } else { | ||
312 | transfer(fd, default_tx, default_rx, sizeof(default_tx)); | ||
313 | } | ||
239 | 314 | ||
240 | close(fd); | 315 | close(fd); |
241 | 316 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a874b6ec6650..942ca541dcbd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -51,19 +51,6 @@ config INTEL_MIC_X100_DMA | |||
51 | OS and tools for MIC to use with this driver are available from | 51 | OS and tools for MIC to use with this driver are available from |
52 | <http://software.intel.com/en-us/mic-developer>. | 52 | <http://software.intel.com/en-us/mic-developer>. |
53 | 53 | ||
54 | config INTEL_MID_DMAC | ||
55 | tristate "Intel MID DMA support for Peripheral DMA controllers" | ||
56 | depends on PCI && X86 | ||
57 | select DMA_ENGINE | ||
58 | default n | ||
59 | help | ||
60 | Enable support for the Intel(R) MID DMA engine present | ||
61 | in Intel MID chipsets. | ||
62 | |||
63 | Say Y here if you have such a chipset. | ||
64 | |||
65 | If unsure, say N. | ||
66 | |||
67 | config ASYNC_TX_ENABLE_CHANNEL_SWITCH | 54 | config ASYNC_TX_ENABLE_CHANNEL_SWITCH |
68 | bool | 55 | bool |
69 | 56 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index f915f61ec574..539d4825bd76 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o | |||
6 | obj-$(CONFIG_DMA_ACPI) += acpi-dma.o | 6 | obj-$(CONFIG_DMA_ACPI) += acpi-dma.o |
7 | obj-$(CONFIG_DMA_OF) += of-dma.o | 7 | obj-$(CONFIG_DMA_OF) += of-dma.o |
8 | 8 | ||
9 | obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o | ||
10 | obj-$(CONFIG_DMATEST) += dmatest.o | 9 | obj-$(CONFIG_DMATEST) += dmatest.o |
11 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ | 10 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ |
12 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 11 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c deleted file mode 100644 index 5aaead9b56f7..000000000000 --- a/drivers/dma/intel_mid_dma.c +++ /dev/null | |||
@@ -1,1447 +0,0 @@ | |||
1 | /* | ||
2 | * intel_mid_dma.c - Intel Langwell DMA Drivers | ||
3 | * | ||
4 | * Copyright (C) 2008-10 Intel Corp | ||
5 | * Author: Vinod Koul <vinod.koul@intel.com> | ||
6 | * The driver design is based on dw_dmac driver | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; version 2 of the License. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
23 | * | ||
24 | * | ||
25 | */ | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/pm_runtime.h> | ||
29 | #include <linux/intel_mid_dma.h> | ||
30 | #include <linux/module.h> | ||
31 | |||
32 | #include "dmaengine.h" | ||
33 | |||
34 | #define MAX_CHAN 4 /*max ch across controllers*/ | ||
35 | #include "intel_mid_dma_regs.h" | ||
36 | |||
37 | #define INTEL_MID_DMAC1_ID 0x0814 | ||
38 | #define INTEL_MID_DMAC2_ID 0x0813 | ||
39 | #define INTEL_MID_GP_DMAC2_ID 0x0827 | ||
40 | #define INTEL_MFLD_DMAC1_ID 0x0830 | ||
41 | #define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 | ||
42 | #define LNW_PERIPHRAL_MASK_SIZE 0x10 | ||
43 | #define LNW_PERIPHRAL_STATUS 0x0 | ||
44 | #define LNW_PERIPHRAL_MASK 0x8 | ||
45 | |||
46 | struct intel_mid_dma_probe_info { | ||
47 | u8 max_chan; | ||
48 | u8 ch_base; | ||
49 | u16 block_size; | ||
50 | u32 pimr_mask; | ||
51 | }; | ||
52 | |||
53 | #define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ | ||
54 | ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ | ||
55 | .max_chan = (_max_chan), \ | ||
56 | .ch_base = (_ch_base), \ | ||
57 | .block_size = (_block_size), \ | ||
58 | .pimr_mask = (_pimr_mask), \ | ||
59 | }) | ||
60 | |||
61 | /***************************************************************************** | ||
62 | Utility Functions*/ | ||
63 | /** | ||
64 | * get_ch_index - convert status to channel | ||
65 | * @status: status mask | ||
66 | * @base: dma ch base value | ||
67 | * | ||
68 | * Modify the status mask and return the channel index needing | ||
69 | * attention (or -1 if neither) | ||
70 | */ | ||
71 | static int get_ch_index(int *status, unsigned int base) | ||
72 | { | ||
73 | int i; | ||
74 | for (i = 0; i < MAX_CHAN; i++) { | ||
75 | if (*status & (1 << (i + base))) { | ||
76 | *status = *status & ~(1 << (i + base)); | ||
77 | pr_debug("MDMA: index %d New status %x\n", i, *status); | ||
78 | return i; | ||
79 | } | ||
80 | } | ||
81 | return -1; | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * get_block_ts - calculates dma transaction length | ||
86 | * @len: dma transfer length | ||
87 | * @tx_width: dma transfer src width | ||
88 | * @block_size: dma controller max block size | ||
89 | * | ||
90 | * Based on src width calculate the DMA trsaction length in data items | ||
91 | * return data items or FFFF if exceeds max length for block | ||
92 | */ | ||
93 | static int get_block_ts(int len, int tx_width, int block_size) | ||
94 | { | ||
95 | int byte_width = 0, block_ts = 0; | ||
96 | |||
97 | switch (tx_width) { | ||
98 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
99 | byte_width = 1; | ||
100 | break; | ||
101 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
102 | byte_width = 2; | ||
103 | break; | ||
104 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
105 | default: | ||
106 | byte_width = 4; | ||
107 | break; | ||
108 | } | ||
109 | |||
110 | block_ts = len/byte_width; | ||
111 | if (block_ts > block_size) | ||
112 | block_ts = 0xFFFF; | ||
113 | return block_ts; | ||
114 | } | ||
115 | |||
116 | /***************************************************************************** | ||
117 | DMAC1 interrupt Functions*/ | ||
118 | |||
119 | /** | ||
120 | * dmac1_mask_periphral_intr - mask the periphral interrupt | ||
121 | * @mid: dma device for which masking is required | ||
122 | * | ||
123 | * Masks the DMA periphral interrupt | ||
124 | * this is valid for DMAC1 family controllers only | ||
125 | * This controller should have periphral mask registers already mapped | ||
126 | */ | ||
127 | static void dmac1_mask_periphral_intr(struct middma_device *mid) | ||
128 | { | ||
129 | u32 pimr; | ||
130 | |||
131 | if (mid->pimr_mask) { | ||
132 | pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); | ||
133 | pimr |= mid->pimr_mask; | ||
134 | writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); | ||
135 | } | ||
136 | return; | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * dmac1_unmask_periphral_intr - unmask the periphral interrupt | ||
141 | * @midc: dma channel for which masking is required | ||
142 | * | ||
143 | * UnMasks the DMA periphral interrupt, | ||
144 | * this is valid for DMAC1 family controllers only | ||
145 | * This controller should have periphral mask registers already mapped | ||
146 | */ | ||
147 | static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) | ||
148 | { | ||
149 | u32 pimr; | ||
150 | struct middma_device *mid = to_middma_device(midc->chan.device); | ||
151 | |||
152 | if (mid->pimr_mask) { | ||
153 | pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); | ||
154 | pimr &= ~mid->pimr_mask; | ||
155 | writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); | ||
156 | } | ||
157 | return; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * enable_dma_interrupt - enable the periphral interrupt | ||
162 | * @midc: dma channel for which enable interrupt is required | ||
163 | * | ||
164 | * Enable the DMA periphral interrupt, | ||
165 | * this is valid for DMAC1 family controllers only | ||
166 | * This controller should have periphral mask registers already mapped | ||
167 | */ | ||
168 | static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) | ||
169 | { | ||
170 | dmac1_unmask_periphral_intr(midc); | ||
171 | |||
172 | /*en ch interrupts*/ | ||
173 | iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); | ||
174 | iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); | ||
175 | return; | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * disable_dma_interrupt - disable the periphral interrupt | ||
180 | * @midc: dma channel for which disable interrupt is required | ||
181 | * | ||
182 | * Disable the DMA periphral interrupt, | ||
183 | * this is valid for DMAC1 family controllers only | ||
184 | * This controller should have periphral mask registers already mapped | ||
185 | */ | ||
186 | static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) | ||
187 | { | ||
188 | /*Check LPE PISR, make sure fwd is disabled*/ | ||
189 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); | ||
190 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); | ||
191 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); | ||
192 | return; | ||
193 | } | ||
194 | |||
195 | /***************************************************************************** | ||
196 | DMA channel helper Functions*/ | ||
197 | /** | ||
198 | * mid_desc_get - get a descriptor | ||
199 | * @midc: dma channel for which descriptor is required | ||
200 | * | ||
201 | * Obtain a descriptor for the channel. Returns NULL if none are free. | ||
202 | * Once the descriptor is returned it is private until put on another | ||
203 | * list or freed | ||
204 | */ | ||
205 | static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) | ||
206 | { | ||
207 | struct intel_mid_dma_desc *desc, *_desc; | ||
208 | struct intel_mid_dma_desc *ret = NULL; | ||
209 | |||
210 | spin_lock_bh(&midc->lock); | ||
211 | list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { | ||
212 | if (async_tx_test_ack(&desc->txd)) { | ||
213 | list_del(&desc->desc_node); | ||
214 | ret = desc; | ||
215 | break; | ||
216 | } | ||
217 | } | ||
218 | spin_unlock_bh(&midc->lock); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * mid_desc_put - put a descriptor | ||
224 | * @midc: dma channel for which descriptor is required | ||
225 | * @desc: descriptor to put | ||
226 | * | ||
227 | * Return a descriptor from lwn_desc_get back to the free pool | ||
228 | */ | ||
229 | static void midc_desc_put(struct intel_mid_dma_chan *midc, | ||
230 | struct intel_mid_dma_desc *desc) | ||
231 | { | ||
232 | if (desc) { | ||
233 | spin_lock_bh(&midc->lock); | ||
234 | list_add_tail(&desc->desc_node, &midc->free_list); | ||
235 | spin_unlock_bh(&midc->lock); | ||
236 | } | ||
237 | } | ||
238 | /** | ||
239 | * midc_dostart - begin a DMA transaction | ||
240 | * @midc: channel for which txn is to be started | ||
241 | * @first: first descriptor of series | ||
242 | * | ||
243 | * Load a transaction into the engine. This must be called with midc->lock | ||
244 | * held and bh disabled. | ||
245 | */ | ||
246 | static void midc_dostart(struct intel_mid_dma_chan *midc, | ||
247 | struct intel_mid_dma_desc *first) | ||
248 | { | ||
249 | struct middma_device *mid = to_middma_device(midc->chan.device); | ||
250 | |||
251 | /* channel is idle */ | ||
252 | if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { | ||
253 | /*error*/ | ||
254 | pr_err("ERR_MDMA: channel is busy in start\n"); | ||
255 | /* The tasklet will hopefully advance the queue... */ | ||
256 | return; | ||
257 | } | ||
258 | midc->busy = true; | ||
259 | /*write registers and en*/ | ||
260 | iowrite32(first->sar, midc->ch_regs + SAR); | ||
261 | iowrite32(first->dar, midc->ch_regs + DAR); | ||
262 | iowrite32(first->lli_phys, midc->ch_regs + LLP); | ||
263 | iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); | ||
264 | iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); | ||
265 | iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); | ||
266 | iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); | ||
267 | pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", | ||
268 | (int)first->sar, (int)first->dar, first->cfg_hi, | ||
269 | first->cfg_lo, first->ctl_hi, first->ctl_lo); | ||
270 | first->status = DMA_IN_PROGRESS; | ||
271 | |||
272 | iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * midc_descriptor_complete - process completed descriptor | ||
277 | * @midc: channel owning the descriptor | ||
278 | * @desc: the descriptor itself | ||
279 | * | ||
280 | * Process a completed descriptor and perform any callbacks upon | ||
281 | * the completion. The completion handling drops the lock during the | ||
282 | * callbacks but must be called with the lock held. | ||
283 | */ | ||
284 | static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | ||
285 | struct intel_mid_dma_desc *desc) | ||
286 | __releases(&midc->lock) __acquires(&midc->lock) | ||
287 | { | ||
288 | struct dma_async_tx_descriptor *txd = &desc->txd; | ||
289 | dma_async_tx_callback callback_txd = NULL; | ||
290 | struct intel_mid_dma_lli *llitem; | ||
291 | void *param_txd = NULL; | ||
292 | |||
293 | dma_cookie_complete(txd); | ||
294 | callback_txd = txd->callback; | ||
295 | param_txd = txd->callback_param; | ||
296 | |||
297 | if (desc->lli != NULL) { | ||
298 | /*clear the DONE bit of completed LLI in memory*/ | ||
299 | llitem = desc->lli + desc->current_lli; | ||
300 | llitem->ctl_hi &= CLEAR_DONE; | ||
301 | if (desc->current_lli < desc->lli_length-1) | ||
302 | (desc->current_lli)++; | ||
303 | else | ||
304 | desc->current_lli = 0; | ||
305 | } | ||
306 | spin_unlock_bh(&midc->lock); | ||
307 | if (callback_txd) { | ||
308 | pr_debug("MDMA: TXD callback set ... calling\n"); | ||
309 | callback_txd(param_txd); | ||
310 | } | ||
311 | if (midc->raw_tfr) { | ||
312 | desc->status = DMA_COMPLETE; | ||
313 | if (desc->lli != NULL) { | ||
314 | pci_pool_free(desc->lli_pool, desc->lli, | ||
315 | desc->lli_phys); | ||
316 | pci_pool_destroy(desc->lli_pool); | ||
317 | desc->lli = NULL; | ||
318 | } | ||
319 | list_move(&desc->desc_node, &midc->free_list); | ||
320 | midc->busy = false; | ||
321 | } | ||
322 | spin_lock_bh(&midc->lock); | ||
323 | |||
324 | } | ||
325 | /** | ||
326 | * midc_scan_descriptors - check the descriptors in channel | ||
327 | * mark completed when tx is completete | ||
328 | * @mid: device | ||
329 | * @midc: channel to scan | ||
330 | * | ||
331 | * Walk the descriptor chain for the device and process any entries | ||
332 | * that are complete. | ||
333 | */ | ||
334 | static void midc_scan_descriptors(struct middma_device *mid, | ||
335 | struct intel_mid_dma_chan *midc) | ||
336 | { | ||
337 | struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; | ||
338 | |||
339 | /*tx is complete*/ | ||
340 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | ||
341 | if (desc->status == DMA_IN_PROGRESS) | ||
342 | midc_descriptor_complete(midc, desc); | ||
343 | } | ||
344 | return; | ||
345 | } | ||
346 | /** | ||
347 | * midc_lli_fill_sg - Helper function to convert | ||
348 | * SG list to Linked List Items. | ||
349 | *@midc: Channel | ||
350 | *@desc: DMA descriptor | ||
351 | *@sglist: Pointer to SG list | ||
352 | *@sglen: SG list length | ||
353 | *@flags: DMA transaction flags | ||
354 | * | ||
355 | * Walk through the SG list and convert the SG list into Linked | ||
356 | * List Items (LLI). | ||
357 | */ | ||
358 | static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, | ||
359 | struct intel_mid_dma_desc *desc, | ||
360 | struct scatterlist *sglist, | ||
361 | unsigned int sglen, | ||
362 | unsigned int flags) | ||
363 | { | ||
364 | struct intel_mid_dma_slave *mids; | ||
365 | struct scatterlist *sg; | ||
366 | dma_addr_t lli_next, sg_phy_addr; | ||
367 | struct intel_mid_dma_lli *lli_bloc_desc; | ||
368 | union intel_mid_dma_ctl_lo ctl_lo; | ||
369 | union intel_mid_dma_ctl_hi ctl_hi; | ||
370 | int i; | ||
371 | |||
372 | pr_debug("MDMA: Entered midc_lli_fill_sg\n"); | ||
373 | mids = midc->mid_slave; | ||
374 | |||
375 | lli_bloc_desc = desc->lli; | ||
376 | lli_next = desc->lli_phys; | ||
377 | |||
378 | ctl_lo.ctl_lo = desc->ctl_lo; | ||
379 | ctl_hi.ctl_hi = desc->ctl_hi; | ||
380 | for_each_sg(sglist, sg, sglen, i) { | ||
381 | /*Populate CTL_LOW and LLI values*/ | ||
382 | if (i != sglen - 1) { | ||
383 | lli_next = lli_next + | ||
384 | sizeof(struct intel_mid_dma_lli); | ||
385 | } else { | ||
386 | /*Check for circular list, otherwise terminate LLI to ZERO*/ | ||
387 | if (flags & DMA_PREP_CIRCULAR_LIST) { | ||
388 | pr_debug("MDMA: LLI is configured in circular mode\n"); | ||
389 | lli_next = desc->lli_phys; | ||
390 | } else { | ||
391 | lli_next = 0; | ||
392 | ctl_lo.ctlx.llp_dst_en = 0; | ||
393 | ctl_lo.ctlx.llp_src_en = 0; | ||
394 | } | ||
395 | } | ||
396 | /*Populate CTL_HI values*/ | ||
397 | ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg), | ||
398 | desc->width, | ||
399 | midc->dma->block_size); | ||
400 | /*Populate SAR and DAR values*/ | ||
401 | sg_phy_addr = sg_dma_address(sg); | ||
402 | if (desc->dirn == DMA_MEM_TO_DEV) { | ||
403 | lli_bloc_desc->sar = sg_phy_addr; | ||
404 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; | ||
405 | } else if (desc->dirn == DMA_DEV_TO_MEM) { | ||
406 | lli_bloc_desc->sar = mids->dma_slave.src_addr; | ||
407 | lli_bloc_desc->dar = sg_phy_addr; | ||
408 | } | ||
409 | /*Copy values into block descriptor in system memroy*/ | ||
410 | lli_bloc_desc->llp = lli_next; | ||
411 | lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; | ||
412 | lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; | ||
413 | |||
414 | lli_bloc_desc++; | ||
415 | } | ||
416 | /*Copy very first LLI values to descriptor*/ | ||
417 | desc->ctl_lo = desc->lli->ctl_lo; | ||
418 | desc->ctl_hi = desc->lli->ctl_hi; | ||
419 | desc->sar = desc->lli->sar; | ||
420 | desc->dar = desc->lli->dar; | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | /***************************************************************************** | ||
425 | DMA engine callback Functions*/ | ||
426 | /** | ||
427 | * intel_mid_dma_tx_submit - callback to submit DMA transaction | ||
428 | * @tx: dma engine descriptor | ||
429 | * | ||
430 | * Submit the DMA transaction for this descriptor, start if ch idle | ||
431 | */ | ||
432 | static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
433 | { | ||
434 | struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); | ||
435 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); | ||
436 | dma_cookie_t cookie; | ||
437 | |||
438 | spin_lock_bh(&midc->lock); | ||
439 | cookie = dma_cookie_assign(tx); | ||
440 | |||
441 | if (list_empty(&midc->active_list)) | ||
442 | list_add_tail(&desc->desc_node, &midc->active_list); | ||
443 | else | ||
444 | list_add_tail(&desc->desc_node, &midc->queue); | ||
445 | |||
446 | midc_dostart(midc, desc); | ||
447 | spin_unlock_bh(&midc->lock); | ||
448 | |||
449 | return cookie; | ||
450 | } | ||
451 | |||
452 | /** | ||
453 | * intel_mid_dma_issue_pending - callback to issue pending txn | ||
454 | * @chan: chan where pending trascation needs to be checked and submitted | ||
455 | * | ||
456 | * Call for scan to issue pending descriptors | ||
457 | */ | ||
458 | static void intel_mid_dma_issue_pending(struct dma_chan *chan) | ||
459 | { | ||
460 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
461 | |||
462 | spin_lock_bh(&midc->lock); | ||
463 | if (!list_empty(&midc->queue)) | ||
464 | midc_scan_descriptors(to_middma_device(chan->device), midc); | ||
465 | spin_unlock_bh(&midc->lock); | ||
466 | } | ||
467 | |||
468 | /** | ||
469 | * intel_mid_dma_tx_status - Return status of txn | ||
470 | * @chan: chan for where status needs to be checked | ||
471 | * @cookie: cookie for txn | ||
472 | * @txstate: DMA txn state | ||
473 | * | ||
474 | * Return status of DMA txn | ||
475 | */ | ||
476 | static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | ||
477 | dma_cookie_t cookie, | ||
478 | struct dma_tx_state *txstate) | ||
479 | { | ||
480 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
481 | enum dma_status ret; | ||
482 | |||
483 | ret = dma_cookie_status(chan, cookie, txstate); | ||
484 | if (ret != DMA_COMPLETE) { | ||
485 | spin_lock_bh(&midc->lock); | ||
486 | midc_scan_descriptors(to_middma_device(chan->device), midc); | ||
487 | spin_unlock_bh(&midc->lock); | ||
488 | |||
489 | ret = dma_cookie_status(chan, cookie, txstate); | ||
490 | } | ||
491 | |||
492 | return ret; | ||
493 | } | ||
494 | |||
495 | static int intel_mid_dma_config(struct dma_chan *chan, | ||
496 | struct dma_slave_config *slave) | ||
497 | { | ||
498 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
499 | struct intel_mid_dma_slave *mid_slave; | ||
500 | |||
501 | BUG_ON(!midc); | ||
502 | BUG_ON(!slave); | ||
503 | pr_debug("MDMA: slave control called\n"); | ||
504 | |||
505 | mid_slave = to_intel_mid_dma_slave(slave); | ||
506 | |||
507 | BUG_ON(!mid_slave); | ||
508 | |||
509 | midc->mid_slave = mid_slave; | ||
510 | return 0; | ||
511 | } | ||
512 | |||
513 | static int intel_mid_dma_terminate_all(struct dma_chan *chan) | ||
514 | { | ||
515 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
516 | struct middma_device *mid = to_middma_device(chan->device); | ||
517 | struct intel_mid_dma_desc *desc, *_desc; | ||
518 | union intel_mid_dma_cfg_lo cfg_lo; | ||
519 | |||
520 | spin_lock_bh(&midc->lock); | ||
521 | if (midc->busy == false) { | ||
522 | spin_unlock_bh(&midc->lock); | ||
523 | return 0; | ||
524 | } | ||
525 | /*Suspend and disable the channel*/ | ||
526 | cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); | ||
527 | cfg_lo.cfgx.ch_susp = 1; | ||
528 | iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); | ||
529 | iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); | ||
530 | midc->busy = false; | ||
531 | /* Disable interrupts */ | ||
532 | disable_dma_interrupt(midc); | ||
533 | midc->descs_allocated = 0; | ||
534 | |||
535 | spin_unlock_bh(&midc->lock); | ||
536 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | ||
537 | if (desc->lli != NULL) { | ||
538 | pci_pool_free(desc->lli_pool, desc->lli, | ||
539 | desc->lli_phys); | ||
540 | pci_pool_destroy(desc->lli_pool); | ||
541 | desc->lli = NULL; | ||
542 | } | ||
543 | list_move(&desc->desc_node, &midc->free_list); | ||
544 | } | ||
545 | return 0; | ||
546 | } | ||
547 | |||
548 | |||
549 | /** | ||
550 | * intel_mid_dma_prep_memcpy - Prep memcpy txn | ||
551 | * @chan: chan for DMA transfer | ||
552 | * @dest: destn address | ||
553 | * @src: src address | ||
554 | * @len: DMA transfer len | ||
555 | * @flags: DMA flags | ||
556 | * | ||
557 | * Perform a DMA memcpy. Note we support slave periphral DMA transfers only | ||
558 | * The periphral txn details should be filled in slave structure properly | ||
559 | * Returns the descriptor for this txn | ||
560 | */ | ||
561 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | ||
562 | struct dma_chan *chan, dma_addr_t dest, | ||
563 | dma_addr_t src, size_t len, unsigned long flags) | ||
564 | { | ||
565 | struct intel_mid_dma_chan *midc; | ||
566 | struct intel_mid_dma_desc *desc = NULL; | ||
567 | struct intel_mid_dma_slave *mids; | ||
568 | union intel_mid_dma_ctl_lo ctl_lo; | ||
569 | union intel_mid_dma_ctl_hi ctl_hi; | ||
570 | union intel_mid_dma_cfg_lo cfg_lo; | ||
571 | union intel_mid_dma_cfg_hi cfg_hi; | ||
572 | enum dma_slave_buswidth width; | ||
573 | |||
574 | pr_debug("MDMA: Prep for memcpy\n"); | ||
575 | BUG_ON(!chan); | ||
576 | if (!len) | ||
577 | return NULL; | ||
578 | |||
579 | midc = to_intel_mid_dma_chan(chan); | ||
580 | BUG_ON(!midc); | ||
581 | |||
582 | mids = midc->mid_slave; | ||
583 | BUG_ON(!mids); | ||
584 | |||
585 | pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", | ||
586 | midc->dma->pci_id, midc->ch_id, len); | ||
587 | pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", | ||
588 | mids->cfg_mode, mids->dma_slave.direction, | ||
589 | mids->hs_mode, mids->dma_slave.src_addr_width); | ||
590 | |||
591 | /*calculate CFG_LO*/ | ||
592 | if (mids->hs_mode == LNW_DMA_SW_HS) { | ||
593 | cfg_lo.cfg_lo = 0; | ||
594 | cfg_lo.cfgx.hs_sel_dst = 1; | ||
595 | cfg_lo.cfgx.hs_sel_src = 1; | ||
596 | } else if (mids->hs_mode == LNW_DMA_HW_HS) | ||
597 | cfg_lo.cfg_lo = 0x00000; | ||
598 | |||
599 | /*calculate CFG_HI*/ | ||
600 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | ||
601 | /*SW HS only*/ | ||
602 | cfg_hi.cfg_hi = 0; | ||
603 | } else { | ||
604 | cfg_hi.cfg_hi = 0; | ||
605 | if (midc->dma->pimr_mask) { | ||
606 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ | ||
607 | cfg_hi.cfgx.fifo_mode = 1; | ||
608 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { | ||
609 | cfg_hi.cfgx.src_per = 0; | ||
610 | if (mids->device_instance == 0) | ||
611 | cfg_hi.cfgx.dst_per = 3; | ||
612 | if (mids->device_instance == 1) | ||
613 | cfg_hi.cfgx.dst_per = 1; | ||
614 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { | ||
615 | if (mids->device_instance == 0) | ||
616 | cfg_hi.cfgx.src_per = 2; | ||
617 | if (mids->device_instance == 1) | ||
618 | cfg_hi.cfgx.src_per = 0; | ||
619 | cfg_hi.cfgx.dst_per = 0; | ||
620 | } | ||
621 | } else { | ||
622 | cfg_hi.cfgx.protctl = 0x1; /*default value*/ | ||
623 | cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = | ||
624 | midc->ch_id - midc->dma->chan_base; | ||
625 | } | ||
626 | } | ||
627 | |||
628 | /*calculate CTL_HI*/ | ||
629 | ctl_hi.ctlx.reser = 0; | ||
630 | ctl_hi.ctlx.done = 0; | ||
631 | width = mids->dma_slave.src_addr_width; | ||
632 | |||
633 | ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); | ||
634 | pr_debug("MDMA:calc len %d for block size %d\n", | ||
635 | ctl_hi.ctlx.block_ts, midc->dma->block_size); | ||
636 | /*calculate CTL_LO*/ | ||
637 | ctl_lo.ctl_lo = 0; | ||
638 | ctl_lo.ctlx.int_en = 1; | ||
639 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; | ||
640 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; | ||
641 | |||
642 | /* | ||
643 | * Here we need some translation from "enum dma_slave_buswidth" | ||
644 | * to the format for our dma controller | ||
645 | * standard intel_mid_dmac's format | ||
646 | * 1 Byte 0b000 | ||
647 | * 2 Bytes 0b001 | ||
648 | * 4 Bytes 0b010 | ||
649 | */ | ||
650 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; | ||
651 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; | ||
652 | |||
653 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | ||
654 | ctl_lo.ctlx.tt_fc = 0; | ||
655 | ctl_lo.ctlx.sinc = 0; | ||
656 | ctl_lo.ctlx.dinc = 0; | ||
657 | } else { | ||
658 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { | ||
659 | ctl_lo.ctlx.sinc = 0; | ||
660 | ctl_lo.ctlx.dinc = 2; | ||
661 | ctl_lo.ctlx.tt_fc = 1; | ||
662 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { | ||
663 | ctl_lo.ctlx.sinc = 2; | ||
664 | ctl_lo.ctlx.dinc = 0; | ||
665 | ctl_lo.ctlx.tt_fc = 2; | ||
666 | } | ||
667 | } | ||
668 | |||
669 | pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", | ||
670 | ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); | ||
671 | |||
672 | enable_dma_interrupt(midc); | ||
673 | |||
674 | desc = midc_desc_get(midc); | ||
675 | if (desc == NULL) | ||
676 | goto err_desc_get; | ||
677 | desc->sar = src; | ||
678 | desc->dar = dest ; | ||
679 | desc->len = len; | ||
680 | desc->cfg_hi = cfg_hi.cfg_hi; | ||
681 | desc->cfg_lo = cfg_lo.cfg_lo; | ||
682 | desc->ctl_lo = ctl_lo.ctl_lo; | ||
683 | desc->ctl_hi = ctl_hi.ctl_hi; | ||
684 | desc->width = width; | ||
685 | desc->dirn = mids->dma_slave.direction; | ||
686 | desc->lli_phys = 0; | ||
687 | desc->lli = NULL; | ||
688 | desc->lli_pool = NULL; | ||
689 | return &desc->txd; | ||
690 | |||
691 | err_desc_get: | ||
692 | pr_err("ERR_MDMA: Failed to get desc\n"); | ||
693 | midc_desc_put(midc, desc); | ||
694 | return NULL; | ||
695 | } | ||
696 | /** | ||
697 | * intel_mid_dma_prep_slave_sg - Prep slave sg txn | ||
698 | * @chan: chan for DMA transfer | ||
699 | * @sgl: scatter gather list | ||
700 | * @sg_len: length of sg txn | ||
701 | * @direction: DMA transfer dirtn | ||
702 | * @flags: DMA flags | ||
703 | * @context: transfer context (ignored) | ||
704 | * | ||
705 | * Prepares LLI based periphral transfer | ||
706 | */ | ||
707 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | ||
708 | struct dma_chan *chan, struct scatterlist *sgl, | ||
709 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
710 | unsigned long flags, void *context) | ||
711 | { | ||
712 | struct intel_mid_dma_chan *midc = NULL; | ||
713 | struct intel_mid_dma_slave *mids = NULL; | ||
714 | struct intel_mid_dma_desc *desc = NULL; | ||
715 | struct dma_async_tx_descriptor *txd = NULL; | ||
716 | union intel_mid_dma_ctl_lo ctl_lo; | ||
717 | |||
718 | pr_debug("MDMA: Prep for slave SG\n"); | ||
719 | |||
720 | if (!sg_len) { | ||
721 | pr_err("MDMA: Invalid SG length\n"); | ||
722 | return NULL; | ||
723 | } | ||
724 | midc = to_intel_mid_dma_chan(chan); | ||
725 | BUG_ON(!midc); | ||
726 | |||
727 | mids = midc->mid_slave; | ||
728 | BUG_ON(!mids); | ||
729 | |||
730 | if (!midc->dma->pimr_mask) { | ||
731 | /* We can still handle sg list with only one item */ | ||
732 | if (sg_len == 1) { | ||
733 | txd = intel_mid_dma_prep_memcpy(chan, | ||
734 | mids->dma_slave.dst_addr, | ||
735 | mids->dma_slave.src_addr, | ||
736 | sg_dma_len(sgl), | ||
737 | flags); | ||
738 | return txd; | ||
739 | } else { | ||
740 | pr_warn("MDMA: SG list is not supported by this controller\n"); | ||
741 | return NULL; | ||
742 | } | ||
743 | } | ||
744 | |||
745 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", | ||
746 | sg_len, direction, flags); | ||
747 | |||
748 | txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags); | ||
749 | if (NULL == txd) { | ||
750 | pr_err("MDMA: Prep memcpy failed\n"); | ||
751 | return NULL; | ||
752 | } | ||
753 | |||
754 | desc = to_intel_mid_dma_desc(txd); | ||
755 | desc->dirn = direction; | ||
756 | ctl_lo.ctl_lo = desc->ctl_lo; | ||
757 | ctl_lo.ctlx.llp_dst_en = 1; | ||
758 | ctl_lo.ctlx.llp_src_en = 1; | ||
759 | desc->ctl_lo = ctl_lo.ctl_lo; | ||
760 | desc->lli_length = sg_len; | ||
761 | desc->current_lli = 0; | ||
762 | /* DMA coherent memory pool for LLI descriptors*/ | ||
763 | desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", | ||
764 | midc->dma->pdev, | ||
765 | (sizeof(struct intel_mid_dma_lli)*sg_len), | ||
766 | 32, 0); | ||
767 | if (NULL == desc->lli_pool) { | ||
768 | pr_err("MID_DMA:LLI pool create failed\n"); | ||
769 | return NULL; | ||
770 | } | ||
771 | |||
772 | desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); | ||
773 | if (!desc->lli) { | ||
774 | pr_err("MID_DMA: LLI alloc failed\n"); | ||
775 | pci_pool_destroy(desc->lli_pool); | ||
776 | return NULL; | ||
777 | } | ||
778 | |||
779 | midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); | ||
780 | if (flags & DMA_PREP_INTERRUPT) { | ||
781 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
782 | midc->dma_base + MASK_BLOCK); | ||
783 | pr_debug("MDMA:Enabled Block interrupt\n"); | ||
784 | } | ||
785 | return &desc->txd; | ||
786 | } | ||
787 | |||
788 | /** | ||
789 | * intel_mid_dma_free_chan_resources - Frees dma resources | ||
790 | * @chan: chan requiring attention | ||
791 | * | ||
792 | * Frees the allocated resources on this DMA chan | ||
793 | */ | ||
794 | static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | ||
795 | { | ||
796 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
797 | struct middma_device *mid = to_middma_device(chan->device); | ||
798 | struct intel_mid_dma_desc *desc, *_desc; | ||
799 | |||
800 | if (true == midc->busy) { | ||
801 | /*trying to free ch in use!!!!!*/ | ||
802 | pr_err("ERR_MDMA: trying to free ch in use\n"); | ||
803 | } | ||
804 | spin_lock_bh(&midc->lock); | ||
805 | midc->descs_allocated = 0; | ||
806 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | ||
807 | list_del(&desc->desc_node); | ||
808 | pci_pool_free(mid->dma_pool, desc, desc->txd.phys); | ||
809 | } | ||
810 | list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { | ||
811 | list_del(&desc->desc_node); | ||
812 | pci_pool_free(mid->dma_pool, desc, desc->txd.phys); | ||
813 | } | ||
814 | list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { | ||
815 | list_del(&desc->desc_node); | ||
816 | pci_pool_free(mid->dma_pool, desc, desc->txd.phys); | ||
817 | } | ||
818 | spin_unlock_bh(&midc->lock); | ||
819 | midc->in_use = false; | ||
820 | midc->busy = false; | ||
821 | /* Disable CH interrupts */ | ||
822 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); | ||
823 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); | ||
824 | pm_runtime_put(&mid->pdev->dev); | ||
825 | } | ||
826 | |||
827 | /** | ||
828 | * intel_mid_dma_alloc_chan_resources - Allocate dma resources | ||
829 | * @chan: chan requiring attention | ||
830 | * | ||
831 | * Allocates DMA resources on this chan | ||
832 | * Return the descriptors allocated | ||
833 | */ | ||
834 | static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | ||
835 | { | ||
836 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
837 | struct middma_device *mid = to_middma_device(chan->device); | ||
838 | struct intel_mid_dma_desc *desc; | ||
839 | dma_addr_t phys; | ||
840 | int i = 0; | ||
841 | |||
842 | pm_runtime_get_sync(&mid->pdev->dev); | ||
843 | |||
844 | if (mid->state == SUSPENDED) { | ||
845 | if (dma_resume(&mid->pdev->dev)) { | ||
846 | pr_err("ERR_MDMA: resume failed"); | ||
847 | return -EFAULT; | ||
848 | } | ||
849 | } | ||
850 | |||
851 | /* ASSERT: channel is idle */ | ||
852 | if (test_ch_en(mid->dma_base, midc->ch_id)) { | ||
853 | /*ch is not idle*/ | ||
854 | pr_err("ERR_MDMA: ch not idle\n"); | ||
855 | pm_runtime_put(&mid->pdev->dev); | ||
856 | return -EIO; | ||
857 | } | ||
858 | dma_cookie_init(chan); | ||
859 | |||
860 | spin_lock_bh(&midc->lock); | ||
861 | while (midc->descs_allocated < DESCS_PER_CHANNEL) { | ||
862 | spin_unlock_bh(&midc->lock); | ||
863 | desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); | ||
864 | if (!desc) { | ||
865 | pr_err("ERR_MDMA: desc failed\n"); | ||
866 | pm_runtime_put(&mid->pdev->dev); | ||
867 | return -ENOMEM; | ||
868 | /*check*/ | ||
869 | } | ||
870 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
871 | desc->txd.tx_submit = intel_mid_dma_tx_submit; | ||
872 | desc->txd.flags = DMA_CTRL_ACK; | ||
873 | desc->txd.phys = phys; | ||
874 | spin_lock_bh(&midc->lock); | ||
875 | i = ++midc->descs_allocated; | ||
876 | list_add_tail(&desc->desc_node, &midc->free_list); | ||
877 | } | ||
878 | spin_unlock_bh(&midc->lock); | ||
879 | midc->in_use = true; | ||
880 | midc->busy = false; | ||
881 | pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); | ||
882 | return i; | ||
883 | } | ||
884 | |||
885 | /** | ||
886 | * midc_handle_error - Handle DMA txn error | ||
887 | * @mid: controller where error occurred | ||
888 | * @midc: chan where error occurred | ||
889 | * | ||
890 | * Scan the descriptor for error | ||
891 | */ | ||
892 | static void midc_handle_error(struct middma_device *mid, | ||
893 | struct intel_mid_dma_chan *midc) | ||
894 | { | ||
895 | midc_scan_descriptors(mid, midc); | ||
896 | } | ||
897 | |||
898 | /** | ||
899 | * dma_tasklet - DMA interrupt tasklet | ||
900 | * @data: tasklet arg (the controller structure) | ||
901 | * | ||
902 | * Scan the controller for interrupts for completion/error | ||
903 | * Clear the interrupt and call for handling completion/error | ||
904 | */ | ||
905 | static void dma_tasklet(unsigned long data) | ||
906 | { | ||
907 | struct middma_device *mid = NULL; | ||
908 | struct intel_mid_dma_chan *midc = NULL; | ||
909 | u32 status, raw_tfr, raw_block; | ||
910 | int i; | ||
911 | |||
912 | mid = (struct middma_device *)data; | ||
913 | if (mid == NULL) { | ||
914 | pr_err("ERR_MDMA: tasklet Null param\n"); | ||
915 | return; | ||
916 | } | ||
917 | pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); | ||
918 | raw_tfr = ioread32(mid->dma_base + RAW_TFR); | ||
919 | raw_block = ioread32(mid->dma_base + RAW_BLOCK); | ||
920 | status = raw_tfr | raw_block; | ||
921 | status &= mid->intr_mask; | ||
922 | while (status) { | ||
923 | /*txn interrupt*/ | ||
924 | i = get_ch_index(&status, mid->chan_base); | ||
925 | if (i < 0) { | ||
926 | pr_err("ERR_MDMA:Invalid ch index %x\n", i); | ||
927 | return; | ||
928 | } | ||
929 | midc = &mid->ch[i]; | ||
930 | if (midc == NULL) { | ||
931 | pr_err("ERR_MDMA:Null param midc\n"); | ||
932 | return; | ||
933 | } | ||
934 | pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", | ||
935 | status, midc->ch_id, i); | ||
936 | midc->raw_tfr = raw_tfr; | ||
937 | midc->raw_block = raw_block; | ||
938 | spin_lock_bh(&midc->lock); | ||
939 | /*clearing this interrupts first*/ | ||
940 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); | ||
941 | if (raw_block) { | ||
942 | iowrite32((1 << midc->ch_id), | ||
943 | mid->dma_base + CLEAR_BLOCK); | ||
944 | } | ||
945 | midc_scan_descriptors(mid, midc); | ||
946 | pr_debug("MDMA:Scan of desc... complete, unmasking\n"); | ||
947 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
948 | mid->dma_base + MASK_TFR); | ||
949 | if (raw_block) { | ||
950 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
951 | mid->dma_base + MASK_BLOCK); | ||
952 | } | ||
953 | spin_unlock_bh(&midc->lock); | ||
954 | } | ||
955 | |||
956 | status = ioread32(mid->dma_base + RAW_ERR); | ||
957 | status &= mid->intr_mask; | ||
958 | while (status) { | ||
959 | /*err interrupt*/ | ||
960 | i = get_ch_index(&status, mid->chan_base); | ||
961 | if (i < 0) { | ||
962 | pr_err("ERR_MDMA:Invalid ch index %x\n", i); | ||
963 | return; | ||
964 | } | ||
965 | midc = &mid->ch[i]; | ||
966 | if (midc == NULL) { | ||
967 | pr_err("ERR_MDMA:Null param midc\n"); | ||
968 | return; | ||
969 | } | ||
970 | pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", | ||
971 | status, midc->ch_id, i); | ||
972 | |||
973 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); | ||
974 | spin_lock_bh(&midc->lock); | ||
975 | midc_handle_error(mid, midc); | ||
976 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
977 | mid->dma_base + MASK_ERR); | ||
978 | spin_unlock_bh(&midc->lock); | ||
979 | } | ||
980 | pr_debug("MDMA:Exiting takslet...\n"); | ||
981 | return; | ||
982 | } | ||
983 | |||
984 | static void dma_tasklet1(unsigned long data) | ||
985 | { | ||
986 | pr_debug("MDMA:in takslet1...\n"); | ||
987 | return dma_tasklet(data); | ||
988 | } | ||
989 | |||
990 | static void dma_tasklet2(unsigned long data) | ||
991 | { | ||
992 | pr_debug("MDMA:in takslet2...\n"); | ||
993 | return dma_tasklet(data); | ||
994 | } | ||
995 | |||
996 | /** | ||
997 | * intel_mid_dma_interrupt - DMA ISR | ||
998 | * @irq: IRQ where interrupt occurred | ||
999 | * @data: ISR cllback data (the controller structure) | ||
1000 | * | ||
1001 | * See if this is our interrupt if so then schedule the tasklet | ||
1002 | * otherwise ignore | ||
1003 | */ | ||
1004 | static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | ||
1005 | { | ||
1006 | struct middma_device *mid = data; | ||
1007 | u32 tfr_status, err_status; | ||
1008 | int call_tasklet = 0; | ||
1009 | |||
1010 | tfr_status = ioread32(mid->dma_base + RAW_TFR); | ||
1011 | err_status = ioread32(mid->dma_base + RAW_ERR); | ||
1012 | if (!tfr_status && !err_status) | ||
1013 | return IRQ_NONE; | ||
1014 | |||
1015 | /*DMA Interrupt*/ | ||
1016 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); | ||
1017 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); | ||
1018 | tfr_status &= mid->intr_mask; | ||
1019 | if (tfr_status) { | ||
1020 | /*need to disable intr*/ | ||
1021 | iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); | ||
1022 | iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); | ||
1023 | pr_debug("MDMA: Calling tasklet %x\n", tfr_status); | ||
1024 | call_tasklet = 1; | ||
1025 | } | ||
1026 | err_status &= mid->intr_mask; | ||
1027 | if (err_status) { | ||
1028 | iowrite32((err_status << INT_MASK_WE), | ||
1029 | mid->dma_base + MASK_ERR); | ||
1030 | call_tasklet = 1; | ||
1031 | } | ||
1032 | if (call_tasklet) | ||
1033 | tasklet_schedule(&mid->tasklet); | ||
1034 | |||
1035 | return IRQ_HANDLED; | ||
1036 | } | ||
1037 | |||
1038 | static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) | ||
1039 | { | ||
1040 | return intel_mid_dma_interrupt(irq, data); | ||
1041 | } | ||
1042 | |||
1043 | static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) | ||
1044 | { | ||
1045 | return intel_mid_dma_interrupt(irq, data); | ||
1046 | } | ||
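The two handlers above are thin per-controller wrappers around the common ISR, apparently kept separate so DMAC1 and DMAC2 can be told apart in backtraces. The top half only acknowledges and masks the source, deferring descriptor processing to the tasklet. A minimal sketch of that top-half/bottom-half split (demo_dev, DEMO_RAW_STATUS and DEMO_MASK are illustrative placeholders, not registers of this controller):

	static irqreturn_t demo_isr(int irq, void *data)
	{
		struct demo_dev *dd = data;
		u32 pending = ioread32(dd->base + DEMO_RAW_STATUS);

		if (!pending)
			return IRQ_NONE;	/* shared line, not our interrupt */

		/* mask the source so the line deasserts ... */
		iowrite32(pending << INT_MASK_WE, dd->base + DEMO_MASK);
		/* ... and defer the heavy lifting to softirq context */
		tasklet_schedule(&dd->tasklet);
		return IRQ_HANDLED;
	}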
1047 | |||
1048 | /** | ||
1049 | * mid_setup_dma - Setup the DMA controller | ||
1050 | * @pdev: Controller PCI device structure | ||
1051 | * | ||
1052 | * Initialize the DMA controller: set up the channels, register with | ||
1053 | * the DMA engine and install the ISR. | ||
1054 | */ | ||
1055 | static int mid_setup_dma(struct pci_dev *pdev) | ||
1056 | { | ||
1057 | struct middma_device *dma = pci_get_drvdata(pdev); | ||
1058 | int err, i; | ||
1059 | |||
1060 | /* DMA coherent memory pool for DMA descriptor allocations */ | ||
1061 | dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, | ||
1062 | sizeof(struct intel_mid_dma_desc), | ||
1063 | 32, 0); | ||
1064 | if (NULL == dma->dma_pool) { | ||
1065 | pr_err("ERR_MDMA:pci_pool_create failed\n"); | ||
1066 | err = -ENOMEM; | ||
1067 | goto err_dma_pool; | ||
1068 | } | ||
1069 | |||
1070 | INIT_LIST_HEAD(&dma->common.channels); | ||
1071 | dma->pci_id = pdev->device; | ||
1072 | if (dma->pimr_mask) { | ||
1073 | dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, | ||
1074 | LNW_PERIPHRAL_MASK_SIZE); | ||
1075 | if (dma->mask_reg == NULL) { | ||
1076 | pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); | ||
1077 | err = -ENOMEM; | ||
1078 | goto err_ioremap; | ||
1079 | } | ||
1080 | } else | ||
1081 | dma->mask_reg = NULL; | ||
1082 | |||
1083 | pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); | ||
1084 | /*init CH structures*/ | ||
1085 | dma->intr_mask = 0; | ||
1086 | dma->state = RUNNING; | ||
1087 | for (i = 0; i < dma->max_chan; i++) { | ||
1088 | struct intel_mid_dma_chan *midch = &dma->ch[i]; | ||
1089 | |||
1090 | midch->chan.device = &dma->common; | ||
1091 | dma_cookie_init(&midch->chan); | ||
1092 | midch->ch_id = dma->chan_base + i; | ||
1093 | pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); | ||
1094 | |||
1095 | midch->dma_base = dma->dma_base; | ||
1096 | midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; | ||
1097 | midch->dma = dma; | ||
1098 | dma->intr_mask |= 1 << (dma->chan_base + i); | ||
1099 | spin_lock_init(&midch->lock); | ||
1100 | |||
1101 | INIT_LIST_HEAD(&midch->active_list); | ||
1102 | INIT_LIST_HEAD(&midch->queue); | ||
1103 | INIT_LIST_HEAD(&midch->free_list); | ||
1104 | /*mask interrupts*/ | ||
1105 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1106 | dma->dma_base + MASK_BLOCK); | ||
1107 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1108 | dma->dma_base + MASK_SRC_TRAN); | ||
1109 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1110 | dma->dma_base + MASK_DST_TRAN); | ||
1111 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1112 | dma->dma_base + MASK_ERR); | ||
1113 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1114 | dma->dma_base + MASK_TFR); | ||
1115 | |||
1116 | disable_dma_interrupt(midch); | ||
1117 | list_add_tail(&midch->chan.device_node, &dma->common.channels); | ||
1118 | } | ||
1119 | pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); | ||
1120 | |||
1121 | /*init dma structure*/ | ||
1122 | dma_cap_zero(dma->common.cap_mask); | ||
1123 | dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); | ||
1124 | dma_cap_set(DMA_SLAVE, dma->common.cap_mask); | ||
1125 | dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); | ||
1126 | dma->common.dev = &pdev->dev; | ||
1127 | |||
1128 | dma->common.device_alloc_chan_resources = | ||
1129 | intel_mid_dma_alloc_chan_resources; | ||
1130 | dma->common.device_free_chan_resources = | ||
1131 | intel_mid_dma_free_chan_resources; | ||
1132 | |||
1133 | dma->common.device_tx_status = intel_mid_dma_tx_status; | ||
1134 | dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; | ||
1135 | dma->common.device_issue_pending = intel_mid_dma_issue_pending; | ||
1136 | dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; | ||
1137 | dma->common.device_config = intel_mid_dma_config; | ||
1138 | dma->common.device_terminate_all = intel_mid_dma_terminate_all; | ||
1139 | |||
1140 | /*enable dma cntrl*/ | ||
1141 | iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); | ||
1142 | |||
1143 | /*register irq */ | ||
1144 | if (dma->pimr_mask) { | ||
1145 | pr_debug("MDMA:Requesting irq shared for DMAC1\n"); | ||
1146 | err = request_irq(pdev->irq, intel_mid_dma_interrupt1, | ||
1147 | IRQF_SHARED, "INTEL_MID_DMAC1", dma); | ||
1148 | if (0 != err) | ||
1149 | goto err_irq; | ||
1150 | } else { | ||
1151 | dma->intr_mask = 0x03; | ||
1152 | pr_debug("MDMA:Requesting irq for DMAC2\n"); | ||
1153 | err = request_irq(pdev->irq, intel_mid_dma_interrupt2, | ||
1154 | IRQF_SHARED, "INTEL_MID_DMAC2", dma); | ||
1155 | if (0 != err) | ||
1156 | goto err_irq; | ||
1157 | } | ||
1158 | /*register device w/ engine*/ | ||
1159 | err = dma_async_device_register(&dma->common); | ||
1160 | if (0 != err) { | ||
1161 | pr_err("ERR_MDMA:device_register failed: %d\n", err); | ||
1162 | goto err_engine; | ||
1163 | } | ||
1164 | if (dma->pimr_mask) { | ||
1165 | pr_debug("setting up tasklet1 for DMAC1\n"); | ||
1166 | tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); | ||
1167 | } else { | ||
1168 | pr_debug("setting up tasklet2 for DMAC2\n"); | ||
1169 | tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); | ||
1170 | } | ||
1171 | return 0; | ||
1172 | |||
1173 | err_engine: | ||
1174 | free_irq(pdev->irq, dma); | ||
1175 | err_irq: | ||
1176 | if (dma->mask_reg) | ||
1177 | iounmap(dma->mask_reg); | ||
1178 | err_ioremap: | ||
1179 | pci_pool_destroy(dma->dma_pool); | ||
1180 | err_dma_pool: | ||
1181 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); | ||
1182 | return err; | ||
1183 | |||
1184 | } | ||
1185 | |||
1186 | /** | ||
1187 | * middma_shutdown - Shutdown the DMA controller | ||
1188 | * @pdev: Controller PCI device structure | ||
1189 | * | ||
1190 | * Called by remove. | ||
1191 | * Unregister the DMA controller, clear all structures and free the interrupt | ||
1192 | */ | ||
1193 | static void middma_shutdown(struct pci_dev *pdev) | ||
1194 | { | ||
1195 | struct middma_device *device = pci_get_drvdata(pdev); | ||
1196 | |||
1197 | dma_async_device_unregister(&device->common); | ||
1198 | pci_pool_destroy(device->dma_pool); | ||
1199 | if (device->mask_reg) | ||
1200 | iounmap(device->mask_reg); | ||
1201 | if (device->dma_base) | ||
1202 | iounmap(device->dma_base); | ||
1203 | free_irq(pdev->irq, device); | ||
1204 | return; | ||
1205 | } | ||
1206 | |||
1207 | /** | ||
1208 | * intel_mid_dma_probe - PCI Probe | ||
1209 | * @pdev: Controller PCI device structure | ||
1210 | * @id: pci device id structure | ||
1211 | * | ||
1212 | * Initialize the PCI device, map BARs, query driver data. | ||
1213 | * Call mid_setup_dma to complete controller and channel initialization | ||
1214 | */ | ||
1215 | static int intel_mid_dma_probe(struct pci_dev *pdev, | ||
1216 | const struct pci_device_id *id) | ||
1217 | { | ||
1218 | struct middma_device *device; | ||
1219 | u32 base_addr, bar_size; | ||
1220 | struct intel_mid_dma_probe_info *info; | ||
1221 | int err; | ||
1222 | |||
1223 | pr_debug("MDMA: probe for %x\n", pdev->device); | ||
1224 | info = (void *)id->driver_data; | ||
1225 | pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n", | ||
1226 | info->max_chan, info->ch_base, | ||
1227 | info->block_size, info->pimr_mask); | ||
1228 | |||
1229 | err = pci_enable_device(pdev); | ||
1230 | if (err) | ||
1231 | goto err_enable_device; | ||
1232 | |||
1233 | err = pci_request_regions(pdev, "intel_mid_dmac"); | ||
1234 | if (err) | ||
1235 | goto err_request_regions; | ||
1236 | |||
1237 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
1238 | if (err) | ||
1239 | goto err_set_dma_mask; | ||
1240 | |||
1241 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
1242 | if (err) | ||
1243 | goto err_set_dma_mask; | ||
1244 | |||
1245 | device = kzalloc(sizeof(*device), GFP_KERNEL); | ||
1246 | if (!device) { | ||
1247 | pr_err("ERR_MDMA:kzalloc failed probe\n"); | ||
1248 | err = -ENOMEM; | ||
1249 | goto err_kzalloc; | ||
1250 | } | ||
1251 | device->pdev = pci_dev_get(pdev); | ||
1252 | |||
1253 | base_addr = pci_resource_start(pdev, 0); | ||
1254 | bar_size = pci_resource_len(pdev, 0); | ||
1255 | device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); | ||
1256 | if (!device->dma_base) { | ||
1257 | pr_err("ERR_MDMA:ioremap failed\n"); | ||
1258 | err = -ENOMEM; | ||
1259 | goto err_ioremap; | ||
1260 | } | ||
1261 | pci_set_drvdata(pdev, device); | ||
1262 | pci_set_master(pdev); | ||
1263 | device->max_chan = info->max_chan; | ||
1264 | device->chan_base = info->ch_base; | ||
1265 | device->block_size = info->block_size; | ||
1266 | device->pimr_mask = info->pimr_mask; | ||
1267 | |||
1268 | err = mid_setup_dma(pdev); | ||
1269 | if (err) | ||
1270 | goto err_dma; | ||
1271 | |||
1272 | pm_runtime_put_noidle(&pdev->dev); | ||
1273 | pm_runtime_allow(&pdev->dev); | ||
1274 | return 0; | ||
1275 | |||
1276 | err_dma: | ||
1277 | iounmap(device->dma_base); | ||
1278 | err_ioremap: | ||
1279 | pci_dev_put(pdev); | ||
1280 | kfree(device); | ||
1281 | err_kzalloc: | ||
1282 | err_set_dma_mask: | ||
1283 | pci_release_regions(pdev); | ||
1284 | pci_disable_device(pdev); | ||
1285 | err_request_regions: | ||
1286 | err_enable_device: | ||
1287 | pr_err("ERR_MDMA:Probe failed %d\n", err); | ||
1288 | return err; | ||
1289 | } | ||
1290 | |||
1291 | /** | ||
1292 | * intel_mid_dma_remove - PCI remove | ||
1293 | * @pdev: Controller PCI device structure | ||
1294 | * | ||
1295 | * Free up all resources and data | ||
1296 | * Call middma_shutdown to complete controller and channel cleanup | ||
1297 | */ | ||
1298 | static void intel_mid_dma_remove(struct pci_dev *pdev) | ||
1299 | { | ||
1300 | struct middma_device *device = pci_get_drvdata(pdev); | ||
1301 | |||
1302 | pm_runtime_get_noresume(&pdev->dev); | ||
1303 | pm_runtime_forbid(&pdev->dev); | ||
1304 | middma_shutdown(pdev); | ||
1305 | pci_dev_put(pdev); | ||
1306 | kfree(device); | ||
1307 | pci_release_regions(pdev); | ||
1308 | pci_disable_device(pdev); | ||
1309 | } | ||
1310 | |||
1311 | /* Power Management */ | ||
1312 | /* | ||
1313 | * dma_suspend - PCI suspend function | ||
1314 | * | ||
1315 | * @dev: device structure | ||
1317 | * | ||
1318 | * This function is called by the OS when a power event occurs | ||
1319 | */ | ||
1320 | static int dma_suspend(struct device *dev) | ||
1321 | { | ||
1322 | struct pci_dev *pci = to_pci_dev(dev); | ||
1323 | int i; | ||
1324 | struct middma_device *device = pci_get_drvdata(pci); | ||
1325 | pr_debug("MDMA: dma_suspend called\n"); | ||
1326 | |||
1327 | for (i = 0; i < device->max_chan; i++) { | ||
1328 | if (device->ch[i].in_use) | ||
1329 | return -EAGAIN; | ||
1330 | } | ||
1331 | dmac1_mask_periphral_intr(device); | ||
1332 | device->state = SUSPENDED; | ||
1333 | pci_save_state(pci); | ||
1334 | pci_disable_device(pci); | ||
1335 | pci_set_power_state(pci, PCI_D3hot); | ||
1336 | return 0; | ||
1337 | } | ||
1338 | |||
1339 | /** | ||
1340 | * dma_resume - PCI resume function | ||
1341 | * | ||
1342 | * @dev: device structure | ||
1343 | * | ||
1344 | * This function is called by the OS when a power event occurs | ||
1345 | */ | ||
1346 | int dma_resume(struct device *dev) | ||
1347 | { | ||
1348 | struct pci_dev *pci = to_pci_dev(dev); | ||
1349 | int ret; | ||
1350 | struct middma_device *device = pci_get_drvdata(pci); | ||
1351 | |||
1352 | pr_debug("MDMA: dma_resume called\n"); | ||
1353 | pci_set_power_state(pci, PCI_D0); | ||
1354 | pci_restore_state(pci); | ||
1355 | ret = pci_enable_device(pci); | ||
1356 | if (ret) { | ||
1357 | pr_err("MDMA: device can't be enabled for %x\n", pci->device); | ||
1358 | return ret; | ||
1359 | } | ||
1360 | device->state = RUNNING; | ||
1361 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
1362 | return 0; | ||
1363 | } | ||
1364 | |||
1365 | static int dma_runtime_suspend(struct device *dev) | ||
1366 | { | ||
1367 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1368 | struct middma_device *device = pci_get_drvdata(pci_dev); | ||
1369 | |||
1370 | device->state = SUSPENDED; | ||
1371 | return 0; | ||
1372 | } | ||
1373 | |||
1374 | static int dma_runtime_resume(struct device *dev) | ||
1375 | { | ||
1376 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1377 | struct middma_device *device = pci_get_drvdata(pci_dev); | ||
1378 | |||
1379 | device->state = RUNNING; | ||
1380 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
1381 | return 0; | ||
1382 | } | ||
1383 | |||
1384 | static int dma_runtime_idle(struct device *dev) | ||
1385 | { | ||
1386 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1387 | struct middma_device *device = pci_get_drvdata(pdev); | ||
1388 | int i; | ||
1389 | |||
1390 | for (i = 0; i < device->max_chan; i++) { | ||
1391 | if (device->ch[i].in_use) | ||
1392 | return -EAGAIN; | ||
1393 | } | ||
1394 | |||
1395 | return 0; | ||
1396 | } | ||
1397 | |||
1398 | /****************************************************************************** | ||
1399 | * PCI stuff | ||
1400 | */ | ||
1401 | static struct pci_device_id intel_mid_dma_ids[] = { | ||
1402 | { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, | ||
1403 | { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, | ||
1404 | { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, | ||
1405 | { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, | ||
1406 | { 0, } | ||
1407 | }; | ||
1408 | MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); | ||
1409 | |||
1410 | static const struct dev_pm_ops intel_mid_dma_pm = { | ||
1411 | .runtime_suspend = dma_runtime_suspend, | ||
1412 | .runtime_resume = dma_runtime_resume, | ||
1413 | .runtime_idle = dma_runtime_idle, | ||
1414 | .suspend = dma_suspend, | ||
1415 | .resume = dma_resume, | ||
1416 | }; | ||
1417 | |||
1418 | static struct pci_driver intel_mid_dma_pci_driver = { | ||
1419 | .name = "Intel MID DMA", | ||
1420 | .id_table = intel_mid_dma_ids, | ||
1421 | .probe = intel_mid_dma_probe, | ||
1422 | .remove = intel_mid_dma_remove, | ||
1423 | #ifdef CONFIG_PM | ||
1424 | .driver = { | ||
1425 | .pm = &intel_mid_dma_pm, | ||
1426 | }, | ||
1427 | #endif | ||
1428 | }; | ||
1429 | |||
1430 | static int __init intel_mid_dma_init(void) | ||
1431 | { | ||
1432 | pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", | ||
1433 | INTEL_MID_DMA_DRIVER_VERSION); | ||
1434 | return pci_register_driver(&intel_mid_dma_pci_driver); | ||
1435 | } | ||
1436 | fs_initcall(intel_mid_dma_init); | ||
1437 | |||
1438 | static void __exit intel_mid_dma_exit(void) | ||
1439 | { | ||
1440 | pci_unregister_driver(&intel_mid_dma_pci_driver); | ||
1441 | } | ||
1442 | module_exit(intel_mid_dma_exit); | ||
1443 | |||
1444 | MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); | ||
1445 | MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); | ||
1446 | MODULE_LICENSE("GPL v2"); | ||
1447 | MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION); | ||
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h deleted file mode 100644 index 17b42192ea58..000000000000 --- a/drivers/dma/intel_mid_dma_regs.h +++ /dev/null | |||
@@ -1,299 +0,0 @@ | |||
1 | /* | ||
2 | * intel_mid_dma_regs.h - Intel MID DMA Drivers | ||
3 | * | ||
4 | * Copyright (C) 2008-10 Intel Corp | ||
5 | * Author: Vinod Koul <vinod.koul@intel.com> | ||
6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
22 | * | ||
23 | * | ||
24 | */ | ||
25 | #ifndef __INTEL_MID_DMAC_REGS_H__ | ||
26 | #define __INTEL_MID_DMAC_REGS_H__ | ||
27 | |||
28 | #include <linux/dmaengine.h> | ||
29 | #include <linux/dmapool.h> | ||
30 | #include <linux/pci_ids.h> | ||
31 | |||
32 | #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0" | ||
33 | |||
34 | #define REG_BIT0 0x00000001 | ||
35 | #define REG_BIT8 0x00000100 | ||
36 | #define INT_MASK_WE 0x8 | ||
37 | #define CLEAR_DONE 0xFFFFEFFF | ||
38 | #define UNMASK_INTR_REG(chan_num) \ | ||
39 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) | ||
40 | #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) | ||
41 | |||
42 | #define ENABLE_CHANNEL(chan_num) \ | ||
43 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) | ||
44 | |||
45 | #define DISABLE_CHANNEL(chan_num) \ | ||
46 | (REG_BIT8 << chan_num) | ||
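These mask registers use a write-enable scheme: each channel bit in the low byte has a matching write-enable bit eight positions up (REG_BIT8), so a single 32-bit write updates one channel's mask without a read-modify-write of the shared register. For channel 2, for example:

	/* mask ch 2 TFR interrupts: WE bit 10 set, data bit 2 clear -> 0x400 */
	iowrite32(MASK_INTR_REG(2), dma_base + MASK_TFR);
	/* unmask ch 2: WE bit 10 and data bit 2 both set -> 0x404 */
	iowrite32(UNMASK_INTR_REG(2), dma_base + MASK_TFR);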
47 | |||
48 | #define DESCS_PER_CHANNEL 16 | ||
49 | /*DMA Registers*/ | ||
50 | /*registers associated with channel programming*/ | ||
51 | #define DMA_REG_SIZE 0x400 | ||
52 | #define DMA_CH_SIZE 0x58 | ||
53 | |||
54 | /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ | ||
55 | #define SAR 0x00 /* Source Address Register*/ | ||
56 | #define DAR 0x08 /* Destination Address Register*/ | ||
57 | #define LLP 0x10 /* Linked List Pointer Register*/ | ||
58 | #define CTL_LOW 0x18 /* Control Register*/ | ||
59 | #define CTL_HIGH 0x1C /* Control Register*/ | ||
60 | #define CFG_LOW 0x40 /* Configuration Register Low*/ | ||
61 | #define CFG_HIGH 0x44 /* Configuration Register high*/ | ||
62 | |||
63 | #define STATUS_TFR 0x2E8 | ||
64 | #define STATUS_BLOCK 0x2F0 | ||
65 | #define STATUS_ERR 0x308 | ||
66 | |||
67 | #define RAW_TFR 0x2C0 | ||
68 | #define RAW_BLOCK 0x2C8 | ||
69 | #define RAW_ERR 0x2E0 | ||
70 | |||
71 | #define MASK_TFR 0x310 | ||
72 | #define MASK_BLOCK 0x318 | ||
73 | #define MASK_SRC_TRAN 0x320 | ||
74 | #define MASK_DST_TRAN 0x328 | ||
75 | #define MASK_ERR 0x330 | ||
76 | |||
77 | #define CLEAR_TFR 0x338 | ||
78 | #define CLEAR_BLOCK 0x340 | ||
79 | #define CLEAR_SRC_TRAN 0x348 | ||
80 | #define CLEAR_DST_TRAN 0x350 | ||
81 | #define CLEAR_ERR 0x358 | ||
82 | |||
83 | #define INTR_STATUS 0x360 | ||
84 | #define DMA_CFG 0x398 | ||
85 | #define DMA_CHAN_EN 0x3A0 | ||
86 | |||
87 | /*DMA channel control registers*/ | ||
88 | union intel_mid_dma_ctl_lo { | ||
89 | struct { | ||
90 | u32 int_en:1; /*enable or disable interrupts*/ | ||
91 | /*should be 0*/ | ||
92 | u32 dst_tr_width:3; /*destination transfer width*/ | ||
93 | /*usually 32 bits = 010*/ | ||
94 | u32 src_tr_width:3; /*source transfer width*/ | ||
95 | /*usually 32 bits = 010*/ | ||
96 | u32 dinc:2; /*destination address inc/dec*/ | ||
97 | /*For mem:INC=00, Peripheral NoINC=11*/ | ||
98 | u32 sinc:2; /*source address inc or dec, as above*/ | ||
99 | u32 dst_msize:3; /*destination burst transaction length*/ | ||
100 | /*always = 16 ie 011*/ | ||
101 | u32 src_msize:3; /*source burst transaction length*/ | ||
102 | /*always = 16 ie 011*/ | ||
103 | u32 reser1:3; | ||
104 | u32 tt_fc:3; /*transfer type and flow controller*/ | ||
105 | /*M-M = 000 | ||
106 | P-M = 010 | ||
107 | M-P = 001*/ | ||
108 | u32 dms:2; /*destination master select = 0*/ | ||
109 | u32 sms:2; /*source master select = 0*/ | ||
110 | u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/ | ||
111 | u32 llp_src_en:1; /*enable/disable source LLP = 0*/ | ||
112 | u32 reser2:3; | ||
113 | } ctlx; | ||
114 | u32 ctl_lo; | ||
115 | }; | ||
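The bitfield/u32 union lets the driver compose the control word field by field and then write it out in one MMIO access. A hypothetical memory-to-peripheral setup using the encodings noted in the comments above (midc is an intel_mid_dma_chan, as elsewhere in the driver):

	union intel_mid_dma_ctl_lo ctl = { .ctl_lo = 0 };

	ctl.ctlx.int_en = 1;		/* interrupt on block completion */
	ctl.ctlx.src_tr_width = 2;	/* 32-bit source words (010b) */
	ctl.ctlx.dst_tr_width = 2;	/* 32-bit destination words (010b) */
	ctl.ctlx.src_msize = 3;		/* burst length 16 (011b) */
	ctl.ctlx.dst_msize = 3;
	ctl.ctlx.tt_fc = 1;		/* transfer type M-P (001b) */
	iowrite32(ctl.ctl_lo, midc->ch_regs + CTL_LOW);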
116 | |||
117 | union intel_mid_dma_ctl_hi { | ||
118 | struct { | ||
119 | u32 block_ts:12; /*block transfer size*/ | ||
120 | u32 done:1; /*Done - updated by DMAC*/ | ||
121 | u32 reser:19; /*configured by DMAC*/ | ||
122 | } ctlx; | ||
123 | u32 ctl_hi; | ||
124 | |||
125 | }; | ||
126 | |||
127 | /*DMA channel configuration registers*/ | ||
128 | union intel_mid_dma_cfg_lo { | ||
129 | struct { | ||
130 | u32 reser1:5; | ||
131 | u32 ch_prior:3; /*channel priority = 0*/ | ||
132 | u32 ch_susp:1; /*channel suspend = 0*/ | ||
133 | u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/ | ||
134 | u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/ | ||
135 | /*HW = 0, SW = 1*/ | ||
136 | u32 hs_sel_src:1; /*select HW/SW src handshaking*/ | ||
137 | u32 reser2:6; | ||
138 | u32 dst_hs_pol:1; /*dest HS interface polarity*/ | ||
139 | u32 src_hs_pol:1; /*src HS interface polarity*/ | ||
140 | u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit)*/ | ||
141 | u32 reload_src:1; /*auto reload src addr =1 if src is P*/ | ||
142 | u32 reload_dst:1; /*auto reload destn addr =1 if dstn is P*/ | ||
143 | } cfgx; | ||
144 | u32 cfg_lo; | ||
145 | }; | ||
146 | |||
147 | union intel_mid_dma_cfg_hi { | ||
148 | struct { | ||
149 | u32 fcmode:1; /*flow control mode = 1*/ | ||
150 | u32 fifo_mode:1; /*FIFO mode select = 1*/ | ||
151 | u32 protctl:3; /*protection control = 0*/ | ||
152 | u32 rsvd:2; | ||
153 | u32 src_per:4; /*src hw HS interface*/ | ||
154 | u32 dst_per:4; /*dstn hw HS interface*/ | ||
155 | u32 reser2:17; | ||
156 | } cfgx; | ||
157 | u32 cfg_hi; | ||
158 | }; | ||
159 | |||
160 | |||
161 | /** | ||
162 | * struct intel_mid_dma_chan - internal mid representation of a DMA channel | ||
163 | * @chan: dma_chan structure representation for mid channel | ||
164 | * @ch_regs: MMIO register space pointer to channel register | ||
165 | * @dma_base: MMIO register space DMA engine base pointer | ||
166 | * @ch_id: DMA channel id | ||
167 | * @lock: channel spinlock | ||
168 | * @active_list: current active descriptors | ||
169 | * @queue: current queued up descriptors | ||
170 | * @free_list: current free descriptors | ||
171 | * @mid_slave: dma slave structure | ||
172 | * @descs_allocated: total number of descriptors allocated | ||
173 | * @dma: dma device structure pointer | ||
174 | * @busy: bool representing if ch is busy (active txn) or not | ||
175 | * @in_use: bool representing if ch is in use or not | ||
176 | * @raw_tfr: raw tfr interrupt received | ||
177 | * @raw_block: raw block interrupt received | ||
178 | */ | ||
179 | struct intel_mid_dma_chan { | ||
180 | struct dma_chan chan; | ||
181 | void __iomem *ch_regs; | ||
182 | void __iomem *dma_base; | ||
183 | int ch_id; | ||
184 | spinlock_t lock; | ||
185 | struct list_head active_list; | ||
186 | struct list_head queue; | ||
187 | struct list_head free_list; | ||
188 | unsigned int descs_allocated; | ||
189 | struct middma_device *dma; | ||
190 | bool busy; | ||
191 | bool in_use; | ||
192 | u32 raw_tfr; | ||
193 | u32 raw_block; | ||
194 | struct intel_mid_dma_slave *mid_slave; | ||
195 | }; | ||
196 | |||
197 | static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | ||
198 | struct dma_chan *chan) | ||
199 | { | ||
200 | return container_of(chan, struct intel_mid_dma_chan, chan); | ||
201 | } | ||
202 | |||
203 | enum intel_mid_dma_state { | ||
204 | RUNNING = 0, | ||
205 | SUSPENDED, | ||
206 | }; | ||
207 | /** | ||
208 | * struct middma_device - internal representation of a DMA device | ||
209 | * @pdev: PCI device | ||
210 | * @dma_base: MMIO register space pointer of DMA | ||
211 | * @dma_pool: for allocating DMA descriptors | ||
212 | * @common: embedded struct dma_device | ||
213 | * @tasklet: dma tasklet for processing interrupts | ||
214 | * @ch: per channel data | ||
215 | * @pci_id: DMA device PCI ID | ||
216 | * @intr_mask: Interrupt mask to be used | ||
217 | * @mask_reg: MMIO register for peripheral mask | ||
218 | * @chan_base: Base ch index (read from driver data) | ||
219 | * @max_chan: max number of chs supported (from drv_data) | ||
220 | * @block_size: Block size of DMA transfer supported (from drv_data) | ||
221 | * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data) | ||
222 | * @state: dma PM device state | ||
223 | */ | ||
224 | struct middma_device { | ||
225 | struct pci_dev *pdev; | ||
226 | void __iomem *dma_base; | ||
227 | struct pci_pool *dma_pool; | ||
228 | struct dma_device common; | ||
229 | struct tasklet_struct tasklet; | ||
230 | struct intel_mid_dma_chan ch[MAX_CHAN]; | ||
231 | unsigned int pci_id; | ||
232 | unsigned int intr_mask; | ||
233 | void __iomem *mask_reg; | ||
234 | int chan_base; | ||
235 | int max_chan; | ||
236 | int block_size; | ||
237 | unsigned int pimr_mask; | ||
238 | enum intel_mid_dma_state state; | ||
239 | }; | ||
240 | |||
241 | static inline struct middma_device *to_middma_device(struct dma_device *common) | ||
242 | { | ||
243 | return container_of(common, struct middma_device, common); | ||
244 | } | ||
245 | |||
246 | struct intel_mid_dma_desc { | ||
247 | void __iomem *block; /*ch ptr*/ | ||
248 | struct list_head desc_node; | ||
249 | struct dma_async_tx_descriptor txd; | ||
250 | size_t len; | ||
251 | dma_addr_t sar; | ||
252 | dma_addr_t dar; | ||
253 | u32 cfg_hi; | ||
254 | u32 cfg_lo; | ||
255 | u32 ctl_lo; | ||
256 | u32 ctl_hi; | ||
257 | struct pci_pool *lli_pool; | ||
258 | struct intel_mid_dma_lli *lli; | ||
259 | dma_addr_t lli_phys; | ||
260 | unsigned int lli_length; | ||
261 | unsigned int current_lli; | ||
262 | dma_addr_t next; | ||
263 | enum dma_transfer_direction dirn; | ||
264 | enum dma_status status; | ||
265 | enum dma_slave_buswidth width; /*width of DMA txn*/ | ||
266 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | ||
267 | |||
268 | }; | ||
269 | |||
270 | struct intel_mid_dma_lli { | ||
271 | dma_addr_t sar; | ||
272 | dma_addr_t dar; | ||
273 | dma_addr_t llp; | ||
274 | u32 ctl_lo; | ||
275 | u32 ctl_hi; | ||
276 | } __attribute__ ((packed)); | ||
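Each packed entry is one node of the hardware linked list: the controller loads SAR/DAR/CTL from it, transfers the block, then fetches the next node from llp. A sketch of chaining two blocks (src_phys, dst_phys and block_len are illustrative; lli/lli_phys come from the pool allocation held in the descriptor above):

	lli[0].sar = src_phys;
	lli[0].dar = dst_phys;
	lli[0].llp = lli_phys + sizeof(struct intel_mid_dma_lli);

	lli[1].sar = src_phys + block_len;
	lli[1].dar = dst_phys + block_len;
	lli[1].llp = 0;				/* end of chain */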
277 | |||
278 | static inline int test_ch_en(void __iomem *dma, u32 ch_no) | ||
279 | { | ||
280 | u32 en_reg = ioread32(dma + DMA_CHAN_EN); | ||
281 | return (en_reg >> ch_no) & 0x1; | ||
282 | } | ||
283 | |||
284 | static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc | ||
285 | (struct dma_async_tx_descriptor *txd) | ||
286 | { | ||
287 | return container_of(txd, struct intel_mid_dma_desc, txd); | ||
288 | } | ||
289 | |||
290 | static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave | ||
291 | (struct dma_slave_config *slave) | ||
292 | { | ||
293 | return container_of(slave, struct intel_mid_dma_slave, dma_slave); | ||
294 | } | ||
295 | |||
296 | |||
297 | int dma_resume(struct device *dev); | ||
298 | |||
299 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ | ||
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ab8dfbef6f1b..198f96b7fb45 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -159,10 +159,9 @@ config SPI_BUTTERFLY | |||
159 | 159 | ||
160 | config SPI_CADENCE | 160 | config SPI_CADENCE |
161 | tristate "Cadence SPI controller" | 161 | tristate "Cadence SPI controller" |
162 | depends on ARM | ||
163 | help | 162 | help |
164 | This selects the Cadence SPI controller master driver | 163 | This selects the Cadence SPI controller master driver |
165 | used by Xilinx Zynq. | 164 | used by Xilinx Zynq and ZynqMP. |
166 | 165 | ||
167 | config SPI_CLPS711X | 166 | config SPI_CLPS711X |
168 | tristate "CLPS711X host SPI controller" | 167 | tristate "CLPS711X host SPI controller" |
@@ -632,7 +631,7 @@ config SPI_DW_PCI | |||
632 | 631 | ||
633 | config SPI_DW_MID_DMA | 632 | config SPI_DW_MID_DMA |
634 | bool "DMA support for DW SPI controller on Intel MID platform" | 633 | bool "DMA support for DW SPI controller on Intel MID platform" |
635 | depends on SPI_DW_PCI && INTEL_MID_DMAC | 634 | depends on SPI_DW_PCI && DW_DMAC_PCI |
636 | 635 | ||
637 | config SPI_DW_MMIO | 636 | config SPI_DW_MMIO |
638 | tristate "Memory-mapped io interface driver for DW SPI core" | 637 | tristate "Memory-mapped io interface driver for DW SPI core" |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 06de34001c66..a2f40b1b2225 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -180,11 +180,17 @@ | |||
180 | | SPI_BF(name, value)) | 180 | | SPI_BF(name, value)) |
181 | 181 | ||
182 | /* Register access macros */ | 182 | /* Register access macros */ |
183 | #ifdef CONFIG_AVR32 | ||
183 | #define spi_readl(port, reg) \ | 184 | #define spi_readl(port, reg) \ |
184 | __raw_readl((port)->regs + SPI_##reg) | 185 | __raw_readl((port)->regs + SPI_##reg) |
185 | #define spi_writel(port, reg, value) \ | 186 | #define spi_writel(port, reg, value) \ |
186 | __raw_writel((value), (port)->regs + SPI_##reg) | 187 | __raw_writel((value), (port)->regs + SPI_##reg) |
187 | 188 | #else | |
189 | #define spi_readl(port, reg) \ | ||
190 | readl_relaxed((port)->regs + SPI_##reg) | ||
191 | #define spi_writel(port, reg, value) \ | ||
192 | writel_relaxed((value), (port)->regs + SPI_##reg) | ||
193 | #endif | ||
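readl_relaxed()/writel_relaxed() behave like readl()/writel() minus the memory barriers, which is cheaper on ARM and sufficient here because these accesses only touch the controller's own registers. The __raw_* accessors stay for AVR32, presumably because its peripherals are wired native (big) endian and the little-endian relaxed accessors would byte-swap. On ARM a status read through the new macro now expands roughly to:

	status = readl_relaxed(as->regs + SPI_SR);	/* no barrier */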
188 | /* use PIO for small transfers, avoiding DMA setup/teardown overhead and | 194 | /* use PIO for small transfers, avoiding DMA setup/teardown overhead and |
189 | * cache operations; better heuristics consider wordsize and bitrate. | 195 | * cache operations; better heuristics consider wordsize and bitrate. |
190 | */ | 196 | */ |
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 419a782ab6d5..f63864a893c5 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2012 Chris Boot | 4 | * Copyright (C) 2012 Chris Boot |
5 | * Copyright (C) 2013 Stephen Warren | 5 | * Copyright (C) 2013 Stephen Warren |
6 | * Copyright (C) 2015 Martin Sperl | ||
6 | * | 7 | * |
7 | * This driver is inspired by: | 8 | * This driver is inspired by: |
8 | * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> | 9 | * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> |
@@ -29,6 +30,7 @@ | |||
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
30 | #include <linux/of.h> | 31 | #include <linux/of.h> |
31 | #include <linux/of_irq.h> | 32 | #include <linux/of_irq.h> |
33 | #include <linux/of_gpio.h> | ||
32 | #include <linux/of_device.h> | 34 | #include <linux/of_device.h> |
33 | #include <linux/spi/spi.h> | 35 | #include <linux/spi/spi.h> |
34 | 36 | ||
@@ -66,8 +68,10 @@ | |||
66 | #define BCM2835_SPI_CS_CS_10 0x00000002 | 68 | #define BCM2835_SPI_CS_CS_10 0x00000002 |
67 | #define BCM2835_SPI_CS_CS_01 0x00000001 | 69 | #define BCM2835_SPI_CS_CS_01 0x00000001 |
68 | 70 | ||
69 | #define BCM2835_SPI_TIMEOUT_MS 30000 | 71 | #define BCM2835_SPI_POLLING_LIMIT_US 30 |
70 | #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS) | 72 | #define BCM2835_SPI_TIMEOUT_MS 30000 |
73 | #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | ||
74 | | SPI_NO_CS | SPI_3WIRE) | ||
71 | 75 | ||
72 | #define DRV_NAME "spi-bcm2835" | 76 | #define DRV_NAME "spi-bcm2835" |
73 | 77 | ||
@@ -75,10 +79,10 @@ struct bcm2835_spi { | |||
75 | void __iomem *regs; | 79 | void __iomem *regs; |
76 | struct clk *clk; | 80 | struct clk *clk; |
77 | int irq; | 81 | int irq; |
78 | struct completion done; | ||
79 | const u8 *tx_buf; | 82 | const u8 *tx_buf; |
80 | u8 *rx_buf; | 83 | u8 *rx_buf; |
81 | int len; | 84 | int tx_len; |
85 | int rx_len; | ||
82 | }; | 86 | }; |
83 | 87 | ||
84 | static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) | 88 | static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) |
@@ -91,205 +95,315 @@ static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val) | |||
91 | writel(val, bs->regs + reg); | 95 | writel(val, bs->regs + reg); |
92 | } | 96 | } |
93 | 97 | ||
94 | static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs, int len) | 98 | static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs) |
95 | { | 99 | { |
96 | u8 byte; | 100 | u8 byte; |
97 | 101 | ||
98 | while (len--) { | 102 | while ((bs->rx_len) && |
103 | (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) { | ||
99 | byte = bcm2835_rd(bs, BCM2835_SPI_FIFO); | 104 | byte = bcm2835_rd(bs, BCM2835_SPI_FIFO); |
100 | if (bs->rx_buf) | 105 | if (bs->rx_buf) |
101 | *bs->rx_buf++ = byte; | 106 | *bs->rx_buf++ = byte; |
107 | bs->rx_len--; | ||
102 | } | 108 | } |
103 | } | 109 | } |
104 | 110 | ||
105 | static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs, int len) | 111 | static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs) |
106 | { | 112 | { |
107 | u8 byte; | 113 | u8 byte; |
108 | 114 | ||
109 | if (len > bs->len) | 115 | while ((bs->tx_len) && |
110 | len = bs->len; | 116 | (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) { |
111 | |||
112 | while (len--) { | ||
113 | byte = bs->tx_buf ? *bs->tx_buf++ : 0; | 117 | byte = bs->tx_buf ? *bs->tx_buf++ : 0; |
114 | bcm2835_wr(bs, BCM2835_SPI_FIFO, byte); | 118 | bcm2835_wr(bs, BCM2835_SPI_FIFO, byte); |
115 | bs->len--; | 119 | bs->tx_len--; |
116 | } | 120 | } |
117 | } | 121 | } |
118 | 122 | ||
123 | static void bcm2835_spi_reset_hw(struct spi_master *master) | ||
124 | { | ||
125 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
126 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
127 | |||
128 | /* Disable SPI interrupts and transfer */ | ||
129 | cs &= ~(BCM2835_SPI_CS_INTR | | ||
130 | BCM2835_SPI_CS_INTD | | ||
131 | BCM2835_SPI_CS_TA); | ||
132 | /* and reset RX/TX FIFOS */ | ||
133 | cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX; | ||
134 | |||
135 | /* and reset the SPI_HW */ | ||
136 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); | ||
137 | } | ||
138 | |||
119 | static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) | 139 | static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) |
120 | { | 140 | { |
121 | struct spi_master *master = dev_id; | 141 | struct spi_master *master = dev_id; |
122 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | 142 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
123 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
124 | 143 | ||
125 | /* | 144 | /* Read as many bytes as possible from FIFO */ |
126 | * RXR - RX needs Reading. This means 12 (or more) bytes have been | 145 | bcm2835_rd_fifo(bs); |
127 | * transmitted and hence 12 (or more) bytes have been received. | 146 | /* Write as many bytes as possible to FIFO */ |
128 | * | 147 | bcm2835_wr_fifo(bs); |
129 | * The FIFO is 16-bytes deep. We check for this interrupt to keep the | 148 | |
130 | * FIFO full; we have a 4-byte-time buffer for IRQ latency. We check | 149 | /* based on flags decide if we can finish the transfer */ |
131 | * this before DONE (TX empty) just in case we delayed processing this | 150 | if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) { |
132 | * interrupt for some reason. | 151 | /* Transfer complete - reset SPI HW */ |
133 | * | 152 | bcm2835_spi_reset_hw(master); |
134 | * We only check for this case if we have more bytes to TX; at the end | 153 | /* wake up the framework */ |
135 | * of the transfer, we ignore this pipelining optimization, and let | 154 | complete(&master->xfer_completion); |
136 | * bcm2835_spi_finish_transfer() drain the RX FIFO. | 155 | } |
156 | |||
157 | return IRQ_HANDLED; | ||
158 | } | ||
159 | |||
160 | static int bcm2835_spi_transfer_one_poll(struct spi_master *master, | ||
161 | struct spi_device *spi, | ||
162 | struct spi_transfer *tfr, | ||
163 | u32 cs, | ||
164 | unsigned long xfer_time_us) | ||
165 | { | ||
166 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
167 | unsigned long timeout = jiffies + | ||
168 | max(4 * xfer_time_us * HZ / 1000000, 2uL); | ||
169 | |||
170 | /* enable HW block without interrupts */ | ||
171 | bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); | ||
172 | |||
173 | /* the timeout above is 4x the expected time, or at least 2 jiffies */ | ||
174 | /* loop until the transfer is finished */ | ||
175 | while (bs->rx_len) { | ||
176 | /* read from fifo as much as possible */ | ||
177 | bcm2835_rd_fifo(bs); | ||
178 | /* fill in tx fifo as much as possible */ | ||
179 | bcm2835_wr_fifo(bs); | ||
180 | /* if we still expect some data after the read, | ||
181 | * check for a possible timeout | ||
182 | */ | ||
183 | if (bs->rx_len && time_after(jiffies, timeout)) { | ||
184 | /* Timed out - reset SPI HW */ | ||
185 | bcm2835_spi_reset_hw(master); | ||
186 | /* and return timeout */ | ||
187 | return -ETIMEDOUT; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | /* Transfer complete - reset SPI HW */ | ||
192 | bcm2835_spi_reset_hw(master); | ||
193 | /* and return without waiting for completion */ | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int bcm2835_spi_transfer_one_irq(struct spi_master *master, | ||
198 | struct spi_device *spi, | ||
199 | struct spi_transfer *tfr, | ||
200 | u32 cs) | ||
201 | { | ||
202 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
203 | |||
204 | /* fill in the fifo before enabling interrupts if we have gpio-cs | ||
205 | * note that there have been rare events where the native-CS | ||
206 | * flapped for <1us, which may change the behaviour | ||
207 | * with gpio-cs this does not happen, so pre-filling is implemented | ||
208 | * only for this case | ||
137 | */ | 209 | */ |
138 | if (bs->len && (cs & BCM2835_SPI_CS_RXR)) { | 210 | if (gpio_is_valid(spi->cs_gpio)) { |
139 | /* Read 12 bytes of data */ | 211 | /* enable HW block, but without interrupts enabled |
140 | bcm2835_rd_fifo(bs, 12); | 212 | * this would trigger an immediate interrupt |
141 | |||
142 | /* Write up to 12 bytes */ | ||
143 | bcm2835_wr_fifo(bs, 12); | ||
144 | |||
145 | /* | ||
146 | * We must have written something to the TX FIFO due to the | ||
147 | * bs->len check above, so cannot be DONE. Hence, return | ||
148 | * early. Note that DONE could also be set if we serviced an | ||
149 | * RXR interrupt really late. | ||
150 | */ | 213 | */ |
151 | return IRQ_HANDLED; | 214 | bcm2835_wr(bs, BCM2835_SPI_CS, |
215 | cs | BCM2835_SPI_CS_TA); | ||
216 | /* fill in tx fifo as much as possible */ | ||
217 | bcm2835_wr_fifo(bs); | ||
152 | } | 218 | } |
153 | 219 | ||
154 | /* | 220 | /* |
155 | * DONE - TX empty. This occurs when we first enable the transfer | 221 | * Enable the HW block. This will immediately trigger a DONE (TX |
156 | * since we do not pre-fill the TX FIFO. At any other time, given that | 222 | * empty) interrupt, upon which we will fill the TX FIFO with the |
157 | * we refill the TX FIFO above based on RXR, and hence ignore DONE if | 223 | * first TX bytes. Pre-filling the TX FIFO here to avoid the |
158 | * RXR is set, DONE really does mean end-of-transfer. | 224 | * interrupt doesn't work:-( |
159 | */ | 225 | */ |
160 | if (cs & BCM2835_SPI_CS_DONE) { | 226 | cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; |
161 | if (bs->len) { /* First interrupt in a transfer */ | 227 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); |
162 | bcm2835_wr_fifo(bs, 16); | ||
163 | } else { /* Transfer complete */ | ||
164 | /* Disable SPI interrupts */ | ||
165 | cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD); | ||
166 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); | ||
167 | |||
168 | /* | ||
169 | * Wake up bcm2835_spi_transfer_one(), which will call | ||
170 | * bcm2835_spi_finish_transfer(), to drain the RX FIFO. | ||
171 | */ | ||
172 | complete(&bs->done); | ||
173 | } | ||
174 | |||
175 | return IRQ_HANDLED; | ||
176 | } | ||
177 | 228 | ||
178 | return IRQ_NONE; | 229 | /* signal that we need to wait for completion */ |
230 | return 1; | ||
179 | } | 231 | } |
180 | 232 | ||
181 | static int bcm2835_spi_start_transfer(struct spi_device *spi, | 233 | static int bcm2835_spi_transfer_one(struct spi_master *master, |
182 | struct spi_transfer *tfr) | 234 | struct spi_device *spi, |
235 | struct spi_transfer *tfr) | ||
183 | { | 236 | { |
184 | struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); | 237 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
185 | unsigned long spi_hz, clk_hz, cdiv; | 238 | unsigned long spi_hz, clk_hz, cdiv; |
186 | u32 cs = BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; | 239 | unsigned long spi_used_hz, xfer_time_us; |
240 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
187 | 241 | ||
242 | /* set clock */ | ||
188 | spi_hz = tfr->speed_hz; | 243 | spi_hz = tfr->speed_hz; |
189 | clk_hz = clk_get_rate(bs->clk); | 244 | clk_hz = clk_get_rate(bs->clk); |
190 | 245 | ||
191 | if (spi_hz >= clk_hz / 2) { | 246 | if (spi_hz >= clk_hz / 2) { |
192 | cdiv = 2; /* clk_hz/2 is the fastest we can go */ | 247 | cdiv = 2; /* clk_hz/2 is the fastest we can go */ |
193 | } else if (spi_hz) { | 248 | } else if (spi_hz) { |
194 | /* CDIV must be a power of two */ | 249 | /* CDIV must be a multiple of two */ |
195 | cdiv = roundup_pow_of_two(DIV_ROUND_UP(clk_hz, spi_hz)); | 250 | cdiv = DIV_ROUND_UP(clk_hz, spi_hz); |
251 | cdiv += (cdiv % 2); | ||
196 | 252 | ||
197 | if (cdiv >= 65536) | 253 | if (cdiv >= 65536) |
198 | cdiv = 0; /* 0 is the slowest we can go */ | 254 | cdiv = 0; /* 0 is the slowest we can go */ |
199 | } else | 255 | } else { |
200 | cdiv = 0; /* 0 is the slowest we can go */ | 256 | cdiv = 0; /* 0 is the slowest we can go */ |
257 | } | ||
258 | spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536); | ||
259 | bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); | ||
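A worked example of the new divider math, assuming the usual 250 MHz BCM2835 core clock: for tfr->speed_hz = 12 MHz, DIV_ROUND_UP(250 MHz, 12 MHz) = 21, rounded up to the even 22, giving spi_used_hz = 250 MHz / 22 ~= 11.4 MHz. The old roundup_pow_of_two() logic would have picked cdiv = 32, i.e. only ~7.8 MHz, so allowing any even divisor gets much closer to the requested rate.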
201 | 260 | ||
261 | /* handle all the modes */ | ||
262 | if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf)) | ||
263 | cs |= BCM2835_SPI_CS_REN; | ||
202 | if (spi->mode & SPI_CPOL) | 264 | if (spi->mode & SPI_CPOL) |
203 | cs |= BCM2835_SPI_CS_CPOL; | 265 | cs |= BCM2835_SPI_CS_CPOL; |
204 | if (spi->mode & SPI_CPHA) | 266 | if (spi->mode & SPI_CPHA) |
205 | cs |= BCM2835_SPI_CS_CPHA; | 267 | cs |= BCM2835_SPI_CS_CPHA; |
206 | 268 | ||
207 | if (!(spi->mode & SPI_NO_CS)) { | 269 | /* for gpio_cs set dummy CS so that no HW-CS get changed |
208 | if (spi->mode & SPI_CS_HIGH) { | 270 | * we can not run this in bcm2835_spi_set_cs, as it does |
209 | cs |= BCM2835_SPI_CS_CSPOL; | 271 | * not get called for cs_gpio cases, so we need to do it here |
210 | cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select; | 272 | */ |
211 | } | 273 | if (gpio_is_valid(spi->cs_gpio) || (spi->mode & SPI_NO_CS)) |
212 | 274 | cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; | |
213 | cs |= spi->chip_select; | ||
214 | } | ||
215 | 275 | ||
216 | reinit_completion(&bs->done); | 276 | /* set transmit buffers and length */ |
217 | bs->tx_buf = tfr->tx_buf; | 277 | bs->tx_buf = tfr->tx_buf; |
218 | bs->rx_buf = tfr->rx_buf; | 278 | bs->rx_buf = tfr->rx_buf; |
219 | bs->len = tfr->len; | 279 | bs->tx_len = tfr->len; |
280 | bs->rx_len = tfr->len; | ||
220 | 281 | ||
221 | bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); | 282 | /* calculate the estimated time in us the transfer runs */ |
222 | /* | 283 | xfer_time_us = tfr->len |
223 | * Enable the HW block. This will immediately trigger a DONE (TX | 284 | * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */ |
224 | * empty) interrupt, upon which we will fill the TX FIFO with the | 285 | * 1000000 / spi_used_hz; |
225 | * first TX bytes. Pre-filling the TX FIFO here to avoid the | ||
226 | * interrupt doesn't work:-( | ||
227 | */ | ||
228 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); | ||
229 | 286 | ||
230 | return 0; | 287 | /* for short requests run polling*/ |
288 | if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US) | ||
289 | return bcm2835_spi_transfer_one_poll(master, spi, tfr, | ||
290 | cs, xfer_time_us); | ||
291 | |||
292 | return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs); | ||
231 | } | 293 | } |
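The 9 clocks/byte in the estimate are 8 data clocks plus the one idle clock the SPI block inserts after each byte. Sanity-checking the 30 us polling limit with assumed rates: a 4-byte transfer at 8 MHz costs 4 * 9 / 8 MHz = 4.5 us and is polled, while 96 bytes at 10 MHz cost 86.4 us and take the interrupt path; busy-waiting a few microseconds is cheaper than the interrupt and wakeup latency it replaces.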
232 | 294 | ||
233 | static int bcm2835_spi_finish_transfer(struct spi_device *spi, | 295 | static void bcm2835_spi_handle_err(struct spi_master *master, |
234 | struct spi_transfer *tfr, bool cs_change) | 296 | struct spi_message *msg) |
235 | { | 297 | { |
236 | struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); | 298 | bcm2835_spi_reset_hw(master); |
237 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | 299 | } |
300 | |||
301 | static void bcm2835_spi_set_cs(struct spi_device *spi, bool gpio_level) | ||
302 | { | ||
303 | /* | ||
304 | * we can assume that we are "native" as per spi_set_cs | ||
305 | * calling us ONLY when cs_gpio is not set | ||
306 | * we can also assume that we are CS < 3 as per bcm2835_spi_setup | ||
307 | * we would not get called because of error handling there. | ||
308 | * the level passed is the electrical level not enabled/disabled | ||
309 | * so it has to get translated back to enable/disable | ||
310 | * see spi_set_cs in spi.c for the implementation | ||
311 | */ | ||
238 | 312 | ||
239 | /* Drain RX FIFO */ | 313 | struct spi_master *master = spi->master; |
240 | while (cs & BCM2835_SPI_CS_RXD) { | 314 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
241 | bcm2835_rd_fifo(bs, 1); | 315 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); |
242 | cs = bcm2835_rd(bs, BCM2835_SPI_CS); | 316 | bool enable; |
317 | |||
318 | /* calculate the enable flag from the passed gpio_level */ | ||
319 | enable = (spi->mode & SPI_CS_HIGH) ? gpio_level : !gpio_level; | ||
320 | |||
321 | /* set flags for "reverse" polarity in the registers */ | ||
322 | if (spi->mode & SPI_CS_HIGH) { | ||
323 | /* set the correct CS-bits */ | ||
324 | cs |= BCM2835_SPI_CS_CSPOL; | ||
325 | cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select; | ||
326 | } else { | ||
327 | /* clean the CS-bits */ | ||
328 | cs &= ~BCM2835_SPI_CS_CSPOL; | ||
329 | cs &= ~(BCM2835_SPI_CS_CSPOL0 << spi->chip_select); | ||
243 | } | 330 | } |
244 | 331 | ||
245 | if (tfr->delay_usecs) | 332 | /* select the correct chip_select depending on disabled/enabled */ |
246 | udelay(tfr->delay_usecs); | 333 | if (enable) { |
334 | /* set cs correctly */ | ||
335 | if (spi->mode & SPI_NO_CS) { | ||
336 | /* use the "undefined" chip-select */ | ||
337 | cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; | ||
338 | } else { | ||
339 | /* set the chip select */ | ||
340 | cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01); | ||
341 | cs |= spi->chip_select; | ||
342 | } | ||
343 | } else { | ||
344 | /* disable CSPOL which puts HW-CS into deselected state */ | ||
345 | cs &= ~BCM2835_SPI_CS_CSPOL; | ||
346 | /* use the "undefined" chip-select as precaution */ | ||
347 | cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; | ||
348 | } | ||
247 | 349 | ||
248 | if (cs_change) | 350 | /* finally set the calculated flags in SPI_CS */ |
249 | /* Clear TA flag */ | 351 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); |
250 | bcm2835_wr(bs, BCM2835_SPI_CS, cs & ~BCM2835_SPI_CS_TA); | 352 | } |
251 | 353 | ||
252 | return 0; | 354 | static int chip_match_name(struct gpio_chip *chip, void *data) |
355 | { | ||
356 | return !strcmp(chip->label, data); | ||
253 | } | 357 | } |
254 | 358 | ||
255 | static int bcm2835_spi_transfer_one(struct spi_master *master, | 359 | static int bcm2835_spi_setup(struct spi_device *spi) |
256 | struct spi_message *mesg) | ||
257 | { | 360 | { |
258 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | 361 | int err; |
259 | struct spi_transfer *tfr; | 362 | struct gpio_chip *chip; |
260 | struct spi_device *spi = mesg->spi; | 363 | /* |
261 | int err = 0; | 364 | * sanity checking the native-chipselects |
262 | unsigned int timeout; | 365 | */ |
263 | bool cs_change; | 366 | if (spi->mode & SPI_NO_CS) |
264 | 367 | return 0; | |
265 | list_for_each_entry(tfr, &mesg->transfers, transfer_list) { | 368 | if (gpio_is_valid(spi->cs_gpio)) |
266 | err = bcm2835_spi_start_transfer(spi, tfr); | 369 | return 0; |
267 | if (err) | 370 | if (spi->chip_select > 1) { |
268 | goto out; | 371 | /* error in the case of native CS requested with CS > 1 |
269 | 372 | * officially there is a CS2, but it is not documented | |
270 | timeout = wait_for_completion_timeout(&bs->done, | 373 | * which GPIO is connected with that... |
271 | msecs_to_jiffies(BCM2835_SPI_TIMEOUT_MS)); | 374 | */ |
272 | if (!timeout) { | 375 | dev_err(&spi->dev, |
273 | err = -ETIMEDOUT; | 376 | "setup: only two native chip-selects are supported\n"); |
274 | goto out; | 377 | return -EINVAL; |
275 | } | 378 | } |
379 | /* now translate native cs to GPIO */ | ||
276 | 380 | ||
277 | cs_change = tfr->cs_change || | 381 | /* get the gpio chip for the base */ |
278 | list_is_last(&tfr->transfer_list, &mesg->transfers); | 382 | chip = gpiochip_find("pinctrl-bcm2835", chip_match_name); |
383 | if (!chip) | ||
384 | return 0; | ||
279 | 385 | ||
280 | err = bcm2835_spi_finish_transfer(spi, tfr, cs_change); | 386 | /* and calculate the real CS */ |
281 | if (err) | 387 | spi->cs_gpio = chip->base + 8 - spi->chip_select; |
282 | goto out; | ||
283 | 388 | ||
284 | mesg->actual_length += (tfr->len - bs->len); | 389 | /* and set up the "mode" and level */ |
285 | } | 390 | dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n", |
391 | spi->chip_select, spi->cs_gpio); | ||
286 | 392 | ||
287 | out: | 393 | /* set up GPIO as output and pull to the correct level */ |
288 | /* Clear FIFOs, and disable the HW block */ | 394 | err = gpio_direction_output(spi->cs_gpio, |
289 | bcm2835_wr(bs, BCM2835_SPI_CS, | 395 | (spi->mode & SPI_CS_HIGH) ? 0 : 1); |
290 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); | 396 | if (err) { |
291 | mesg->status = err; | 397 | dev_err(&spi->dev, |
292 | spi_finalize_current_message(master); | 398 | "could not set CS%i gpio %i as output: %i", |
399 | spi->chip_select, spi->cs_gpio, err); | ||
400 | return err; | ||
401 | } | ||
402 | /* the implementation of pinctrl-bcm2835 currently does not | ||
403 | * set the GPIO value when using gpio_direction_output | ||
404 | * so we are setting it here explicitly | ||
405 | */ | ||
406 | gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1); | ||
293 | 407 | ||
294 | return 0; | 408 | return 0; |
295 | } | 409 | } |
@@ -312,13 +426,14 @@ static int bcm2835_spi_probe(struct platform_device *pdev) | |||
312 | master->mode_bits = BCM2835_SPI_MODE_BITS; | 426 | master->mode_bits = BCM2835_SPI_MODE_BITS; |
313 | master->bits_per_word_mask = SPI_BPW_MASK(8); | 427 | master->bits_per_word_mask = SPI_BPW_MASK(8); |
314 | master->num_chipselect = 3; | 428 | master->num_chipselect = 3; |
315 | master->transfer_one_message = bcm2835_spi_transfer_one; | 429 | master->setup = bcm2835_spi_setup; |
430 | master->set_cs = bcm2835_spi_set_cs; | ||
431 | master->transfer_one = bcm2835_spi_transfer_one; | ||
432 | master->handle_err = bcm2835_spi_handle_err; | ||
316 | master->dev.of_node = pdev->dev.of_node; | 433 | master->dev.of_node = pdev->dev.of_node; |
317 | 434 | ||
318 | bs = spi_master_get_devdata(master); | 435 | bs = spi_master_get_devdata(master); |
319 | 436 | ||
320 | init_completion(&bs->done); | ||
321 | |||
322 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 437 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
323 | bs->regs = devm_ioremap_resource(&pdev->dev, res); | 438 | bs->regs = devm_ioremap_resource(&pdev->dev, res); |
324 | if (IS_ERR(bs->regs)) { | 439 | if (IS_ERR(bs->regs)) { |
@@ -343,13 +458,13 @@ static int bcm2835_spi_probe(struct platform_device *pdev) | |||
343 | clk_prepare_enable(bs->clk); | 458 | clk_prepare_enable(bs->clk); |
344 | 459 | ||
345 | err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, | 460 | err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, |
346 | dev_name(&pdev->dev), master); | 461 | dev_name(&pdev->dev), master); |
347 | if (err) { | 462 | if (err) { |
348 | dev_err(&pdev->dev, "could not request IRQ: %d\n", err); | 463 | dev_err(&pdev->dev, "could not request IRQ: %d\n", err); |
349 | goto out_clk_disable; | 464 | goto out_clk_disable; |
350 | } | 465 | } |
351 | 466 | ||
352 | /* initialise the hardware */ | 467 | /* initialise the hardware with the default polarities */ |
353 | bcm2835_wr(bs, BCM2835_SPI_CS, | 468 | bcm2835_wr(bs, BCM2835_SPI_CS, |
354 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); | 469 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); |
355 | 470 | ||
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c index 3fb91c81015a..1520554978a3 100644 --- a/drivers/spi/spi-bcm53xx.c +++ b/drivers/spi/spi-bcm53xx.c | |||
@@ -44,7 +44,7 @@ static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms) | |||
44 | u32 tmp; | 44 | u32 tmp; |
45 | 45 | ||
46 | /* SPE bit has to be 0 before we read MSPI STATUS */ | 46 | /* SPE bit has to be 0 before we read MSPI STATUS */ |
47 | deadline = jiffies + BCM53XXSPI_SPE_TIMEOUT_MS * HZ / 1000; | 47 | deadline = jiffies + msecs_to_jiffies(BCM53XXSPI_SPE_TIMEOUT_MS); |
48 | do { | 48 | do { |
49 | tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2); | 49 | tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2); |
50 | if (!(tmp & B53SPI_MSPI_SPCR2_SPE)) | 50 | if (!(tmp & B53SPI_MSPI_SPCR2_SPE)) |
@@ -56,7 +56,7 @@ static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms) | |||
56 | goto spi_timeout; | 56 | goto spi_timeout; |
57 | 57 | ||
58 | /* Check status */ | 58 | /* Check status */ |
59 | deadline = jiffies + timeout_ms * HZ / 1000; | 59 | deadline = jiffies + msecs_to_jiffies(timeout_ms); |
60 | do { | 60 | do { |
61 | tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS); | 61 | tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS); |
62 | if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) { | 62 | if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) { |
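msecs_to_jiffies() rounds up and handles clamping, while the open-coded timeout_ms * HZ / 1000 truncates towards zero: with HZ=100, a 3 ms timeout used to compute to 3 * 100 / 1000 = 0 jiffies, i.e. an immediate timeout, whereas msecs_to_jiffies(3) yields 1. The resulting wait pattern, sketched with a hypothetical done_condition():

	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (done_condition())
			return 0;
		cpu_relax();
	} while (!time_after(jiffies, deadline));
	return -ETIMEDOUT;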
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c index 37079937d2f7..a3d65b4f4944 100644 --- a/drivers/spi/spi-bfin5xx.c +++ b/drivers/spi/spi-bfin5xx.c | |||
@@ -559,7 +559,7 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
559 | struct spi_transfer *previous = NULL; | 559 | struct spi_transfer *previous = NULL; |
560 | struct bfin_spi_slave_data *chip = NULL; | 560 | struct bfin_spi_slave_data *chip = NULL; |
561 | unsigned int bits_per_word; | 561 | unsigned int bits_per_word; |
562 | u16 cr, cr_width, dma_width, dma_config; | 562 | u16 cr, cr_width = 0, dma_width, dma_config; |
563 | u32 tranf_success = 1; | 563 | u32 tranf_success = 1; |
564 | u8 full_duplex = 0; | 564 | u8 full_duplex = 0; |
565 | 565 | ||
@@ -648,7 +648,6 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
648 | } else if (bits_per_word == 8) { | 648 | } else if (bits_per_word == 8) { |
649 | drv_data->n_bytes = bits_per_word/8; | 649 | drv_data->n_bytes = bits_per_word/8; |
650 | drv_data->len = transfer->len; | 650 | drv_data->len = transfer->len; |
651 | cr_width = 0; | ||
652 | drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; | 651 | drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; |
653 | } | 652 | } |
654 | cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); | 653 | cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); |
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h index c616e41521be..06b34e5bcfa3 100644 --- a/drivers/spi/spi-bitbang-txrx.h +++ b/drivers/spi/spi-bitbang-txrx.h | |||
@@ -49,12 +49,17 @@ bitbang_txrx_be_cpha0(struct spi_device *spi, | |||
49 | { | 49 | { |
50 | /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ | 50 | /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ |
51 | 51 | ||
52 | bool oldbit = !(word & 1); | ||
52 | /* clock starts at inactive polarity */ | 53 | /* clock starts at inactive polarity */ |
53 | for (word <<= (32 - bits); likely(bits); bits--) { | 54 | for (word <<= (32 - bits); likely(bits); bits--) { |
54 | 55 | ||
55 | /* setup MSB (to slave) on trailing edge */ | 56 | /* setup MSB (to slave) on trailing edge */ |
56 | if ((flags & SPI_MASTER_NO_TX) == 0) | 57 | if ((flags & SPI_MASTER_NO_TX) == 0) { |
57 | setmosi(spi, word & (1 << 31)); | 58 | if ((word & (1 << 31)) != oldbit) { |
59 | setmosi(spi, word & (1 << 31)); | ||
60 | oldbit = word & (1 << 31); | ||
61 | } | ||
62 | } | ||
58 | spidelay(nsecs); /* T(setup) */ | 63 | spidelay(nsecs); /* T(setup) */ |
59 | 64 | ||
60 | setsck(spi, !cpol); | 65 | setsck(spi, !cpol); |
@@ -76,13 +81,18 @@ bitbang_txrx_be_cpha1(struct spi_device *spi, | |||
76 | { | 81 | { |
77 | /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ | 82 | /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ |
78 | 83 | ||
84 | bool oldbit = !(word & (1 << 31)); | ||
79 | /* clock starts at inactive polarity */ | 85 | /* clock starts at inactive polarity */ |
80 | for (word <<= (32 - bits); likely(bits); bits--) { | 86 | for (word <<= (32 - bits); likely(bits); bits--) { |
81 | 87 | ||
82 | /* setup MSB (to slave) on leading edge */ | 88 | /* setup MSB (to slave) on leading edge */ |
83 | setsck(spi, !cpol); | 89 | setsck(spi, !cpol); |
84 | if ((flags & SPI_MASTER_NO_TX) == 0) | 90 | if ((flags & SPI_MASTER_NO_TX) == 0) { |
85 | setmosi(spi, word & (1 << 31)); | 91 | if ((word & (1 << 31)) != oldbit) { |
92 | setmosi(spi, word & (1 << 31)); | ||
93 | oldbit = word & (1 << 31); | ||
94 | } | ||
95 | } | ||
86 | spidelay(nsecs); /* T(setup) */ | 96 | spidelay(nsecs); /* T(setup) */ |
87 | 97 | ||
88 | setsck(spi, cpol); | 98 | setsck(spi, cpol); |
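The bitbang-txrx change caches the level last driven onto MOSI and skips setmosi() when the outgoing bit is unchanged; on controllers where every GPIO write is a slow register or bus access, this removes a large fraction of the writes. A self-contained sketch of the idea follows, where gpio_set() is a hypothetical stand-in for setmosi() and the counter just makes the saving visible.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int writes;  /* counts simulated pin accesses */

    static void gpio_set(bool level)  /* hypothetical stand-in for setmosi() */
    {
        writes++;
        (void)level;
    }

    /* Shift 'bits' out MSB-first, touching the pin only when the bit
     * value actually changes -- the same trick as the patch above. */
    static void shift_out(uint32_t word, unsigned int bits)
    {
        bool bit, oldbit = !((word >> (bits - 1)) & 1); /* force 1st write */

        for (word <<= (32 - bits); bits; bits--, word <<= 1) {
            bit = word & 0x80000000u;
            if (bit != oldbit) {
                gpio_set(bit);
                oldbit = bit;
            }
            /* clock edges and setup delays would be driven here */
        }
    }

    int main(void)
    {
        shift_out(0xA5, 8);                    /* 10100101 */
        printf("0xA5: %u writes\n", writes);   /* 7 instead of 8 */
        writes = 0;
        shift_out(0xF0, 8);                    /* 11110000 */
        printf("0xF0: %u writes\n", writes);   /* 2 instead of 8 */
        return 0;
    }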
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 4f8c798e0633..bb1052e748f2 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -23,29 +23,31 @@ | |||
23 | #include "spi-dw.h" | 23 | #include "spi-dw.h" |
24 | 24 | ||
25 | #ifdef CONFIG_SPI_DW_MID_DMA | 25 | #ifdef CONFIG_SPI_DW_MID_DMA |
26 | #include <linux/intel_mid_dma.h> | ||
27 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
27 | #include <linux/platform_data/dma-dw.h> | ||
28 | 28 | ||
29 | #define RX_BUSY 0 | 29 | #define RX_BUSY 0 |
30 | #define TX_BUSY 1 | 30 | #define TX_BUSY 1 |
31 | 31 | ||
32 | struct mid_dma { | 32 | static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 }; |
33 | struct intel_mid_dma_slave dmas_tx; | 33 | static struct dw_dma_slave mid_dma_rx = { .src_id = 0 }; |
34 | struct intel_mid_dma_slave dmas_rx; | ||
35 | }; | ||
36 | 34 | ||
37 | static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) | 35 | static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) |
38 | { | 36 | { |
39 | struct dw_spi *dws = param; | 37 | struct dw_dma_slave *s = param; |
38 | |||
39 | if (s->dma_dev != chan->device->dev) | ||
40 | return false; | ||
40 | 41 | ||
41 | return dws->dma_dev == chan->device->dev; | 42 | chan->private = s; |
43 | return true; | ||
42 | } | 44 | } |
43 | 45 | ||
44 | static int mid_spi_dma_init(struct dw_spi *dws) | 46 | static int mid_spi_dma_init(struct dw_spi *dws) |
45 | { | 47 | { |
46 | struct mid_dma *dw_dma = dws->dma_priv; | ||
47 | struct pci_dev *dma_dev; | 48 | struct pci_dev *dma_dev; |
48 | struct intel_mid_dma_slave *rxs, *txs; | 49 | struct dw_dma_slave *tx = dws->dma_tx; |
50 | struct dw_dma_slave *rx = dws->dma_rx; | ||
49 | dma_cap_mask_t mask; | 51 | dma_cap_mask_t mask; |
50 | 52 | ||
51 | /* | 53 | /* |
@@ -56,28 +58,22 @@ static int mid_spi_dma_init(struct dw_spi *dws) | |||
56 | if (!dma_dev) | 58 | if (!dma_dev) |
57 | return -ENODEV; | 59 | return -ENODEV; |
58 | 60 | ||
59 | dws->dma_dev = &dma_dev->dev; | ||
60 | |||
61 | dma_cap_zero(mask); | 61 | dma_cap_zero(mask); |
62 | dma_cap_set(DMA_SLAVE, mask); | 62 | dma_cap_set(DMA_SLAVE, mask); |
63 | 63 | ||
64 | /* 1. Init rx channel */ | 64 | /* 1. Init rx channel */ |
65 | dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); | 65 | rx->dma_dev = &dma_dev->dev; |
66 | dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx); | ||
66 | if (!dws->rxchan) | 67 | if (!dws->rxchan) |
67 | goto err_exit; | 68 | goto err_exit; |
68 | rxs = &dw_dma->dmas_rx; | 69 | dws->master->dma_rx = dws->rxchan; |
69 | rxs->hs_mode = LNW_DMA_HW_HS; | ||
70 | rxs->cfg_mode = LNW_DMA_PER_TO_MEM; | ||
71 | dws->rxchan->private = rxs; | ||
72 | 70 | ||
73 | /* 2. Init tx channel */ | 71 | /* 2. Init tx channel */ |
74 | dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); | 72 | tx->dma_dev = &dma_dev->dev; |
73 | dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx); | ||
75 | if (!dws->txchan) | 74 | if (!dws->txchan) |
76 | goto free_rxchan; | 75 | goto free_rxchan; |
77 | txs = &dw_dma->dmas_tx; | 76 | dws->master->dma_tx = dws->txchan; |
78 | txs->hs_mode = LNW_DMA_HW_HS; | ||
79 | txs->cfg_mode = LNW_DMA_MEM_TO_PER; | ||
80 | dws->txchan->private = txs; | ||
81 | 77 | ||
82 | dws->dma_inited = 1; | 78 | dws->dma_inited = 1; |
83 | return 0; | 79 | return 0; |
@@ -100,6 +96,42 @@ static void mid_spi_dma_exit(struct dw_spi *dws) | |||
100 | dma_release_channel(dws->rxchan); | 96 | dma_release_channel(dws->rxchan); |
101 | } | 97 | } |
102 | 98 | ||
99 | static irqreturn_t dma_transfer(struct dw_spi *dws) | ||
100 | { | ||
101 | u16 irq_status = dw_readl(dws, DW_SPI_ISR); | ||
102 | |||
103 | if (!irq_status) | ||
104 | return IRQ_NONE; | ||
105 | |||
106 | dw_readl(dws, DW_SPI_ICR); | ||
107 | spi_reset_chip(dws); | ||
108 | |||
109 | dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__); | ||
110 | dws->master->cur_msg->status = -EIO; | ||
111 | spi_finalize_current_transfer(dws->master); | ||
112 | return IRQ_HANDLED; | ||
113 | } | ||
114 | |||
115 | static bool mid_spi_can_dma(struct spi_master *master, struct spi_device *spi, | ||
116 | struct spi_transfer *xfer) | ||
117 | { | ||
118 | struct dw_spi *dws = spi_master_get_devdata(master); | ||
119 | |||
120 | if (!dws->dma_inited) | ||
121 | return false; | ||
122 | |||
123 | return xfer->len > dws->fifo_len; | ||
124 | } | ||
125 | |||
126 | static enum dma_slave_buswidth convert_dma_width(u32 dma_width) { | ||
127 | if (dma_width == 1) | ||
128 | return DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
129 | else if (dma_width == 2) | ||
130 | return DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
131 | |||
132 | return DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
133 | } | ||
134 | |||
103 | /* | 135 | /* |
104 | * dws->dma_chan_busy is set before the dma transfer starts, callback for tx | 136 | * dws->dma_chan_busy is set before the dma transfer starts, callback for tx |
105 | * channel will clear a corresponding bit. | 137 | * channel will clear a corresponding bit. |
@@ -111,33 +143,30 @@ static void dw_spi_dma_tx_done(void *arg) | |||
111 | clear_bit(TX_BUSY, &dws->dma_chan_busy); | 143 | clear_bit(TX_BUSY, &dws->dma_chan_busy); |
112 | if (test_bit(RX_BUSY, &dws->dma_chan_busy)) | 144 | if (test_bit(RX_BUSY, &dws->dma_chan_busy)) |
113 | return; | 145 | return; |
114 | dw_spi_xfer_done(dws); | 146 | spi_finalize_current_transfer(dws->master); |
115 | } | 147 | } |
116 | 148 | ||
117 | static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws) | 149 | static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, |
150 | struct spi_transfer *xfer) | ||
118 | { | 151 | { |
119 | struct dma_slave_config txconf; | 152 | struct dma_slave_config txconf; |
120 | struct dma_async_tx_descriptor *txdesc; | 153 | struct dma_async_tx_descriptor *txdesc; |
121 | 154 | ||
122 | if (!dws->tx_dma) | 155 | if (!xfer->tx_buf) |
123 | return NULL; | 156 | return NULL; |
124 | 157 | ||
125 | txconf.direction = DMA_MEM_TO_DEV; | 158 | txconf.direction = DMA_MEM_TO_DEV; |
126 | txconf.dst_addr = dws->dma_addr; | 159 | txconf.dst_addr = dws->dma_addr; |
127 | txconf.dst_maxburst = LNW_DMA_MSIZE_16; | 160 | txconf.dst_maxburst = 16; |
128 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 161 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
129 | txconf.dst_addr_width = dws->dma_width; | 162 | txconf.dst_addr_width = convert_dma_width(dws->dma_width); |
130 | txconf.device_fc = false; | 163 | txconf.device_fc = false; |
131 | 164 | ||
132 | dmaengine_slave_config(dws->txchan, &txconf); | 165 | dmaengine_slave_config(dws->txchan, &txconf); |
133 | 166 | ||
134 | memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); | ||
135 | dws->tx_sgl.dma_address = dws->tx_dma; | ||
136 | dws->tx_sgl.length = dws->len; | ||
137 | |||
138 | txdesc = dmaengine_prep_slave_sg(dws->txchan, | 167 | txdesc = dmaengine_prep_slave_sg(dws->txchan, |
139 | &dws->tx_sgl, | 168 | xfer->tx_sg.sgl, |
140 | 1, | 169 | xfer->tx_sg.nents, |
141 | DMA_MEM_TO_DEV, | 170 | DMA_MEM_TO_DEV, |
142 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 171 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
143 | if (!txdesc) | 172 | if (!txdesc) |
@@ -160,33 +189,30 @@ static void dw_spi_dma_rx_done(void *arg) | |||
160 | clear_bit(RX_BUSY, &dws->dma_chan_busy); | 189 | clear_bit(RX_BUSY, &dws->dma_chan_busy); |
161 | if (test_bit(TX_BUSY, &dws->dma_chan_busy)) | 190 | if (test_bit(TX_BUSY, &dws->dma_chan_busy)) |
162 | return; | 191 | return; |
163 | dw_spi_xfer_done(dws); | 192 | spi_finalize_current_transfer(dws->master); |
164 | } | 193 | } |
165 | 194 | ||
166 | static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) | 195 | static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, |
196 | struct spi_transfer *xfer) | ||
167 | { | 197 | { |
168 | struct dma_slave_config rxconf; | 198 | struct dma_slave_config rxconf; |
169 | struct dma_async_tx_descriptor *rxdesc; | 199 | struct dma_async_tx_descriptor *rxdesc; |
170 | 200 | ||
171 | if (!dws->rx_dma) | 201 | if (!xfer->rx_buf) |
172 | return NULL; | 202 | return NULL; |
173 | 203 | ||
174 | rxconf.direction = DMA_DEV_TO_MEM; | 204 | rxconf.direction = DMA_DEV_TO_MEM; |
175 | rxconf.src_addr = dws->dma_addr; | 205 | rxconf.src_addr = dws->dma_addr; |
176 | rxconf.src_maxburst = LNW_DMA_MSIZE_16; | 206 | rxconf.src_maxburst = 16; |
177 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 207 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
178 | rxconf.src_addr_width = dws->dma_width; | 208 | rxconf.src_addr_width = convert_dma_width(dws->dma_width); |
179 | rxconf.device_fc = false; | 209 | rxconf.device_fc = false; |
180 | 210 | ||
181 | dmaengine_slave_config(dws->rxchan, &rxconf); | 211 | dmaengine_slave_config(dws->rxchan, &rxconf); |
182 | 212 | ||
183 | memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); | ||
184 | dws->rx_sgl.dma_address = dws->rx_dma; | ||
185 | dws->rx_sgl.length = dws->len; | ||
186 | |||
187 | rxdesc = dmaengine_prep_slave_sg(dws->rxchan, | 213 | rxdesc = dmaengine_prep_slave_sg(dws->rxchan, |
188 | &dws->rx_sgl, | 214 | xfer->rx_sg.sgl, |
189 | 1, | 215 | xfer->rx_sg.nents, |
190 | DMA_DEV_TO_MEM, | 216 | DMA_DEV_TO_MEM, |
191 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 217 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
192 | if (!rxdesc) | 218 | if (!rxdesc) |
@@ -198,37 +224,36 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) | |||
198 | return rxdesc; | 224 | return rxdesc; |
199 | } | 225 | } |
200 | 226 | ||
201 | static void dw_spi_dma_setup(struct dw_spi *dws) | 227 | static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) |
202 | { | 228 | { |
203 | u16 dma_ctrl = 0; | 229 | u16 dma_ctrl = 0; |
204 | 230 | ||
205 | spi_enable_chip(dws, 0); | 231 | dw_writel(dws, DW_SPI_DMARDLR, 0xf); |
232 | dw_writel(dws, DW_SPI_DMATDLR, 0x10); | ||
206 | 233 | ||
207 | dw_writew(dws, DW_SPI_DMARDLR, 0xf); | 234 | if (xfer->tx_buf) |
208 | dw_writew(dws, DW_SPI_DMATDLR, 0x10); | ||
209 | |||
210 | if (dws->tx_dma) | ||
211 | dma_ctrl |= SPI_DMA_TDMAE; | 235 | dma_ctrl |= SPI_DMA_TDMAE; |
212 | if (dws->rx_dma) | 236 | if (xfer->rx_buf) |
213 | dma_ctrl |= SPI_DMA_RDMAE; | 237 | dma_ctrl |= SPI_DMA_RDMAE; |
214 | dw_writew(dws, DW_SPI_DMACR, dma_ctrl); | 238 | dw_writel(dws, DW_SPI_DMACR, dma_ctrl); |
239 | |||
240 | /* Set the interrupt mask */ | ||
241 | spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); | ||
215 | 242 | ||
216 | spi_enable_chip(dws, 1); | 243 | dws->transfer_handler = dma_transfer; |
244 | |||
245 | return 0; | ||
217 | } | 246 | } |
218 | 247 | ||
219 | static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | 248 | static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) |
220 | { | 249 | { |
221 | struct dma_async_tx_descriptor *txdesc, *rxdesc; | 250 | struct dma_async_tx_descriptor *txdesc, *rxdesc; |
222 | 251 | ||
223 | /* 1. setup DMA related registers */ | 252 | /* Prepare the TX dma transfer */ |
224 | if (cs_change) | 253 | txdesc = dw_spi_dma_prepare_tx(dws, xfer); |
225 | dw_spi_dma_setup(dws); | ||
226 | 254 | ||
227 | /* 2. Prepare the TX dma transfer */ | 255 | /* Prepare the RX dma transfer */ |
228 | txdesc = dw_spi_dma_prepare_tx(dws); | 256 | rxdesc = dw_spi_dma_prepare_rx(dws, xfer); |
229 | |||
230 | /* 3. Prepare the RX dma transfer */ | ||
231 | rxdesc = dw_spi_dma_prepare_rx(dws); | ||
232 | 257 | ||
233 | /* rx must be started before tx due to the nature of spi */ | 258 | /* rx must be started before tx due to the nature of spi */ |

234 | if (rxdesc) { | 259 | if (rxdesc) { |
@@ -246,10 +271,25 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
246 | return 0; | 271 | return 0; |
247 | } | 272 | } |
248 | 273 | ||
274 | static void mid_spi_dma_stop(struct dw_spi *dws) | ||
275 | { | ||
276 | if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { | ||
277 | dmaengine_terminate_all(dws->txchan); | ||
278 | clear_bit(TX_BUSY, &dws->dma_chan_busy); | ||
279 | } | ||
280 | if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { | ||
281 | dmaengine_terminate_all(dws->rxchan); | ||
282 | clear_bit(RX_BUSY, &dws->dma_chan_busy); | ||
283 | } | ||
284 | } | ||
285 | |||
249 | static struct dw_spi_dma_ops mid_dma_ops = { | 286 | static struct dw_spi_dma_ops mid_dma_ops = { |
250 | .dma_init = mid_spi_dma_init, | 287 | .dma_init = mid_spi_dma_init, |
251 | .dma_exit = mid_spi_dma_exit, | 288 | .dma_exit = mid_spi_dma_exit, |
289 | .dma_setup = mid_spi_dma_setup, | ||
290 | .can_dma = mid_spi_can_dma, | ||
252 | .dma_transfer = mid_spi_dma_transfer, | 291 | .dma_transfer = mid_spi_dma_transfer, |
292 | .dma_stop = mid_spi_dma_stop, | ||
253 | }; | 293 | }; |
254 | #endif | 294 | #endif |
255 | 295 | ||
@@ -282,9 +322,8 @@ int dw_spi_mid_init(struct dw_spi *dws) | |||
282 | iounmap(clk_reg); | 322 | iounmap(clk_reg); |
283 | 323 | ||
284 | #ifdef CONFIG_SPI_DW_MID_DMA | 324 | #ifdef CONFIG_SPI_DW_MID_DMA |
285 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); | 325 | dws->dma_tx = &mid_dma_tx; |
286 | if (!dws->dma_priv) | 326 | dws->dma_rx = &mid_dma_rx; |
287 | return -ENOMEM; | ||
288 | dws->dma_ops = &mid_dma_ops; | 327 | dws->dma_ops = &mid_dma_ops; |
289 | #endif | 328 | #endif |
290 | return 0; | 329 | return 0; |
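The spi-dw-mid rework above drops the intel_mid_dma-specific slave structs in favor of generic struct dw_dma_slave parameters: the filter callback now matches a candidate channel against the wanted DMA device and, on success, attaches the slave parameters through chan->private. Here is a userspace reduction of that filter idiom; all types and names are simplified stand-ins for the dmaengine ones, not the kernel API itself.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct device { const char *name; };
    struct dma_chan { struct device *dev; void *private; };
    struct dw_dma_slave { struct device *dma_dev; int src_id, dst_id; };

    static bool filter(struct dma_chan *chan, void *param)
    {
        struct dw_dma_slave *s = param;

        if (s->dma_dev != chan->dev)
            return false;      /* channel belongs to another controller */
        chan->private = s;     /* attach slave config, as the driver does */
        return true;
    }

    /* Walk a channel list and return the first one the filter accepts,
     * mimicking dma_request_channel(mask, fn, param). */
    static struct dma_chan *request_channel(struct dma_chan *chans, size_t n,
                                            bool (*fn)(struct dma_chan *, void *),
                                            void *param)
    {
        for (size_t i = 0; i < n; i++)
            if (fn(&chans[i], param))
                return &chans[i];
        return NULL;
    }

    int main(void)
    {
        struct device dma0 = { "dma0" }, dma1 = { "dma1" };
        struct dma_chan chans[] = { { &dma1, NULL }, { &dma0, NULL } };
        struct dw_dma_slave rx = { .dma_dev = &dma0, .src_id = 0 };
        struct dma_chan *ch = request_channel(chans, 2, filter, &rx);

        printf("got channel on %s\n", ch ? ch->dev->name : "none");
        return 0;
    }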
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 4847afba89f4..8d67d03c71eb 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
@@ -28,11 +28,6 @@ | |||
28 | #include <linux/debugfs.h> | 28 | #include <linux/debugfs.h> |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | #define START_STATE ((void *)0) | ||
32 | #define RUNNING_STATE ((void *)1) | ||
33 | #define DONE_STATE ((void *)2) | ||
34 | #define ERROR_STATE ((void *)-1) | ||
35 | |||
36 | /* Slave spi_dev related */ | 31 | /* Slave spi_dev related */ |
37 | struct chip_data { | 32 | struct chip_data { |
38 | u16 cr0; | 33 | u16 cr0; |
@@ -143,13 +138,26 @@ static inline void dw_spi_debugfs_remove(struct dw_spi *dws) | |||
143 | } | 138 | } |
144 | #endif /* CONFIG_DEBUG_FS */ | 139 | #endif /* CONFIG_DEBUG_FS */ |
145 | 140 | ||
141 | static void dw_spi_set_cs(struct spi_device *spi, bool enable) | ||
142 | { | ||
143 | struct dw_spi *dws = spi_master_get_devdata(spi->master); | ||
144 | struct chip_data *chip = spi_get_ctldata(spi); | ||
145 | |||
146 | /* Chip select logic is inverted from spi_set_cs() */ | ||
147 | if (chip && chip->cs_control) | ||
148 | chip->cs_control(!enable); | ||
149 | |||
150 | if (!enable) | ||
151 | dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); | ||
152 | } | ||
153 | |||
146 | /* Return the max entries we can fill into tx fifo */ | 154 | /* Return the max entries we can fill into tx fifo */ |
147 | static inline u32 tx_max(struct dw_spi *dws) | 155 | static inline u32 tx_max(struct dw_spi *dws) |
148 | { | 156 | { |
149 | u32 tx_left, tx_room, rxtx_gap; | 157 | u32 tx_left, tx_room, rxtx_gap; |
150 | 158 | ||
151 | tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; | 159 | tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; |
152 | tx_room = dws->fifo_len - dw_readw(dws, DW_SPI_TXFLR); | 160 | tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR); |
153 | 161 | ||
154 | /* | 162 | /* |
155 | * Another concern is about the tx/rx mismatch, we | 163 | * Another concern is about the tx/rx mismatch, we |
@@ -170,7 +178,7 @@ static inline u32 rx_max(struct dw_spi *dws) | |||
170 | { | 178 | { |
171 | u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; | 179 | u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; |
172 | 180 | ||
173 | return min_t(u32, rx_left, dw_readw(dws, DW_SPI_RXFLR)); | 181 | return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR)); |
174 | } | 182 | } |
175 | 183 | ||
176 | static void dw_writer(struct dw_spi *dws) | 184 | static void dw_writer(struct dw_spi *dws) |
@@ -186,7 +194,7 @@ static void dw_writer(struct dw_spi *dws) | |||
186 | else | 194 | else |
187 | txw = *(u16 *)(dws->tx); | 195 | txw = *(u16 *)(dws->tx); |
188 | } | 196 | } |
189 | dw_writew(dws, DW_SPI_DR, txw); | 197 | dw_writel(dws, DW_SPI_DR, txw); |
190 | dws->tx += dws->n_bytes; | 198 | dws->tx += dws->n_bytes; |
191 | } | 199 | } |
192 | } | 200 | } |
@@ -197,7 +205,7 @@ static void dw_reader(struct dw_spi *dws) | |||
197 | u16 rxw; | 205 | u16 rxw; |
198 | 206 | ||
199 | while (max--) { | 207 | while (max--) { |
200 | rxw = dw_readw(dws, DW_SPI_DR); | 208 | rxw = dw_readl(dws, DW_SPI_DR); |
201 | /* Care rx only if the transfer's original "rx" is not null */ | 209 | /* Care rx only if the transfer's original "rx" is not null */ |
202 | if (dws->rx_end - dws->len) { | 210 | if (dws->rx_end - dws->len) { |
203 | if (dws->n_bytes == 1) | 211 | if (dws->n_bytes == 1) |
@@ -209,103 +217,22 @@ static void dw_reader(struct dw_spi *dws) | |||
209 | } | 217 | } |
210 | } | 218 | } |
211 | 219 | ||
212 | static void *next_transfer(struct dw_spi *dws) | ||
213 | { | ||
214 | struct spi_message *msg = dws->cur_msg; | ||
215 | struct spi_transfer *trans = dws->cur_transfer; | ||
216 | |||
217 | /* Move to next transfer */ | ||
218 | if (trans->transfer_list.next != &msg->transfers) { | ||
219 | dws->cur_transfer = | ||
220 | list_entry(trans->transfer_list.next, | ||
221 | struct spi_transfer, | ||
222 | transfer_list); | ||
223 | return RUNNING_STATE; | ||
224 | } | ||
225 | |||
226 | return DONE_STATE; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Note: first step is the protocol driver prepares | ||
231 | * a dma-capable memory, and this func just need translate | ||
232 | * the virt addr to physical | ||
233 | */ | ||
234 | static int map_dma_buffers(struct dw_spi *dws) | ||
235 | { | ||
236 | if (!dws->cur_msg->is_dma_mapped | ||
237 | || !dws->dma_inited | ||
238 | || !dws->cur_chip->enable_dma | ||
239 | || !dws->dma_ops) | ||
240 | return 0; | ||
241 | |||
242 | if (dws->cur_transfer->tx_dma) | ||
243 | dws->tx_dma = dws->cur_transfer->tx_dma; | ||
244 | |||
245 | if (dws->cur_transfer->rx_dma) | ||
246 | dws->rx_dma = dws->cur_transfer->rx_dma; | ||
247 | |||
248 | return 1; | ||
249 | } | ||
250 | |||
251 | /* Caller already set message->status; dma and pio irqs are blocked */ | ||
252 | static void giveback(struct dw_spi *dws) | ||
253 | { | ||
254 | struct spi_transfer *last_transfer; | ||
255 | struct spi_message *msg; | ||
256 | |||
257 | msg = dws->cur_msg; | ||
258 | dws->cur_msg = NULL; | ||
259 | dws->cur_transfer = NULL; | ||
260 | dws->prev_chip = dws->cur_chip; | ||
261 | dws->cur_chip = NULL; | ||
262 | dws->dma_mapped = 0; | ||
263 | |||
264 | last_transfer = list_last_entry(&msg->transfers, struct spi_transfer, | ||
265 | transfer_list); | ||
266 | |||
267 | if (!last_transfer->cs_change) | ||
268 | spi_chip_sel(dws, msg->spi, 0); | ||
269 | |||
270 | spi_finalize_current_message(dws->master); | ||
271 | } | ||
272 | |||
273 | static void int_error_stop(struct dw_spi *dws, const char *msg) | 220 | static void int_error_stop(struct dw_spi *dws, const char *msg) |
274 | { | 221 | { |
275 | /* Stop the hw */ | 222 | spi_reset_chip(dws); |
276 | spi_enable_chip(dws, 0); | ||
277 | 223 | ||
278 | dev_err(&dws->master->dev, "%s\n", msg); | 224 | dev_err(&dws->master->dev, "%s\n", msg); |
279 | dws->cur_msg->state = ERROR_STATE; | 225 | dws->master->cur_msg->status = -EIO; |
280 | tasklet_schedule(&dws->pump_transfers); | 226 | spi_finalize_current_transfer(dws->master); |
281 | } | 227 | } |
282 | 228 | ||
283 | void dw_spi_xfer_done(struct dw_spi *dws) | ||
284 | { | ||
285 | /* Update total byte transferred return count actual bytes read */ | ||
286 | dws->cur_msg->actual_length += dws->len; | ||
287 | |||
288 | /* Move to next transfer */ | ||
289 | dws->cur_msg->state = next_transfer(dws); | ||
290 | |||
291 | /* Handle end of message */ | ||
292 | if (dws->cur_msg->state == DONE_STATE) { | ||
293 | dws->cur_msg->status = 0; | ||
294 | giveback(dws); | ||
295 | } else | ||
296 | tasklet_schedule(&dws->pump_transfers); | ||
297 | } | ||
298 | EXPORT_SYMBOL_GPL(dw_spi_xfer_done); | ||
299 | |||
300 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) | 229 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) |
301 | { | 230 | { |
302 | u16 irq_status = dw_readw(dws, DW_SPI_ISR); | 231 | u16 irq_status = dw_readl(dws, DW_SPI_ISR); |
303 | 232 | ||
304 | /* Error handling */ | 233 | /* Error handling */ |
305 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { | 234 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { |
306 | dw_readw(dws, DW_SPI_TXOICR); | 235 | dw_readl(dws, DW_SPI_ICR); |
307 | dw_readw(dws, DW_SPI_RXOICR); | ||
308 | dw_readw(dws, DW_SPI_RXUICR); | ||
309 | int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); | 236 | int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); |
310 | return IRQ_HANDLED; | 237 | return IRQ_HANDLED; |
311 | } | 238 | } |
@@ -313,7 +240,7 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
313 | dw_reader(dws); | 240 | dw_reader(dws); |
314 | if (dws->rx_end == dws->rx) { | 241 | if (dws->rx_end == dws->rx) { |
315 | spi_mask_intr(dws, SPI_INT_TXEI); | 242 | spi_mask_intr(dws, SPI_INT_TXEI); |
316 | dw_spi_xfer_done(dws); | 243 | spi_finalize_current_transfer(dws->master); |
317 | return IRQ_HANDLED; | 244 | return IRQ_HANDLED; |
318 | } | 245 | } |
319 | if (irq_status & SPI_INT_TXEI) { | 246 | if (irq_status & SPI_INT_TXEI) { |
@@ -328,13 +255,14 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
328 | 255 | ||
329 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) | 256 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) |
330 | { | 257 | { |
331 | struct dw_spi *dws = dev_id; | 258 | struct spi_master *master = dev_id; |
332 | u16 irq_status = dw_readw(dws, DW_SPI_ISR) & 0x3f; | 259 | struct dw_spi *dws = spi_master_get_devdata(master); |
260 | u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f; | ||
333 | 261 | ||
334 | if (!irq_status) | 262 | if (!irq_status) |
335 | return IRQ_NONE; | 263 | return IRQ_NONE; |
336 | 264 | ||
337 | if (!dws->cur_msg) { | 265 | if (!master->cur_msg) { |
338 | spi_mask_intr(dws, SPI_INT_TXEI); | 266 | spi_mask_intr(dws, SPI_INT_TXEI); |
339 | return IRQ_HANDLED; | 267 | return IRQ_HANDLED; |
340 | } | 268 | } |
@@ -343,7 +271,7 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) | |||
343 | } | 271 | } |
344 | 272 | ||
345 | /* Must be called inside pump_transfers() */ | 273 | /* Must be called inside pump_transfers() */ |
346 | static void poll_transfer(struct dw_spi *dws) | 274 | static int poll_transfer(struct dw_spi *dws) |
347 | { | 275 | { |
348 | do { | 276 | do { |
349 | dw_writer(dws); | 277 | dw_writer(dws); |
@@ -351,64 +279,32 @@ static void poll_transfer(struct dw_spi *dws) | |||
351 | cpu_relax(); | 279 | cpu_relax(); |
352 | } while (dws->rx_end > dws->rx); | 280 | } while (dws->rx_end > dws->rx); |
353 | 281 | ||
354 | dw_spi_xfer_done(dws); | 282 | return 0; |
355 | } | 283 | } |
356 | 284 | ||
357 | static void pump_transfers(unsigned long data) | 285 | static int dw_spi_transfer_one(struct spi_master *master, |
286 | struct spi_device *spi, struct spi_transfer *transfer) | ||
358 | { | 287 | { |
359 | struct dw_spi *dws = (struct dw_spi *)data; | 288 | struct dw_spi *dws = spi_master_get_devdata(master); |
360 | struct spi_message *message = NULL; | 289 | struct chip_data *chip = spi_get_ctldata(spi); |
361 | struct spi_transfer *transfer = NULL; | ||
362 | struct spi_transfer *previous = NULL; | ||
363 | struct spi_device *spi = NULL; | ||
364 | struct chip_data *chip = NULL; | ||
365 | u8 bits = 0; | ||
366 | u8 imask = 0; | 290 | u8 imask = 0; |
367 | u8 cs_change = 0; | 291 | u16 txlevel = 0; |
368 | u16 txint_level = 0; | ||
369 | u16 clk_div = 0; | 292 | u16 clk_div = 0; |
370 | u32 speed = 0; | 293 | u32 speed = 0; |
371 | u32 cr0 = 0; | 294 | u32 cr0 = 0; |
295 | int ret; | ||
372 | 296 | ||
373 | /* Get current state information */ | 297 | dws->dma_mapped = 0; |
374 | message = dws->cur_msg; | ||
375 | transfer = dws->cur_transfer; | ||
376 | chip = dws->cur_chip; | ||
377 | spi = message->spi; | ||
378 | |||
379 | if (message->state == ERROR_STATE) { | ||
380 | message->status = -EIO; | ||
381 | goto early_exit; | ||
382 | } | ||
383 | |||
384 | /* Handle end of message */ | ||
385 | if (message->state == DONE_STATE) { | ||
386 | message->status = 0; | ||
387 | goto early_exit; | ||
388 | } | ||
389 | |||
390 | /* Delay if requested at end of transfer */ | ||
391 | if (message->state == RUNNING_STATE) { | ||
392 | previous = list_entry(transfer->transfer_list.prev, | ||
393 | struct spi_transfer, | ||
394 | transfer_list); | ||
395 | if (previous->delay_usecs) | ||
396 | udelay(previous->delay_usecs); | ||
397 | } | ||
398 | |||
399 | dws->n_bytes = chip->n_bytes; | 298 | dws->n_bytes = chip->n_bytes; |
400 | dws->dma_width = chip->dma_width; | 299 | dws->dma_width = chip->dma_width; |
401 | dws->cs_control = chip->cs_control; | ||
402 | 300 | ||
403 | dws->rx_dma = transfer->rx_dma; | ||
404 | dws->tx_dma = transfer->tx_dma; | ||
405 | dws->tx = (void *)transfer->tx_buf; | 301 | dws->tx = (void *)transfer->tx_buf; |
406 | dws->tx_end = dws->tx + transfer->len; | 302 | dws->tx_end = dws->tx + transfer->len; |
407 | dws->rx = transfer->rx_buf; | 303 | dws->rx = transfer->rx_buf; |
408 | dws->rx_end = dws->rx + transfer->len; | 304 | dws->rx_end = dws->rx + transfer->len; |
409 | dws->len = dws->cur_transfer->len; | 305 | dws->len = transfer->len; |
410 | if (chip != dws->prev_chip) | 306 | |
411 | cs_change = 1; | 307 | spi_enable_chip(dws, 0); |
412 | 308 | ||
413 | cr0 = chip->cr0; | 309 | cr0 = chip->cr0; |
414 | 310 | ||
@@ -416,32 +312,37 @@ static void pump_transfers(unsigned long data) | |||
416 | if (transfer->speed_hz) { | 312 | if (transfer->speed_hz) { |
417 | speed = chip->speed_hz; | 313 | speed = chip->speed_hz; |
418 | 314 | ||
419 | if ((transfer->speed_hz != speed) || (!chip->clk_div)) { | 315 | if ((transfer->speed_hz != speed) || !chip->clk_div) { |
420 | speed = transfer->speed_hz; | 316 | speed = transfer->speed_hz; |
421 | 317 | ||
422 | /* clk_div doesn't support odd number */ | 318 | /* clk_div doesn't support odd number */ |
423 | clk_div = dws->max_freq / speed; | 319 | clk_div = (dws->max_freq / speed + 1) & 0xfffe; |
424 | clk_div = (clk_div + 1) & 0xfffe; | ||
425 | 320 | ||
426 | chip->speed_hz = speed; | 321 | chip->speed_hz = speed; |
427 | chip->clk_div = clk_div; | 322 | chip->clk_div = clk_div; |
323 | |||
324 | spi_set_clk(dws, chip->clk_div); | ||
428 | } | 325 | } |
429 | } | 326 | } |
430 | if (transfer->bits_per_word) { | 327 | if (transfer->bits_per_word) { |
431 | bits = transfer->bits_per_word; | 328 | if (transfer->bits_per_word == 8) { |
432 | dws->n_bytes = dws->dma_width = bits >> 3; | 329 | dws->n_bytes = 1; |
433 | cr0 = (bits - 1) | 330 | dws->dma_width = 1; |
331 | } else if (transfer->bits_per_word == 16) { | ||
332 | dws->n_bytes = 2; | ||
333 | dws->dma_width = 2; | ||
334 | } | ||
335 | cr0 = (transfer->bits_per_word - 1) | ||
434 | | (chip->type << SPI_FRF_OFFSET) | 336 | | (chip->type << SPI_FRF_OFFSET) |
435 | | (spi->mode << SPI_MODE_OFFSET) | 337 | | (spi->mode << SPI_MODE_OFFSET) |
436 | | (chip->tmode << SPI_TMOD_OFFSET); | 338 | | (chip->tmode << SPI_TMOD_OFFSET); |
437 | } | 339 | } |
438 | message->state = RUNNING_STATE; | ||
439 | 340 | ||
440 | /* | 341 | /* |
441 | * Adjust transfer mode if necessary. Requires platform dependent | 342 | * Adjust transfer mode if necessary. Requires platform dependent |
442 | * chipselect mechanism. | 343 | * chipselect mechanism. |
443 | */ | 344 | */ |
444 | if (dws->cs_control) { | 345 | if (chip->cs_control) { |
445 | if (dws->rx && dws->tx) | 346 | if (dws->rx && dws->tx) |
446 | chip->tmode = SPI_TMOD_TR; | 347 | chip->tmode = SPI_TMOD_TR; |
447 | else if (dws->rx) | 348 | else if (dws->rx) |
@@ -453,80 +354,60 @@ static void pump_transfers(unsigned long data) | |||
453 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); | 354 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); |
454 | } | 355 | } |
455 | 356 | ||
357 | dw_writel(dws, DW_SPI_CTRL0, cr0); | ||
358 | |||
456 | /* Check if current transfer is a DMA transaction */ | 359 | /* Check if current transfer is a DMA transaction */ |
457 | dws->dma_mapped = map_dma_buffers(dws); | 360 | if (master->can_dma && master->can_dma(master, spi, transfer)) |
361 | dws->dma_mapped = master->cur_msg_mapped; | ||
362 | |||
363 | /* For poll mode just disable all interrupts */ | ||
364 | spi_mask_intr(dws, 0xff); | ||
458 | 365 | ||
459 | /* | 366 | /* |
460 | * Interrupt mode | 367 | * Interrupt mode |
461 | * we only need to set the TXEI IRQ, as TX/RX always happen synchronously | 368 | * we only need to set the TXEI IRQ, as TX/RX always happen synchronously |
462 | */ | 369 | */ |
463 | if (!dws->dma_mapped && !chip->poll_mode) { | 370 | if (dws->dma_mapped) { |
464 | int templen = dws->len / dws->n_bytes; | 371 | ret = dws->dma_ops->dma_setup(dws, transfer); |
465 | 372 | if (ret < 0) { | |
466 | txint_level = dws->fifo_len / 2; | 373 | spi_enable_chip(dws, 1); |
467 | txint_level = (templen > txint_level) ? txint_level : templen; | 374 | return ret; |
375 | } | ||
376 | } else if (!chip->poll_mode) { | ||
377 | txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes); | ||
378 | dw_writel(dws, DW_SPI_TXFLTR, txlevel); | ||
468 | 379 | ||
380 | /* Set the interrupt mask */ | ||
469 | imask |= SPI_INT_TXEI | SPI_INT_TXOI | | 381 | imask |= SPI_INT_TXEI | SPI_INT_TXOI | |
470 | SPI_INT_RXUI | SPI_INT_RXOI; | 382 | SPI_INT_RXUI | SPI_INT_RXOI; |
383 | spi_umask_intr(dws, imask); | ||
384 | |||
471 | dws->transfer_handler = interrupt_transfer; | 385 | dws->transfer_handler = interrupt_transfer; |
472 | } | 386 | } |
473 | 387 | ||
474 | /* | 388 | spi_enable_chip(dws, 1); |
475 | * Reprogram registers only if | ||
476 | * 1. chip select changes | ||
477 | * 2. clk_div is changed | ||
478 | * 3. control value changes | ||
479 | */ | ||
480 | if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) { | ||
481 | spi_enable_chip(dws, 0); | ||
482 | |||
483 | if (dw_readw(dws, DW_SPI_CTRL0) != cr0) | ||
484 | dw_writew(dws, DW_SPI_CTRL0, cr0); | ||
485 | |||
486 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
487 | spi_chip_sel(dws, spi, 1); | ||
488 | |||
489 | /* Set the interrupt mask, for poll mode just disable all int */ | ||
490 | spi_mask_intr(dws, 0xff); | ||
491 | if (imask) | ||
492 | spi_umask_intr(dws, imask); | ||
493 | if (txint_level) | ||
494 | dw_writew(dws, DW_SPI_TXFLTR, txint_level); | ||
495 | 389 | ||
496 | spi_enable_chip(dws, 1); | 390 | if (dws->dma_mapped) { |
497 | if (cs_change) | 391 | ret = dws->dma_ops->dma_transfer(dws, transfer); |
498 | dws->prev_chip = chip; | 392 | if (ret < 0) |
393 | return ret; | ||
499 | } | 394 | } |
500 | 395 | ||
501 | if (dws->dma_mapped) | ||
502 | dws->dma_ops->dma_transfer(dws, cs_change); | ||
503 | |||
504 | if (chip->poll_mode) | 396 | if (chip->poll_mode) |
505 | poll_transfer(dws); | 397 | return poll_transfer(dws); |
506 | |||
507 | return; | ||
508 | 398 | ||
509 | early_exit: | 399 | return 1; |
510 | giveback(dws); | ||
511 | } | 400 | } |
512 | 401 | ||
513 | static int dw_spi_transfer_one_message(struct spi_master *master, | 402 | static void dw_spi_handle_err(struct spi_master *master, |
514 | struct spi_message *msg) | 403 | struct spi_message *msg) |
515 | { | 404 | { |
516 | struct dw_spi *dws = spi_master_get_devdata(master); | 405 | struct dw_spi *dws = spi_master_get_devdata(master); |
517 | 406 | ||
518 | dws->cur_msg = msg; | 407 | if (dws->dma_mapped) |
519 | /* Initial message state */ | 408 | dws->dma_ops->dma_stop(dws); |
520 | dws->cur_msg->state = START_STATE; | ||
521 | dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, | ||
522 | struct spi_transfer, | ||
523 | transfer_list); | ||
524 | dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); | ||
525 | |||
526 | /* Launch transfers */ | ||
527 | tasklet_schedule(&dws->pump_transfers); | ||
528 | 409 | ||
529 | return 0; | 410 | spi_reset_chip(dws); |
530 | } | 411 | } |
531 | 412 | ||
532 | /* This may be called twice for each spi dev */ | 413 | /* This may be called twice for each spi dev */ |
@@ -561,8 +442,6 @@ static int dw_spi_setup(struct spi_device *spi) | |||
561 | 442 | ||
562 | chip->rx_threshold = 0; | 443 | chip->rx_threshold = 0; |
563 | chip->tx_threshold = 0; | 444 | chip->tx_threshold = 0; |
564 | |||
565 | chip->enable_dma = chip_info->enable_dma; | ||
566 | } | 445 | } |
567 | 446 | ||
568 | if (spi->bits_per_word == 8) { | 447 | if (spi->bits_per_word == 8) { |
@@ -610,9 +489,7 @@ static void dw_spi_cleanup(struct spi_device *spi) | |||
610 | /* Restart the controller, disable all interrupts, clean rx fifo */ | 489 | /* Restart the controller, disable all interrupts, clean rx fifo */ |
611 | static void spi_hw_init(struct device *dev, struct dw_spi *dws) | 490 | static void spi_hw_init(struct device *dev, struct dw_spi *dws) |
612 | { | 491 | { |
613 | spi_enable_chip(dws, 0); | 492 | spi_reset_chip(dws); |
614 | spi_mask_intr(dws, 0xff); | ||
615 | spi_enable_chip(dws, 1); | ||
616 | 493 | ||
617 | /* | 494 | /* |
618 | * Try to detect the FIFO depth if not set by interface driver, | 495 | * Try to detect the FIFO depth if not set by interface driver, |
@@ -622,11 +499,11 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) | |||
622 | u32 fifo; | 499 | u32 fifo; |
623 | 500 | ||
624 | for (fifo = 1; fifo < 256; fifo++) { | 501 | for (fifo = 1; fifo < 256; fifo++) { |
625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 502 | dw_writel(dws, DW_SPI_TXFLTR, fifo); |
626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 503 | if (fifo != dw_readl(dws, DW_SPI_TXFLTR)) |
627 | break; | 504 | break; |
628 | } | 505 | } |
629 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 506 | dw_writel(dws, DW_SPI_TXFLTR, 0); |
630 | 507 | ||
631 | dws->fifo_len = (fifo == 1) ? 0 : fifo; | 508 | dws->fifo_len = (fifo == 1) ? 0 : fifo; |
632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); | 509 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); |
@@ -646,13 +523,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
646 | 523 | ||
647 | dws->master = master; | 524 | dws->master = master; |
648 | dws->type = SSI_MOTO_SPI; | 525 | dws->type = SSI_MOTO_SPI; |
649 | dws->prev_chip = NULL; | ||
650 | dws->dma_inited = 0; | 526 | dws->dma_inited = 0; |
651 | dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); | 527 | dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); |
652 | snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num); | 528 | snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num); |
653 | 529 | ||
654 | ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED, | 530 | ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED, |
655 | dws->name, dws); | 531 | dws->name, master); |
656 | if (ret < 0) { | 532 | if (ret < 0) { |
657 | dev_err(&master->dev, "can not get IRQ\n"); | 533 | dev_err(&master->dev, "can not get IRQ\n"); |
658 | goto err_free_master; | 534 | goto err_free_master; |
@@ -664,7 +540,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
664 | master->num_chipselect = dws->num_cs; | 540 | master->num_chipselect = dws->num_cs; |
665 | master->setup = dw_spi_setup; | 541 | master->setup = dw_spi_setup; |
666 | master->cleanup = dw_spi_cleanup; | 542 | master->cleanup = dw_spi_cleanup; |
667 | master->transfer_one_message = dw_spi_transfer_one_message; | 543 | master->set_cs = dw_spi_set_cs; |
544 | master->transfer_one = dw_spi_transfer_one; | ||
545 | master->handle_err = dw_spi_handle_err; | ||
668 | master->max_speed_hz = dws->max_freq; | 546 | master->max_speed_hz = dws->max_freq; |
669 | master->dev.of_node = dev->of_node; | 547 | master->dev.of_node = dev->of_node; |
670 | 548 | ||
@@ -676,11 +554,11 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
676 | if (ret) { | 554 | if (ret) { |
677 | dev_warn(dev, "DMA init failed\n"); | 555 | dev_warn(dev, "DMA init failed\n"); |
678 | dws->dma_inited = 0; | 556 | dws->dma_inited = 0; |
557 | } else { | ||
558 | master->can_dma = dws->dma_ops->can_dma; | ||
679 | } | 559 | } |
680 | } | 560 | } |
681 | 561 | ||
682 | tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws); | ||
683 | |||
684 | spi_master_set_devdata(master, dws); | 562 | spi_master_set_devdata(master, dws); |
685 | ret = devm_spi_register_master(dev, master); | 563 | ret = devm_spi_register_master(dev, master); |
686 | if (ret) { | 564 | if (ret) { |
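One detail worth noting in the spi-dw.c conversion above: the divider math was folded into a single expression, clk_div = (dws->max_freq / speed + 1) & 0xfffe, because the DW SSI baud register only accepts even dividers. The runnable check below shows how odd quotients get bumped to the next even value; the 26 MHz controller clock is an assumed figure for illustration.

    #include <stdio.h>

    /* Round an odd quotient up to the next even divider, as the
     * driver does; even quotients pass through unchanged. */
    static unsigned int dw_clk_div(unsigned int max_freq, unsigned int speed)
    {
        return (max_freq / speed + 1) & 0xfffe;
    }

    int main(void)
    {
        unsigned int max_freq = 26000000;  /* assumed 26 MHz SSI clock */
        unsigned int d1 = dw_clk_div(max_freq, 13000000);
        unsigned int d2 = dw_clk_div(max_freq, 5000000);

        /* 26/13 = 2 is already even: exact 13 MHz */
        printf("13 MHz -> div %u (%u Hz)\n", d1, max_freq / d1);
        /* 26/5 = 5 is odd: bumped to 6, actual ~4.33 MHz */
        printf(" 5 MHz -> div %u (%u Hz)\n", d2, max_freq / d2);
        return 0;
    }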
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 3d32be68c142..6c91391c1a4f 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h | |||
@@ -91,12 +91,15 @@ struct dw_spi; | |||
91 | struct dw_spi_dma_ops { | 91 | struct dw_spi_dma_ops { |
92 | int (*dma_init)(struct dw_spi *dws); | 92 | int (*dma_init)(struct dw_spi *dws); |
93 | void (*dma_exit)(struct dw_spi *dws); | 93 | void (*dma_exit)(struct dw_spi *dws); |
94 | int (*dma_transfer)(struct dw_spi *dws, int cs_change); | 94 | int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer); |
95 | bool (*can_dma)(struct spi_master *master, struct spi_device *spi, | ||
96 | struct spi_transfer *xfer); | ||
97 | int (*dma_transfer)(struct dw_spi *dws, struct spi_transfer *xfer); | ||
98 | void (*dma_stop)(struct dw_spi *dws); | ||
95 | }; | 99 | }; |
96 | 100 | ||
97 | struct dw_spi { | 101 | struct dw_spi { |
98 | struct spi_master *master; | 102 | struct spi_master *master; |
99 | struct spi_device *cur_dev; | ||
100 | enum dw_ssi_type type; | 103 | enum dw_ssi_type type; |
101 | char name[16]; | 104 | char name[16]; |
102 | 105 | ||
@@ -109,41 +112,26 @@ struct dw_spi { | |||
109 | u16 bus_num; | 112 | u16 bus_num; |
110 | u16 num_cs; /* supported slave numbers */ | 113 | u16 num_cs; /* supported slave numbers */ |
111 | 114 | ||
112 | /* Message Transfer pump */ | ||
113 | struct tasklet_struct pump_transfers; | ||
114 | |||
115 | /* Current message transfer state info */ | 115 | /* Current message transfer state info */ |
116 | struct spi_message *cur_msg; | ||
117 | struct spi_transfer *cur_transfer; | ||
118 | struct chip_data *cur_chip; | ||
119 | struct chip_data *prev_chip; | ||
120 | size_t len; | 116 | size_t len; |
121 | void *tx; | 117 | void *tx; |
122 | void *tx_end; | 118 | void *tx_end; |
123 | void *rx; | 119 | void *rx; |
124 | void *rx_end; | 120 | void *rx_end; |
125 | int dma_mapped; | 121 | int dma_mapped; |
126 | dma_addr_t rx_dma; | ||
127 | dma_addr_t tx_dma; | ||
128 | size_t rx_map_len; | ||
129 | size_t tx_map_len; | ||
130 | u8 n_bytes; /* current is a 1/2 bytes op */ | 122 | u8 n_bytes; /* current is a 1/2 bytes op */ |
131 | u8 max_bits_per_word; /* maxim is 16b */ | ||
132 | u32 dma_width; | 123 | u32 dma_width; |
133 | irqreturn_t (*transfer_handler)(struct dw_spi *dws); | 124 | irqreturn_t (*transfer_handler)(struct dw_spi *dws); |
134 | void (*cs_control)(u32 command); | ||
135 | 125 | ||
136 | /* Dma info */ | 126 | /* DMA info */ |
137 | int dma_inited; | 127 | int dma_inited; |
138 | struct dma_chan *txchan; | 128 | struct dma_chan *txchan; |
139 | struct scatterlist tx_sgl; | ||
140 | struct dma_chan *rxchan; | 129 | struct dma_chan *rxchan; |
141 | struct scatterlist rx_sgl; | ||
142 | unsigned long dma_chan_busy; | 130 | unsigned long dma_chan_busy; |
143 | struct device *dma_dev; | ||
144 | dma_addr_t dma_addr; /* phy address of the Data register */ | 131 | dma_addr_t dma_addr; /* phy address of the Data register */ |
145 | struct dw_spi_dma_ops *dma_ops; | 132 | struct dw_spi_dma_ops *dma_ops; |
146 | void *dma_priv; /* platform relate info */ | 133 | void *dma_tx; |
134 | void *dma_rx; | ||
147 | 135 | ||
148 | /* Bus interface info */ | 136 | /* Bus interface info */ |
149 | void *priv; | 137 | void *priv; |
@@ -162,16 +150,6 @@ static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val) | |||
162 | __raw_writel(val, dws->regs + offset); | 150 | __raw_writel(val, dws->regs + offset); |
163 | } | 151 | } |
164 | 152 | ||
165 | static inline u16 dw_readw(struct dw_spi *dws, u32 offset) | ||
166 | { | ||
167 | return __raw_readw(dws->regs + offset); | ||
168 | } | ||
169 | |||
170 | static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val) | ||
171 | { | ||
172 | __raw_writew(val, dws->regs + offset); | ||
173 | } | ||
174 | |||
175 | static inline void spi_enable_chip(struct dw_spi *dws, int enable) | 153 | static inline void spi_enable_chip(struct dw_spi *dws, int enable) |
176 | { | 154 | { |
177 | dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0)); | 155 | dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0)); |
@@ -182,22 +160,6 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div) | |||
182 | dw_writel(dws, DW_SPI_BAUDR, div); | 160 | dw_writel(dws, DW_SPI_BAUDR, div); |
183 | } | 161 | } |
184 | 162 | ||
185 | static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi, | ||
186 | int active) | ||
187 | { | ||
188 | u16 cs = spi->chip_select; | ||
189 | int gpio_val = active ? (spi->mode & SPI_CS_HIGH) : | ||
190 | !(spi->mode & SPI_CS_HIGH); | ||
191 | |||
192 | if (dws->cs_control) | ||
193 | dws->cs_control(active); | ||
194 | if (gpio_is_valid(spi->cs_gpio)) | ||
195 | gpio_set_value(spi->cs_gpio, gpio_val); | ||
196 | |||
197 | if (active) | ||
198 | dw_writel(dws, DW_SPI_SER, 1 << cs); | ||
199 | } | ||
200 | |||
201 | /* Disable IRQ bits */ | 163 | /* Disable IRQ bits */ |
202 | static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) | 164 | static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) |
203 | { | 165 | { |
@@ -217,15 +179,26 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask) | |||
217 | } | 179 | } |
218 | 180 | ||
219 | /* | 181 | /* |
182 | * Disables the SPI controller, masks all interrupts, and re-enables the | ||
183 | * controller. Transmit and receive FIFO buffers are cleared when the | ||
184 | * device is disabled. | ||
185 | */ | ||
186 | static inline void spi_reset_chip(struct dw_spi *dws) | ||
187 | { | ||
188 | spi_enable_chip(dws, 0); | ||
189 | spi_mask_intr(dws, 0xff); | ||
190 | spi_enable_chip(dws, 1); | ||
191 | } | ||
192 | |||
193 | /* | ||
220 | * Each SPI slave device that works with the dw_api controller should | 194 | * Each SPI slave device that works with the dw_api controller should |
221 | * have such a structure claiming its working mode (PIO/DMA etc), | 195 | * have such a structure claiming its working mode (poll or PIO/DMA), |
222 | * which can be saved in the "controller_data" member of the | 196 | * which can be saved in the "controller_data" member of the |
223 | * struct spi_device. | 197 | * struct spi_device. |
224 | */ | 198 | */ |
225 | struct dw_spi_chip { | 199 | struct dw_spi_chip { |
226 | u8 poll_mode; /* 1 for controller polling mode */ | 200 | u8 poll_mode; /* 1 for controller polling mode */ |
227 | u8 type; /* SPI/SSP/MicroWire */ | 201 | u8 type; /* SPI/SSP/MicroWire */ |
228 | u8 enable_dma; | ||
229 | void (*cs_control)(u32 command); | 202 | void (*cs_control)(u32 command); |
230 | }; | 203 | }; |
231 | 204 | ||
@@ -233,7 +206,6 @@ extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws); | |||
233 | extern void dw_spi_remove_host(struct dw_spi *dws); | 206 | extern void dw_spi_remove_host(struct dw_spi *dws); |
234 | extern int dw_spi_suspend_host(struct dw_spi *dws); | 207 | extern int dw_spi_suspend_host(struct dw_spi *dws); |
235 | extern int dw_spi_resume_host(struct dw_spi *dws); | 208 | extern int dw_spi_resume_host(struct dw_spi *dws); |
236 | extern void dw_spi_xfer_done(struct dw_spi *dws); | ||
237 | 209 | ||
238 | /* platform related setup */ | 210 | /* platform related setup */ |
239 | extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ | 211 | extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ |
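The widened dw_spi_dma_ops table above mirrors the spi core's transfer lifecycle: can_dma decides, dma_setup programs the controller, dma_transfer launches the descriptors, and dma_stop is the error path invoked from handle_err. A tiny sketch of dispatching through such an ops table follows; the names and types are invented for the example, not the kernel API.

    #include <stdio.h>

    struct xfer { int len; };

    /* Function-pointer table mirroring the shape of dw_spi_dma_ops:
     * the core calls through it without knowing the DMA backend. */
    struct dma_ops {
        int  (*setup)(struct xfer *x);
        int  (*transfer)(struct xfer *x);
        void (*stop)(void);
    };

    static int  mid_setup(struct xfer *x)    { printf("setup len=%d\n", x->len); return 0; }
    static int  mid_transfer(struct xfer *x) { printf("go len=%d\n", x->len); return 0; }
    static void mid_stop(void)               { printf("stop\n"); }

    static const struct dma_ops mid_ops = {
        .setup = mid_setup, .transfer = mid_transfer, .stop = mid_stop,
    };

    static int run(const struct dma_ops *ops, struct xfer *x)
    {
        if (ops->setup(x) < 0 || ops->transfer(x) < 0) {
            ops->stop();   /* error path, like dma_stop in handle_err */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct xfer x = { .len = 64 };
        return run(&mid_ops, &x);
    }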
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index d1a39249704a..5fe54cda309f 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/math64.h> | ||
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
24 | #include <linux/of.h> | 25 | #include <linux/of.h> |
25 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
@@ -29,6 +30,7 @@ | |||
29 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
30 | #include <linux/spi/spi.h> | 31 | #include <linux/spi/spi.h> |
31 | #include <linux/spi/spi_bitbang.h> | 32 | #include <linux/spi/spi_bitbang.h> |
33 | #include <linux/time.h> | ||
32 | 34 | ||
33 | #define DRIVER_NAME "fsl-dspi" | 35 | #define DRIVER_NAME "fsl-dspi" |
34 | 36 | ||
@@ -51,7 +53,7 @@ | |||
51 | #define SPI_CTAR_CPOL(x) ((x) << 26) | 53 | #define SPI_CTAR_CPOL(x) ((x) << 26) |
52 | #define SPI_CTAR_CPHA(x) ((x) << 25) | 54 | #define SPI_CTAR_CPHA(x) ((x) << 25) |
53 | #define SPI_CTAR_LSBFE(x) ((x) << 24) | 55 | #define SPI_CTAR_LSBFE(x) ((x) << 24) |
54 | #define SPI_CTAR_PCSSCR(x) (((x) & 0x00000003) << 22) | 56 | #define SPI_CTAR_PCSSCK(x) (((x) & 0x00000003) << 22) |
55 | #define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20) | 57 | #define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20) |
56 | #define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18) | 58 | #define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18) |
57 | #define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16) | 59 | #define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16) |
@@ -59,6 +61,7 @@ | |||
59 | #define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8) | 61 | #define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8) |
60 | #define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4) | 62 | #define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4) |
61 | #define SPI_CTAR_BR(x) ((x) & 0x0000000f) | 63 | #define SPI_CTAR_BR(x) ((x) & 0x0000000f) |
64 | #define SPI_CTAR_SCALE_BITS 0xf | ||
62 | 65 | ||
63 | #define SPI_CTAR0_SLAVE 0x0c | 66 | #define SPI_CTAR0_SLAVE 0x0c |
64 | 67 | ||
@@ -148,23 +151,66 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, | |||
148 | 16, 32, 64, 128, | 151 | 16, 32, 64, 128, |
149 | 256, 512, 1024, 2048, | 152 | 256, 512, 1024, 2048, |
150 | 4096, 8192, 16384, 32768 }; | 153 | 4096, 8192, 16384, 32768 }; |
151 | int temp, i = 0, j = 0; | 154 | int scale_needed, scale, minscale = INT_MAX; |
155 | int i, j; | ||
156 | |||
157 | scale_needed = clkrate / speed_hz; | ||
158 | if (clkrate % speed_hz) | ||
159 | scale_needed++; | ||
160 | |||
161 | for (i = 0; i < ARRAY_SIZE(brs); i++) | ||
162 | for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) { | ||
163 | scale = brs[i] * pbr_tbl[j]; | ||
164 | if (scale >= scale_needed) { | ||
165 | if (scale < minscale) { | ||
166 | minscale = scale; | ||
167 | *br = i; | ||
168 | *pbr = j; | ||
169 | } | ||
170 | break; | ||
171 | } | ||
172 | } | ||
152 | 173 | ||
153 | temp = clkrate / 2 / speed_hz; | 174 | if (minscale == INT_MAX) { |
175 | pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n", | ||
176 | speed_hz, clkrate); | ||
177 | *pbr = ARRAY_SIZE(pbr_tbl) - 1; | ||
178 | *br = ARRAY_SIZE(brs) - 1; | ||
179 | } | ||
180 | } | ||
154 | 181 | ||
155 | for (i = 0; i < ARRAY_SIZE(pbr_tbl); i++) | 182 | static void ns_delay_scale(char *psc, char *sc, int delay_ns, |
156 | for (j = 0; j < ARRAY_SIZE(brs); j++) { | 183 | unsigned long clkrate) |
157 | if (pbr_tbl[i] * brs[j] >= temp) { | 184 | { |
158 | *pbr = i; | 185 | int pscale_tbl[4] = {1, 3, 5, 7}; |
159 | *br = j; | 186 | int scale_needed, scale, minscale = INT_MAX; |
160 | return; | 187 | int i, j; |
188 | u32 remainder; | ||
189 | |||
190 | scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC, | ||
191 | &remainder); | ||
192 | if (remainder) | ||
193 | scale_needed++; | ||
194 | |||
195 | for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++) | ||
196 | for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) { | ||
197 | scale = pscale_tbl[i] * (2 << j); | ||
198 | if (scale >= scale_needed) { | ||
199 | if (scale < minscale) { | ||
200 | minscale = scale; | ||
201 | *psc = i; | ||
202 | *sc = j; | ||
203 | } | ||
204 | break; | ||
161 | } | 205 | } |
162 | } | 206 | } |
163 | 207 | ||
164 | pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld\ | 208 | if (minscale == INT_MAX) { |
165 | ,we use the max prescaler value.\n", speed_hz, clkrate); | 209 | pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value", |
166 | *pbr = ARRAY_SIZE(pbr_tbl) - 1; | 210 | delay_ns, clkrate); |
167 | *br = ARRAY_SIZE(brs) - 1; | 211 | *psc = ARRAY_SIZE(pscale_tbl) - 1; |
212 | *sc = SPI_CTAR_SCALE_BITS; | ||
213 | } | ||
168 | } | 214 | } |
169 | 215 | ||
170 | static int dspi_transfer_write(struct fsl_dspi *dspi) | 216 | static int dspi_transfer_write(struct fsl_dspi *dspi) |
@@ -345,7 +391,10 @@ static int dspi_setup(struct spi_device *spi) | |||
345 | { | 391 | { |
346 | struct chip_data *chip; | 392 | struct chip_data *chip; |
347 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); | 393 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); |
348 | unsigned char br = 0, pbr = 0, fmsz = 0; | 394 | u32 cs_sck_delay = 0, sck_cs_delay = 0; |
395 | unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0; | ||
396 | unsigned char pasc = 0, asc = 0, fmsz = 0; | ||
397 | unsigned long clkrate; | ||
349 | 398 | ||
350 | if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { | 399 | if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { |
351 | fmsz = spi->bits_per_word - 1; | 400 | fmsz = spi->bits_per_word - 1; |
@@ -362,18 +411,34 @@ static int dspi_setup(struct spi_device *spi) | |||
362 | return -ENOMEM; | 411 | return -ENOMEM; |
363 | } | 412 | } |
364 | 413 | ||
414 | of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay", | ||
415 | &cs_sck_delay); | ||
416 | |||
417 | of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay", | ||
418 | &sck_cs_delay); | ||
419 | |||
365 | chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | | 420 | chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | |
366 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; | 421 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; |
367 | 422 | ||
368 | chip->void_write_data = 0; | 423 | chip->void_write_data = 0; |
369 | 424 | ||
370 | hz_to_spi_baud(&pbr, &br, | 425 | clkrate = clk_get_rate(dspi->clk); |
371 | spi->max_speed_hz, clk_get_rate(dspi->clk)); | 426 | hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); |
427 | |||
428 | /* Set PCS to SCK delay scale values */ | ||
429 | ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate); | ||
430 | |||
431 | /* Set After SCK delay scale values */ | ||
432 | ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate); | ||
372 | 433 | ||
373 | chip->ctar_val = SPI_CTAR_FMSZ(fmsz) | 434 | chip->ctar_val = SPI_CTAR_FMSZ(fmsz) |
374 | | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) | 435 | | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) |
375 | | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) | 436 | | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) |
376 | | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) | 437 | | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) |
438 | | SPI_CTAR_PCSSCK(pcssck) | ||
439 | | SPI_CTAR_CSSCK(cssck) | ||
440 | | SPI_CTAR_PASC(pasc) | ||
441 | | SPI_CTAR_ASC(asc) | ||
377 | | SPI_CTAR_PBR(pbr) | 442 | | SPI_CTAR_PBR(pbr) |
378 | | SPI_CTAR_BR(br); | 443 | | SPI_CTAR_BR(br); |
379 | 444 | ||
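The reworked hz_to_spi_baud() above (and the new ns_delay_scale(), which follows the same shape) computes scale_needed as the rounded-up clock ratio and then scans every prescaler/scaler pair, keeping the smallest product that still meets it, instead of taking the first match. A standalone reduction of that search, using the same tables as the driver:

    #include <limits.h>
    #include <stdio.h>

    static const int pbr_tbl[4] = { 2, 3, 5, 7 };
    static const int brs[16] = { 2, 4, 6, 8, 16, 32, 64, 128,
                                 256, 512, 1024, 2048, 4096, 8192, 16384, 32768 };

    /* Exhaustive search for the smallest combined divisor that still
     * keeps SCK at or below the requested rate. */
    static void hz_to_baud(int *pbr, int *br, long speed_hz, long clkrate)
    {
        long needed = clkrate / speed_hz + (clkrate % speed_hz ? 1 : 0);
        long best = LONG_MAX;

        for (int i = 0; i < 16; i++)
            for (int j = 0; j < 4; j++) {
                long scale = (long)brs[i] * pbr_tbl[j];
                if (scale >= needed && scale < best) {
                    best = scale;
                    *br = i;
                    *pbr = j;
                }
            }
    }

    int main(void)
    {
        int pbr = 0, br = 0;

        hz_to_baud(&pbr, &br, 10000000, 66000000);  /* 10 MHz from 66 MHz */
        /* needed = ceil(66/10) = 7; smallest product >= 7 is 4 * 2 = 8 */
        printf("pbr=%d (x%d), br=%d (x%d), actual=%ld Hz\n",
               pbr, pbr_tbl[pbr], br, brs[br],
               66000000L / (pbr_tbl[pbr] * brs[br]));
        return 0;
    }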
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index e649bc7d4c08..788e2b176a4f 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/dmaengine.h> | 14 | #include <linux/dmaengine.h> |
15 | #include <linux/gpio.h> | ||
15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
16 | #include <linux/io.h> | 17 | #include <linux/io.h> |
17 | #include <linux/irq.h> | 18 | #include <linux/irq.h> |
@@ -122,36 +123,31 @@ static inline void spfi_start(struct img_spfi *spfi) | |||
122 | spfi_writel(spfi, val, SPFI_CONTROL); | 123 | spfi_writel(spfi, val, SPFI_CONTROL); |
123 | } | 124 | } |
124 | 125 | ||
125 | static inline void spfi_stop(struct img_spfi *spfi) | ||
126 | { | ||
127 | u32 val; | ||
128 | |||
129 | val = spfi_readl(spfi, SPFI_CONTROL); | ||
130 | val &= ~SPFI_CONTROL_SPFI_EN; | ||
131 | spfi_writel(spfi, val, SPFI_CONTROL); | ||
132 | } | ||
133 | |||
134 | static inline void spfi_reset(struct img_spfi *spfi) | 126 | static inline void spfi_reset(struct img_spfi *spfi) |
135 | { | 127 | { |
136 | spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL); | 128 | spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL); |
137 | udelay(1); | ||
138 | spfi_writel(spfi, 0, SPFI_CONTROL); | 129 | spfi_writel(spfi, 0, SPFI_CONTROL); |
139 | } | 130 | } |
140 | 131 | ||
141 | static void spfi_flush_tx_fifo(struct img_spfi *spfi) | 132 | static int spfi_wait_all_done(struct img_spfi *spfi) |
142 | { | 133 | { |
143 | unsigned long timeout = jiffies + msecs_to_jiffies(10); | 134 | unsigned long timeout = jiffies + msecs_to_jiffies(50); |
144 | 135 | ||
145 | spfi_writel(spfi, SPFI_INTERRUPT_SDE, SPFI_INTERRUPT_CLEAR); | ||
146 | while (time_before(jiffies, timeout)) { | 136 | while (time_before(jiffies, timeout)) { |
147 | if (spfi_readl(spfi, SPFI_INTERRUPT_STATUS) & | 137 | u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); |
148 | SPFI_INTERRUPT_SDE) | 138 | |
149 | return; | 139 | if (status & SPFI_INTERRUPT_ALLDONETRIG) { |
140 | spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG, | ||
141 | SPFI_INTERRUPT_CLEAR); | ||
142 | return 0; | ||
143 | } | ||
150 | cpu_relax(); | 144 | cpu_relax(); |
151 | } | 145 | } |
152 | 146 | ||
153 | dev_err(spfi->dev, "Timed out waiting for FIFO to drain\n"); | 147 | dev_err(spfi->dev, "Timed out waiting for transaction to complete\n"); |
154 | spfi_reset(spfi); | 148 | spfi_reset(spfi); |
149 | |||
150 | return -ETIMEDOUT; | ||
155 | } | 151 | } |
156 | 152 | ||
157 | static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf, | 153 | static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf, |
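spfi_wait_all_done() above replaces the old per-FIFO drain with a single wait for the ALLDONE trigger: poll the status register until the bit sets or a jiffies deadline passes, and reset the controller on timeout. The same deadline-polling shape in plain C, with a fake status register and wall-clock milliseconds standing in for jiffies:

    #include <stdio.h>
    #include <time.h>

    #define ALLDONE 0x1u  /* illustrative status bit */

    static unsigned int fake_status;  /* stands in for SPFI_INTERRUPT_STATUS */

    static long long now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
    }

    /* Poll until the completion bit sets or the deadline passes --
     * the same shape as spfi_wait_all_done(), minus cpu_relax(). */
    static int wait_all_done(int timeout_ms)
    {
        long long deadline = now_ms() + timeout_ms;

        while (now_ms() < deadline) {
            if (fake_status & ALLDONE) {
                fake_status &= ~ALLDONE;  /* ack, like INTERRUPT_CLEAR */
                return 0;
            }
        }
        return -1;  /* the caller would reset the controller here */
    }

    int main(void)
    {
        fake_status = ALLDONE;  /* pretend the hardware finished */
        printf("done: %d\n", wait_all_done(50));
        printf("timeout: %d\n", wait_all_done(50));  /* bit no longer set */
        return 0;
    }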
@@ -237,6 +233,7 @@ static int img_spfi_start_pio(struct spi_master *master, | |||
237 | const void *tx_buf = xfer->tx_buf; | 233 | const void *tx_buf = xfer->tx_buf; |
238 | void *rx_buf = xfer->rx_buf; | 234 | void *rx_buf = xfer->rx_buf; |
239 | unsigned long timeout; | 235 | unsigned long timeout; |
236 | int ret; | ||
240 | 237 | ||
241 | if (tx_buf) | 238 | if (tx_buf) |
242 | tx_bytes = xfer->len; | 239 | tx_bytes = xfer->len; |
@@ -269,16 +266,15 @@ static int img_spfi_start_pio(struct spi_master *master, | |||
269 | cpu_relax(); | 266 | cpu_relax(); |
270 | } | 267 | } |
271 | 268 | ||
269 | ret = spfi_wait_all_done(spfi); | ||
270 | if (ret < 0) | ||
271 | return ret; | ||
272 | |||
272 | if (rx_bytes > 0 || tx_bytes > 0) { | 273 | if (rx_bytes > 0 || tx_bytes > 0) { |
273 | dev_err(spfi->dev, "PIO transfer timed out\n"); | 274 | dev_err(spfi->dev, "PIO transfer timed out\n"); |
274 | spfi_reset(spfi); | ||
275 | return -ETIMEDOUT; | 275 | return -ETIMEDOUT; |
276 | } | 276 | } |
277 | 277 | ||
278 | if (tx_buf) | ||
279 | spfi_flush_tx_fifo(spfi); | ||
280 | spfi_stop(spfi); | ||
281 | |||
282 | return 0; | 278 | return 0; |
283 | } | 279 | } |
284 | 280 | ||
@@ -287,14 +283,12 @@ static void img_spfi_dma_rx_cb(void *data) | |||
287 | struct img_spfi *spfi = data; | 283 | struct img_spfi *spfi = data; |
288 | unsigned long flags; | 284 | unsigned long flags; |
289 | 285 | ||
290 | spin_lock_irqsave(&spfi->lock, flags); | 286 | spfi_wait_all_done(spfi); |
291 | 287 | ||
288 | spin_lock_irqsave(&spfi->lock, flags); | ||
292 | spfi->rx_dma_busy = false; | 289 | spfi->rx_dma_busy = false; |
293 | if (!spfi->tx_dma_busy) { | 290 | if (!spfi->tx_dma_busy) |
294 | spfi_stop(spfi); | ||
295 | spi_finalize_current_transfer(spfi->master); | 291 | spi_finalize_current_transfer(spfi->master); |
296 | } | ||
297 | |||
298 | spin_unlock_irqrestore(&spfi->lock, flags); | 292 | spin_unlock_irqrestore(&spfi->lock, flags); |
299 | } | 293 | } |
300 | 294 | ||
@@ -303,16 +297,12 @@ static void img_spfi_dma_tx_cb(void *data) | |||
303 | struct img_spfi *spfi = data; | 297 | struct img_spfi *spfi = data; |
304 | unsigned long flags; | 298 | unsigned long flags; |
305 | 299 | ||
306 | spfi_flush_tx_fifo(spfi); | 300 | spfi_wait_all_done(spfi); |
307 | 301 | ||
308 | spin_lock_irqsave(&spfi->lock, flags); | 302 | spin_lock_irqsave(&spfi->lock, flags); |
309 | |||
310 | spfi->tx_dma_busy = false; | 303 | spfi->tx_dma_busy = false; |
311 | if (!spfi->rx_dma_busy) { | 304 | if (!spfi->rx_dma_busy) |
312 | spfi_stop(spfi); | ||
313 | spi_finalize_current_transfer(spfi->master); | 305 | spi_finalize_current_transfer(spfi->master); |
314 | } | ||
315 | |||
316 | spin_unlock_irqrestore(&spfi->lock, flags); | 306 | spin_unlock_irqrestore(&spfi->lock, flags); |
317 | } | 307 | } |
318 | 308 | ||
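The two callbacks above finalize the transfer only once both DMA directions have gone idle, with the busy flags guarded by a single lock. A minimal sketch of that pattern, using a hypothetical struct ctx and a completion in place of the driver's struct img_spfi and spi_finalize_current_transfer():

struct ctx {
	spinlock_t lock;
	bool tx_busy, rx_busy;
	struct completion all_done;
};

static void dma_done(struct ctx *c, bool is_tx)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (is_tx)
		c->tx_busy = false;
	else
		c->rx_busy = false;
	/* whichever callback clears the last busy flag signals completion */
	if (!c->tx_busy && !c->rx_busy)
		complete(&c->all_done);
	spin_unlock_irqrestore(&c->lock, flags);
}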
@@ -397,6 +387,75 @@ stop_dma: | |||
397 | return -EIO; | 387 | return -EIO; |
398 | } | 388 | } |
399 | 389 | ||
390 | static void img_spfi_handle_err(struct spi_master *master, | ||
391 | struct spi_message *msg) | ||
392 | { | ||
393 | struct img_spfi *spfi = spi_master_get_devdata(master); | ||
394 | unsigned long flags; | ||
395 | |||
396 | /* | ||
397 | * Stop all DMA and reset the controller if the previous transaction | ||
 398 | * timed out and never completed its DMA. | ||
399 | */ | ||
400 | spin_lock_irqsave(&spfi->lock, flags); | ||
401 | if (spfi->tx_dma_busy || spfi->rx_dma_busy) { | ||
402 | spfi->tx_dma_busy = false; | ||
403 | spfi->rx_dma_busy = false; | ||
404 | |||
405 | dmaengine_terminate_all(spfi->tx_ch); | ||
406 | dmaengine_terminate_all(spfi->rx_ch); | ||
407 | } | ||
408 | spin_unlock_irqrestore(&spfi->lock, flags); | ||
409 | } | ||
410 | |||
411 | static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg) | ||
412 | { | ||
413 | struct img_spfi *spfi = spi_master_get_devdata(master); | ||
414 | u32 val; | ||
415 | |||
416 | val = spfi_readl(spfi, SPFI_PORT_STATE); | ||
417 | if (msg->spi->mode & SPI_CPHA) | ||
418 | val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select); | ||
419 | else | ||
420 | val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select); | ||
421 | if (msg->spi->mode & SPI_CPOL) | ||
422 | val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select); | ||
423 | else | ||
424 | val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select); | ||
425 | spfi_writel(spfi, val, SPFI_PORT_STATE); | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static int img_spfi_unprepare(struct spi_master *master, | ||
431 | struct spi_message *msg) | ||
432 | { | ||
433 | struct img_spfi *spfi = spi_master_get_devdata(master); | ||
434 | |||
435 | spfi_reset(spfi); | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | static int img_spfi_setup(struct spi_device *spi) | ||
441 | { | ||
442 | int ret; | ||
443 | |||
444 | ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? | ||
445 | GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH, | ||
446 | dev_name(&spi->dev)); | ||
447 | if (ret) | ||
448 | dev_err(&spi->dev, "can't request chipselect gpio %d\n", | ||
449 | spi->cs_gpio); | ||
450 | |||
451 | return ret; | ||
452 | } | ||
453 | |||
454 | static void img_spfi_cleanup(struct spi_device *spi) | ||
455 | { | ||
456 | gpio_free(spi->cs_gpio); | ||
457 | } | ||
458 | |||
400 | static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | 459 | static void img_spfi_config(struct spi_master *master, struct spi_device *spi, |
401 | struct spi_transfer *xfer) | 460 | struct spi_transfer *xfer) |
402 | { | 461 | { |
@@ -405,10 +464,10 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | |||
405 | 464 | ||
406 | /* | 465 | /* |
407 | * output = spfi_clk * (BITCLK / 512), where BITCLK must be a | 466 | * output = spfi_clk * (BITCLK / 512), where BITCLK must be a |
408 | * power of 2 up to 256 (where 255 == 256 since BITCLK is 8 bits) | 467 | * power of 2 up to 128 |
409 | */ | 468 | */ |
410 | div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz); | 469 | div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz); |
411 | div = clamp(512 / (1 << get_count_order(div)), 1, 255); | 470 | div = clamp(512 / (1 << get_count_order(div)), 1, 128); |
412 | 471 | ||
413 | val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select)); | 472 | val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select)); |
414 | val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK << | 473 | val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK << |
@@ -416,6 +475,9 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | |||
416 | val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT; | 475 | val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT; |
417 | spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select)); | 476 | spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select)); |
418 | 477 | ||
478 | spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT, | ||
479 | SPFI_TRANSACTION); | ||
480 | |||
419 | val = spfi_readl(spfi, SPFI_CONTROL); | 481 | val = spfi_readl(spfi, SPFI_CONTROL); |
420 | val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA); | 482 | val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA); |
421 | if (xfer->tx_buf) | 483 | if (xfer->tx_buf) |
@@ -429,25 +491,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | |||
429 | else if (xfer->tx_nbits == SPI_NBITS_QUAD && | 491 | else if (xfer->tx_nbits == SPI_NBITS_QUAD && |
430 | xfer->rx_nbits == SPI_NBITS_QUAD) | 492 | xfer->rx_nbits == SPI_NBITS_QUAD) |
431 | val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; | 493 | val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; |
432 | val &= ~SPFI_CONTROL_CONTINUE; | ||
433 | if (!xfer->cs_change && !list_is_last(&xfer->transfer_list, | ||
434 | &master->cur_msg->transfers)) | ||
435 | val |= SPFI_CONTROL_CONTINUE; | ||
436 | spfi_writel(spfi, val, SPFI_CONTROL); | 494 | spfi_writel(spfi, val, SPFI_CONTROL); |
437 | |||
438 | val = spfi_readl(spfi, SPFI_PORT_STATE); | ||
439 | if (spi->mode & SPI_CPHA) | ||
440 | val |= SPFI_PORT_STATE_CK_PHASE(spi->chip_select); | ||
441 | else | ||
442 | val &= ~SPFI_PORT_STATE_CK_PHASE(spi->chip_select); | ||
443 | if (spi->mode & SPI_CPOL) | ||
444 | val |= SPFI_PORT_STATE_CK_POL(spi->chip_select); | ||
445 | else | ||
446 | val &= ~SPFI_PORT_STATE_CK_POL(spi->chip_select); | ||
447 | spfi_writel(spfi, val, SPFI_PORT_STATE); | ||
448 | |||
449 | spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT, | ||
450 | SPFI_TRANSACTION); | ||
451 | } | 495 | } |
452 | 496 | ||
453 | static int img_spfi_transfer_one(struct spi_master *master, | 497 | static int img_spfi_transfer_one(struct spi_master *master, |
@@ -455,8 +499,6 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
455 | struct spi_transfer *xfer) | 499 | struct spi_transfer *xfer) |
456 | { | 500 | { |
457 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | 501 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); |
458 | bool dma_reset = false; | ||
459 | unsigned long flags; | ||
460 | int ret; | 502 | int ret; |
461 | 503 | ||
462 | if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { | 504 | if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { |
@@ -466,23 +508,6 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
466 | return -EINVAL; | 508 | return -EINVAL; |
467 | } | 509 | } |
468 | 510 | ||
469 | /* | ||
470 | * Stop all DMA and reset the controller if the previous transaction | ||
471 | * timed-out and never completed it's DMA. | ||
472 | */ | ||
473 | spin_lock_irqsave(&spfi->lock, flags); | ||
474 | if (spfi->tx_dma_busy || spfi->rx_dma_busy) { | ||
475 | dev_err(spfi->dev, "SPI DMA still busy\n"); | ||
476 | dma_reset = true; | ||
477 | } | ||
478 | spin_unlock_irqrestore(&spfi->lock, flags); | ||
479 | |||
480 | if (dma_reset) { | ||
481 | dmaengine_terminate_all(spfi->tx_ch); | ||
482 | dmaengine_terminate_all(spfi->rx_ch); | ||
483 | spfi_reset(spfi); | ||
484 | } | ||
485 | |||
486 | img_spfi_config(master, spi, xfer); | 511 | img_spfi_config(master, spi, xfer); |
487 | if (master->can_dma && master->can_dma(master, spi, xfer)) | 512 | if (master->can_dma && master->can_dma(master, spi, xfer)) |
488 | ret = img_spfi_start_dma(master, spi, xfer); | 513 | ret = img_spfi_start_dma(master, spi, xfer); |
@@ -492,17 +517,6 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
492 | return ret; | 517 | return ret; |
493 | } | 518 | } |
494 | 519 | ||
495 | static void img_spfi_set_cs(struct spi_device *spi, bool enable) | ||
496 | { | ||
497 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | ||
498 | u32 val; | ||
499 | |||
500 | val = spfi_readl(spfi, SPFI_PORT_STATE); | ||
501 | val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK << SPFI_PORT_STATE_DEV_SEL_SHIFT); | ||
502 | val |= spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT; | ||
503 | spfi_writel(spfi, val, SPFI_PORT_STATE); | ||
504 | } | ||
505 | |||
506 | static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, | 520 | static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, |
507 | struct spi_transfer *xfer) | 521 | struct spi_transfer *xfer) |
508 | { | 522 | { |
@@ -591,14 +605,17 @@ static int img_spfi_probe(struct platform_device *pdev) | |||
591 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL; | 605 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL; |
592 | if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode")) | 606 | if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode")) |
593 | master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD; | 607 | master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD; |
594 | master->num_chipselect = 5; | ||
595 | master->dev.of_node = pdev->dev.of_node; | 608 | master->dev.of_node = pdev->dev.of_node; |
596 | master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8); | 609 | master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8); |
597 | master->max_speed_hz = clk_get_rate(spfi->spfi_clk); | 610 | master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4; |
598 | master->min_speed_hz = master->max_speed_hz / 512; | 611 | master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512; |
599 | 612 | ||
600 | master->set_cs = img_spfi_set_cs; | 613 | master->setup = img_spfi_setup; |
614 | master->cleanup = img_spfi_cleanup; | ||
601 | master->transfer_one = img_spfi_transfer_one; | 615 | master->transfer_one = img_spfi_transfer_one; |
616 | master->prepare_message = img_spfi_prepare; | ||
617 | master->unprepare_message = img_spfi_unprepare; | ||
618 | master->handle_err = img_spfi_handle_err; | ||
602 | 619 | ||
603 | spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx"); | 620 | spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx"); |
604 | spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx"); | 621 | spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx"); |
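The BITCLK arithmetic in img_spfi_config() above is compact; here is a stand-alone rework of it that runs in userspace, with made-up clock and speed values and count_order() standing in for the kernel's get_count_order():

#include <stdio.h>

static unsigned int count_order(unsigned long n)
{
	unsigned int order = 0;

	/* order of the next power of two at or above n */
	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long spfi_clk = 50000000, speed = 5000000;
	unsigned long div = (spfi_clk + speed - 1) / speed;	/* DIV_ROUND_UP */
	unsigned long bitclk = 512 / (1UL << count_order(div));

	if (bitclk < 1)
		bitclk = 1;
	if (bitclk > 128)
		bitclk = 128;
	/* 50 MHz * 32 / 512 = 3.125 MHz, the fastest setting not above 5 MHz */
	printf("BITCLK = %lu -> output = %lu Hz\n",
	       bitclk, spfi_clk * bitclk / 512);
	return 0;
}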
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 6fea4af51c41..f08e812b2984 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, | |||
370 | if (spi_imx->dma_is_inited) { | 370 | if (spi_imx->dma_is_inited) { |
371 | dma = readl(spi_imx->base + MX51_ECSPI_DMA); | 371 | dma = readl(spi_imx->base + MX51_ECSPI_DMA); |
372 | 372 | ||
373 | spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; | ||
374 | spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; | ||
375 | spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2; | 373 | spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2; |
376 | rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET; | 374 | rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET; |
377 | tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET; | 375 | tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET; |
@@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, | |||
868 | master->max_dma_len = MAX_SDMA_BD_BYTES; | 866 | master->max_dma_len = MAX_SDMA_BD_BYTES; |
869 | spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | | 867 | spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | |
870 | SPI_MASTER_MUST_TX; | 868 | SPI_MASTER_MUST_TX; |
869 | spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; | ||
870 | spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; | ||
871 | spi_imx->dma_is_inited = 1; | 871 | spi_imx->dma_is_inited = 1; |
872 | 872 | ||
873 | return 0; | 873 | return 0; |
@@ -903,7 +903,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, | |||
903 | 903 | ||
904 | if (tx) { | 904 | if (tx) { |
905 | desc_tx = dmaengine_prep_slave_sg(master->dma_tx, | 905 | desc_tx = dmaengine_prep_slave_sg(master->dma_tx, |
906 | tx->sgl, tx->nents, DMA_TO_DEVICE, | 906 | tx->sgl, tx->nents, DMA_MEM_TO_DEV, |
907 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 907 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
908 | if (!desc_tx) | 908 | if (!desc_tx) |
909 | goto no_dma; | 909 | goto no_dma; |
@@ -915,7 +915,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, | |||
915 | 915 | ||
916 | if (rx) { | 916 | if (rx) { |
917 | desc_rx = dmaengine_prep_slave_sg(master->dma_rx, | 917 | desc_rx = dmaengine_prep_slave_sg(master->dma_rx, |
918 | rx->sgl, rx->nents, DMA_FROM_DEVICE, | 918 | rx->sgl, rx->nents, DMA_DEV_TO_MEM, |
919 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 919 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
920 | if (!desc_rx) | 920 | if (!desc_rx) |
921 | goto no_dma; | 921 | goto no_dma; |
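The DMA_TO_DEVICE/DMA_FROM_DEVICE values replaced in the spi-imx hunk above belong to enum dma_data_direction from the DMA-mapping API, while dmaengine slave transfers take enum dma_transfer_direction; the numeric values happen to coincide for these pairs, so this is a type-correctness fix rather than a behavioural one. Side by side, as a sketch:

/* DMA-mapping API: enum dma_data_direction */
dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

/* dmaengine slave API: enum dma_transfer_direction */
desc = dmaengine_prep_slave_sg(master->dma_tx, tx->sgl, tx->nents,
			       DMA_MEM_TO_DEV,
			       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);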
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index ecae0d4e2945..965d2bdcfdcc 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -588,7 +588,7 @@ static int mpc512x_psc_spi_of_remove(struct platform_device *op) | |||
588 | return mpc512x_psc_spi_do_remove(&op->dev); | 588 | return mpc512x_psc_spi_do_remove(&op->dev); |
589 | } | 589 | } |
590 | 590 | ||
591 | static struct of_device_id mpc512x_psc_spi_of_match[] = { | 591 | static const struct of_device_id mpc512x_psc_spi_of_match[] = { |
592 | { .compatible = "fsl,mpc5121-psc-spi", }, | 592 | { .compatible = "fsl,mpc5121-psc-spi", }, |
593 | {}, | 593 | {}, |
594 | }; | 594 | }; |
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c index b283d537d16a..e99d6a93d394 100644 --- a/drivers/spi/spi-octeon.c +++ b/drivers/spi/spi-octeon.c | |||
@@ -238,7 +238,7 @@ static int octeon_spi_remove(struct platform_device *pdev) | |||
238 | return 0; | 238 | return 0; |
239 | } | 239 | } |
240 | 240 | ||
241 | static struct of_device_id octeon_spi_match[] = { | 241 | static const struct of_device_id octeon_spi_match[] = { |
242 | { .compatible = "cavium,octeon-3010-spi", }, | 242 | { .compatible = "cavium,octeon-3010-spi", }, |
243 | {}, | 243 | {}, |
244 | }; | 244 | }; |
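The mpc512x-psc and octeon hunks make the same one-line constification; the usual shape of such a table, sketched here for a hypothetical driver, is:

static const struct of_device_id foo_spi_match[] = {
	{ .compatible = "vendor,foo-spi", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, foo_spi_match);

Since nothing writes to a match table after registration, declaring it const lets it live in read-only data.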
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index d890d309dff9..35b332dacb13 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/pm_runtime.h> | ||
27 | #include <linux/err.h> | 28 | #include <linux/err.h> |
28 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
29 | #include <linux/io.h> | 30 | #include <linux/io.h> |
@@ -294,16 +295,6 @@ static int omap1_spi100k_setup(struct spi_device *spi) | |||
294 | return ret; | 295 | return ret; |
295 | } | 296 | } |
296 | 297 | ||
297 | static int omap1_spi100k_prepare_hardware(struct spi_master *master) | ||
298 | { | ||
299 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
300 | |||
301 | clk_prepare_enable(spi100k->ick); | ||
302 | clk_prepare_enable(spi100k->fck); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static int omap1_spi100k_transfer_one_message(struct spi_master *master, | 298 | static int omap1_spi100k_transfer_one_message(struct spi_master *master, |
308 | struct spi_message *m) | 299 | struct spi_message *m) |
309 | { | 300 | { |
@@ -372,16 +363,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master, | |||
372 | return status; | 363 | return status; |
373 | } | 364 | } |
374 | 365 | ||
375 | static int omap1_spi100k_unprepare_hardware(struct spi_master *master) | ||
376 | { | ||
377 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
378 | |||
379 | clk_disable_unprepare(spi100k->ick); | ||
380 | clk_disable_unprepare(spi100k->fck); | ||
381 | |||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | static int omap1_spi100k_probe(struct platform_device *pdev) | 366 | static int omap1_spi100k_probe(struct platform_device *pdev) |
386 | { | 367 | { |
387 | struct spi_master *master; | 368 | struct spi_master *master; |
@@ -402,14 +383,12 @@ static int omap1_spi100k_probe(struct platform_device *pdev) | |||
402 | 383 | ||
403 | master->setup = omap1_spi100k_setup; | 384 | master->setup = omap1_spi100k_setup; |
404 | master->transfer_one_message = omap1_spi100k_transfer_one_message; | 385 | master->transfer_one_message = omap1_spi100k_transfer_one_message; |
405 | master->prepare_transfer_hardware = omap1_spi100k_prepare_hardware; | ||
406 | master->unprepare_transfer_hardware = omap1_spi100k_unprepare_hardware; | ||
407 | master->cleanup = NULL; | ||
408 | master->num_chipselect = 2; | 386 | master->num_chipselect = 2; |
409 | master->mode_bits = MODEBITS; | 387 | master->mode_bits = MODEBITS; |
410 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); | 388 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); |
411 | master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16); | 389 | master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16); |
412 | master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ; | 390 | master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ; |
391 | master->auto_runtime_pm = true; | ||
413 | 392 | ||
414 | spi100k = spi_master_get_devdata(master); | 393 | spi100k = spi_master_get_devdata(master); |
415 | 394 | ||
@@ -434,22 +413,96 @@ static int omap1_spi100k_probe(struct platform_device *pdev) | |||
434 | goto err; | 413 | goto err; |
435 | } | 414 | } |
436 | 415 | ||
416 | status = clk_prepare_enable(spi100k->ick); | ||
417 | if (status != 0) { | ||
418 | dev_err(&pdev->dev, "failed to enable ick: %d\n", status); | ||
419 | goto err; | ||
420 | } | ||
421 | |||
422 | status = clk_prepare_enable(spi100k->fck); | ||
423 | if (status != 0) { | ||
424 | dev_err(&pdev->dev, "failed to enable fck: %d\n", status); | ||
425 | goto err_ick; | ||
426 | } | ||
427 | |||
428 | pm_runtime_enable(&pdev->dev); | ||
429 | pm_runtime_set_active(&pdev->dev); | ||
430 | |||
437 | status = devm_spi_register_master(&pdev->dev, master); | 431 | status = devm_spi_register_master(&pdev->dev, master); |
438 | if (status < 0) | 432 | if (status < 0) |
439 | goto err; | 433 | goto err_fck; |
440 | 434 | ||
441 | return status; | 435 | return status; |
442 | 436 | ||
437 | err_fck: | ||
438 | clk_disable_unprepare(spi100k->fck); | ||
439 | err_ick: | ||
440 | clk_disable_unprepare(spi100k->ick); | ||
443 | err: | 441 | err: |
444 | spi_master_put(master); | 442 | spi_master_put(master); |
445 | return status; | 443 | return status; |
446 | } | 444 | } |
447 | 445 | ||
446 | static int omap1_spi100k_remove(struct platform_device *pdev) | ||
447 | { | ||
448 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | ||
449 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
450 | |||
451 | pm_runtime_disable(&pdev->dev); | ||
452 | |||
453 | clk_disable_unprepare(spi100k->fck); | ||
454 | clk_disable_unprepare(spi100k->ick); | ||
455 | |||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | #ifdef CONFIG_PM | ||
460 | static int omap1_spi100k_runtime_suspend(struct device *dev) | ||
461 | { | ||
462 | struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); | ||
463 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
464 | |||
465 | clk_disable_unprepare(spi100k->ick); | ||
466 | clk_disable_unprepare(spi100k->fck); | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static int omap1_spi100k_runtime_resume(struct device *dev) | ||
472 | { | ||
473 | struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); | ||
474 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
475 | int ret; | ||
476 | |||
477 | ret = clk_prepare_enable(spi100k->ick); | ||
478 | if (ret != 0) { | ||
479 | dev_err(dev, "Failed to enable ick: %d\n", ret); | ||
480 | return ret; | ||
481 | } | ||
482 | |||
483 | ret = clk_prepare_enable(spi100k->fck); | ||
484 | if (ret != 0) { | ||
485 | dev_err(dev, "Failed to enable fck: %d\n", ret); | ||
486 | clk_disable_unprepare(spi100k->ick); | ||
487 | return ret; | ||
488 | } | ||
489 | |||
490 | return 0; | ||
491 | } | ||
492 | #endif | ||
493 | |||
494 | static const struct dev_pm_ops omap1_spi100k_pm = { | ||
495 | SET_RUNTIME_PM_OPS(omap1_spi100k_runtime_suspend, | ||
496 | omap1_spi100k_runtime_resume, NULL) | ||
497 | }; | ||
498 | |||
448 | static struct platform_driver omap1_spi100k_driver = { | 499 | static struct platform_driver omap1_spi100k_driver = { |
449 | .driver = { | 500 | .driver = { |
450 | .name = "omap1_spi100k", | 501 | .name = "omap1_spi100k", |
502 | .pm = &omap1_spi100k_pm, | ||
451 | }, | 503 | }, |
452 | .probe = omap1_spi100k_probe, | 504 | .probe = omap1_spi100k_probe, |
505 | .remove = omap1_spi100k_remove, | ||
453 | }; | 506 | }; |
454 | 507 | ||
455 | module_platform_driver(omap1_spi100k_driver); | 508 | module_platform_driver(omap1_spi100k_driver); |
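With master->auto_runtime_pm set, the SPI core itself takes a runtime-PM reference around queue processing, which is why the explicit prepare/unprepare_transfer_hardware clock hooks could be dropped above. Roughly (a simplified sketch of the core's message pump, not verbatim):

pm_runtime_get_sync(master->dev.parent);	/* -> omap1_spi100k_runtime_resume() */
/* ... pump the queued message ... */
pm_runtime_mark_last_busy(master->dev.parent);
pm_runtime_put_autosuspend(master->dev.parent);	/* -> runtime_suspend() once idle */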
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c index 3c0844457c07..55576db31549 100644 --- a/drivers/spi/spi-omap-uwire.c +++ b/drivers/spi/spi-omap-uwire.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/module.h> | 44 | #include <linux/module.h> |
45 | #include <linux/io.h> | 45 | #include <linux/io.h> |
46 | 46 | ||
47 | #include <asm/irq.h> | ||
48 | #include <mach/hardware.h> | 47 | #include <mach/hardware.h> |
49 | #include <asm/mach-types.h> | 48 | #include <asm/mach-types.h> |
50 | 49 | ||
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index ee513a85296b..94af80676684 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -285,7 +285,12 @@ | |||
285 | */ | 285 | */ |
286 | #define DEFAULT_SSP_REG_IMSC 0x0UL | 286 | #define DEFAULT_SSP_REG_IMSC 0x0UL |
287 | #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC | 287 | #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC |
288 | #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) | 288 | #define ENABLE_ALL_INTERRUPTS ( \ |
289 | SSP_IMSC_MASK_RORIM | \ | ||
290 | SSP_IMSC_MASK_RTIM | \ | ||
291 | SSP_IMSC_MASK_RXIM | \ | ||
292 | SSP_IMSC_MASK_TXIM \ | ||
293 | ) | ||
289 | 294 | ||
290 | #define CLEAR_ALL_INTERRUPTS 0x3 | 295 | #define CLEAR_ALL_INTERRUPTS 0x3 |
291 | 296 | ||
@@ -1251,7 +1256,6 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
1251 | struct pl022 *pl022 = dev_id; | 1256 | struct pl022 *pl022 = dev_id; |
1252 | struct spi_message *msg = pl022->cur_msg; | 1257 | struct spi_message *msg = pl022->cur_msg; |
1253 | u16 irq_status = 0; | 1258 | u16 irq_status = 0; |
1254 | u16 flag = 0; | ||
1255 | 1259 | ||
1256 | if (unlikely(!msg)) { | 1260 | if (unlikely(!msg)) { |
1257 | dev_err(&pl022->adev->dev, | 1261 | dev_err(&pl022->adev->dev, |
@@ -1280,9 +1284,6 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
1280 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) | 1284 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) |
1281 | dev_err(&pl022->adev->dev, | 1285 | dev_err(&pl022->adev->dev, |
1282 | "RXFIFO is full\n"); | 1286 | "RXFIFO is full\n"); |
1283 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) | ||
1284 | dev_err(&pl022->adev->dev, | ||
1285 | "TXFIFO is full\n"); | ||
1286 | 1287 | ||
1287 | /* | 1288 | /* |
1288 | * Disable and clear interrupts, disable SSP, | 1289 | * Disable and clear interrupts, disable SSP, |
@@ -1303,8 +1304,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
1303 | 1304 | ||
1304 | readwriter(pl022); | 1305 | readwriter(pl022); |
1305 | 1306 | ||
1306 | if ((pl022->tx == pl022->tx_end) && (flag == 0)) { | 1307 | if (pl022->tx == pl022->tx_end) { |
1307 | flag = 1; | ||
1308 | /* Disable Transmit interrupt, enable receive interrupt */ | 1308 | /* Disable Transmit interrupt, enable receive interrupt */ |
1309 | writew((readw(SSP_IMSC(pl022->virtbase)) & | 1309 | writew((readw(SSP_IMSC(pl022->virtbase)) & |
1310 | ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM, | 1310 | ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM, |
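The old ENABLE_ALL_INTERRUPTS above expanded to ~0x0, which also sets reserved IMSC bits; OR-ing the named bits enables exactly the four documented interrupt sources. A small userspace demo of the difference, with bit positions assumed for illustration:

#include <stdio.h>

#define IMSC_RORIM (1u << 0)	/* receive overrun */
#define IMSC_RTIM  (1u << 1)	/* receive timeout */
#define IMSC_RXIM  (1u << 2)	/* RX FIFO service */
#define IMSC_TXIM  (1u << 3)	/* TX FIFO service */

int main(void)
{
	unsigned int negated = ~0x0u;	/* 0xffffffff: touches reserved bits */
	unsigned int named = IMSC_RORIM | IMSC_RTIM | IMSC_RXIM | IMSC_TXIM;

	printf("~0 = %#x, named mask = %#x\n", negated, named);	/* ... vs 0xf */
	return 0;
}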
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 6f72ad01e041..e3223ac75a7c 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/kernel.h> | ||
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/spi/pxa2xx_spi.h> | 25 | #include <linux/spi/pxa2xx_spi.h> |
25 | #include <linux/spi/spi.h> | 26 | #include <linux/spi/spi.h> |
@@ -30,10 +31,6 @@ | |||
30 | #include <linux/pm_runtime.h> | 31 | #include <linux/pm_runtime.h> |
31 | #include <linux/acpi.h> | 32 | #include <linux/acpi.h> |
32 | 33 | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/delay.h> | ||
36 | |||
37 | #include "spi-pxa2xx.h" | 34 | #include "spi-pxa2xx.h" |
38 | 35 | ||
39 | MODULE_AUTHOR("Stephen Street"); | 36 | MODULE_AUTHOR("Stephen Street"); |
@@ -67,54 +64,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
67 | #define LPSS_TX_LOTHRESH_DFLT 160 | 64 | #define LPSS_TX_LOTHRESH_DFLT 160 |
68 | #define LPSS_TX_HITHRESH_DFLT 224 | 65 | #define LPSS_TX_HITHRESH_DFLT 224 |
69 | 66 | ||
70 | struct quark_spi_rate { | ||
71 | u32 bitrate; | ||
72 | u32 dds_clk_rate; | ||
73 | u32 clk_div; | ||
74 | }; | ||
75 | |||
76 | /* | ||
77 | * 'rate', 'dds', 'clk_div' lookup table, which is defined in | ||
78 | * the Quark SPI datasheet. | ||
79 | */ | ||
80 | static const struct quark_spi_rate quark_spi_rate_table[] = { | ||
81 | /* bitrate, dds_clk_rate, clk_div */ | ||
82 | {50000000, 0x800000, 0}, | ||
83 | {40000000, 0x666666, 0}, | ||
84 | {25000000, 0x400000, 0}, | ||
85 | {20000000, 0x666666, 1}, | ||
86 | {16667000, 0x800000, 2}, | ||
87 | {13333000, 0x666666, 2}, | ||
88 | {12500000, 0x200000, 0}, | ||
89 | {10000000, 0x800000, 4}, | ||
90 | {8000000, 0x666666, 4}, | ||
91 | {6250000, 0x400000, 3}, | ||
92 | {5000000, 0x400000, 4}, | ||
93 | {4000000, 0x666666, 9}, | ||
94 | {3125000, 0x80000, 0}, | ||
95 | {2500000, 0x400000, 9}, | ||
96 | {2000000, 0x666666, 19}, | ||
97 | {1563000, 0x40000, 0}, | ||
98 | {1250000, 0x200000, 9}, | ||
99 | {1000000, 0x400000, 24}, | ||
100 | {800000, 0x666666, 49}, | ||
101 | {781250, 0x20000, 0}, | ||
102 | {625000, 0x200000, 19}, | ||
103 | {500000, 0x400000, 49}, | ||
104 | {400000, 0x666666, 99}, | ||
105 | {390625, 0x10000, 0}, | ||
106 | {250000, 0x400000, 99}, | ||
107 | {200000, 0x666666, 199}, | ||
108 | {195313, 0x8000, 0}, | ||
109 | {125000, 0x100000, 49}, | ||
110 | {100000, 0x200000, 124}, | ||
111 | {50000, 0x100000, 124}, | ||
112 | {25000, 0x80000, 124}, | ||
113 | {10016, 0x20000, 77}, | ||
114 | {5040, 0x20000, 154}, | ||
115 | {1002, 0x8000, 194}, | ||
116 | }; | ||
117 | |||
118 | /* Offset from drv_data->lpss_base */ | 67 | /* Offset from drv_data->lpss_base */ |
119 | #define GENERAL_REG 0x08 | 68 | #define GENERAL_REG 0x08 |
120 | #define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) | 69 | #define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) |
@@ -701,25 +650,124 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
701 | } | 650 | } |
702 | 651 | ||
703 | /* | 652 | /* |
704 | * The Quark SPI data sheet gives a table, and for the given 'rate', | 653 | * The Quark SPI has an additional 24-bit register (DDS_CLK_RATE) to multiply |
705 | * the 'dds' and 'clk_div' can be found in the table. | 654 | * the input frequency by fractions of 2^24. It also has a divider by 5. |
655 | * | ||
 656 | * There are formulas to get the baud rate for a given input frequency and | ||
657 | * divider parameters, such as DDS_CLK_RATE and SCR: | ||
658 | * | ||
659 | * Fsys = 200MHz | ||
660 | * | ||
661 | * Fssp = Fsys * DDS_CLK_RATE / 2^24 (1) | ||
662 | * Baud rate = Fsclk = Fssp / (2 * (SCR + 1)) (2) | ||
663 | * | ||
 664 | * DDS_CLK_RATE is either 2^n or 2^n / 5. | ||
665 | * SCR is in range 0 .. 255 | ||
666 | * | ||
667 | * Divisor = 5^i * 2^j * 2 * k | ||
668 | * i = [0, 1] i = 1 iff j = 0 or j > 3 | ||
669 | * j = [0, 23] j = 0 iff i = 1 | ||
670 | * k = [1, 256] | ||
671 | * Special case: j = 0, i = 1: Divisor = 2 / 5 | ||
672 | * | ||
 673 | * According to the specification, the recommended values for DDS_CLK_RATE | ||
674 | * are: | ||
675 | * Case 1: 2^n, n = [0, 23] | ||
676 | * Case 2: 2^24 * 2 / 5 (0x666666) | ||
677 | * Case 3: less than or equal to 2^24 / 5 / 16 (0x33333) | ||
678 | * | ||
679 | * In all cases the lowest possible value is better. | ||
680 | * | ||
681 | * The function calculates parameters for all cases and chooses the one closest | ||
 682 | * to the requested baud rate. | ||
706 | */ | 683 | */ |
707 | static u32 quark_x1000_set_clk_regvals(u32 rate, u32 *dds, u32 *clk_div) | 684 | static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds) |
708 | { | 685 | { |
709 | unsigned int i; | 686 | unsigned long xtal = 200000000; |
710 | 687 | unsigned long fref = xtal / 2; /* mandatory division by 2, | |
711 | for (i = 0; i < ARRAY_SIZE(quark_spi_rate_table); i++) { | 688 | see (2) */ |
712 | if (rate >= quark_spi_rate_table[i].bitrate) { | 689 | /* case 3 */ |
713 | *dds = quark_spi_rate_table[i].dds_clk_rate; | 690 | unsigned long fref1 = fref / 2; /* case 1 */ |
714 | *clk_div = quark_spi_rate_table[i].clk_div; | 691 | unsigned long fref2 = fref * 2 / 5; /* case 2 */ |
715 | return quark_spi_rate_table[i].bitrate; | 692 | unsigned long scale; |
693 | unsigned long q, q1, q2; | ||
694 | long r, r1, r2; | ||
695 | u32 mul; | ||
696 | |||
697 | /* Case 1 */ | ||
698 | |||
699 | /* Set initial value for DDS_CLK_RATE */ | ||
700 | mul = (1 << 24) >> 1; | ||
701 | |||
702 | /* Calculate initial quot */ | ||
703 | q1 = DIV_ROUND_CLOSEST(fref1, rate); | ||
704 | |||
705 | /* Scale q1 if it's too big */ | ||
706 | if (q1 > 256) { | ||
707 | /* Scale q1 to range [1, 512] */ | ||
708 | scale = fls_long(q1 - 1); | ||
709 | if (scale > 9) { | ||
710 | q1 >>= scale - 9; | ||
711 | mul >>= scale - 9; | ||
716 | } | 712 | } |
713 | |||
714 | /* Round the result if we have a remainder */ | ||
715 | q1 += q1 & 1; | ||
717 | } | 716 | } |
718 | 717 | ||
719 | *dds = quark_spi_rate_table[i-1].dds_clk_rate; | 718 | /* Decrease DDS_CLK_RATE as much as we can without loss in precision */ |
720 | *clk_div = quark_spi_rate_table[i-1].clk_div; | 719 | scale = __ffs(q1); |
720 | q1 >>= scale; | ||
721 | mul >>= scale; | ||
722 | |||
723 | /* Get the remainder */ | ||
724 | r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate); | ||
725 | |||
726 | /* Case 2 */ | ||
727 | |||
728 | q2 = DIV_ROUND_CLOSEST(fref2, rate); | ||
729 | r2 = abs(fref2 / q2 - rate); | ||
721 | 730 | ||
722 | return quark_spi_rate_table[i-1].bitrate; | 731 | /* |
 732 | * Choose the better of the two: the smaller the remainder, the better. We | ||
 733 | * can't use case 2 if q2 is greater than 256 since the SCR register can | ||
734 | * hold only values 0 .. 255. | ||
735 | */ | ||
736 | if (r2 >= r1 || q2 > 256) { | ||
737 | /* case 1 is better */ | ||
738 | r = r1; | ||
739 | q = q1; | ||
740 | } else { | ||
741 | /* case 2 is better */ | ||
742 | r = r2; | ||
743 | q = q2; | ||
744 | mul = (1 << 24) * 2 / 5; | ||
745 | } | ||
746 | |||
 747 | /* Check case 3 only if the divisor is big enough */ | ||
748 | if (fref / rate >= 80) { | ||
749 | u64 fssp; | ||
750 | u32 m; | ||
751 | |||
752 | /* Calculate initial quot */ | ||
753 | q1 = DIV_ROUND_CLOSEST(fref, rate); | ||
754 | m = (1 << 24) / q1; | ||
755 | |||
756 | /* Get the remainder */ | ||
757 | fssp = (u64)fref * m; | ||
758 | do_div(fssp, 1 << 24); | ||
759 | r1 = abs(fssp - rate); | ||
760 | |||
 761 | /* Choose this one if it fits better */ | ||
762 | if (r1 < r) { | ||
763 | /* case 3 is better */ | ||
764 | q = 1; | ||
765 | mul = m; | ||
766 | } | ||
767 | } | ||
768 | |||
769 | *dds = mul; | ||
770 | return q - 1; | ||
723 | } | 771 | } |
724 | 772 | ||
725 | static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) | 773 | static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) |
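A worked check of formulas (1) and (2) above against one entry of the removed lookup table, {12500000, 0x200000, 0}; runnable in userspace:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fsys = 200000000;
	uint32_t dds = 0x200000;		/* 2^21, a "case 1" value */
	uint32_t scr = 0;
	uint64_t fssp = (fsys * dds) >> 24;	/* (1): 25000000 */
	uint64_t baud = fssp / (2 * (scr + 1));	/* (2): 12500000 */

	printf("Fssp = %llu Hz, baud = %llu Hz\n",
	       (unsigned long long)fssp, (unsigned long long)baud);
	return 0;
}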
@@ -730,23 +778,25 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) | |||
730 | rate = min_t(int, ssp_clk, rate); | 778 | rate = min_t(int, ssp_clk, rate); |
731 | 779 | ||
732 | if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) | 780 | if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) |
733 | return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; | 781 | return (ssp_clk / (2 * rate) - 1) & 0xff; |
734 | else | 782 | else |
735 | return ((ssp_clk / rate - 1) & 0xfff) << 8; | 783 | return (ssp_clk / rate - 1) & 0xfff; |
736 | } | 784 | } |
737 | 785 | ||
738 | static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, | 786 | static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, |
739 | struct chip_data *chip, int rate) | 787 | struct chip_data *chip, int rate) |
740 | { | 788 | { |
741 | u32 clk_div; | 789 | unsigned int clk_div; |
742 | 790 | ||
743 | switch (drv_data->ssp_type) { | 791 | switch (drv_data->ssp_type) { |
744 | case QUARK_X1000_SSP: | 792 | case QUARK_X1000_SSP: |
745 | quark_x1000_set_clk_regvals(rate, &chip->dds_rate, &clk_div); | 793 | clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate); |
746 | return clk_div << 8; | 794 | break; |
747 | default: | 795 | default: |
748 | return ssp_get_clk_div(drv_data, rate); | 796 | clk_div = ssp_get_clk_div(drv_data, rate); |
797 | break; | ||
749 | } | 798 | } |
799 | return clk_div << 8; | ||
750 | } | 800 | } |
751 | 801 | ||
752 | static void pump_transfers(unsigned long data) | 802 | static void pump_transfers(unsigned long data) |
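After the restructuring above, each branch computes only the raw divider and the shift into the register's SCR field happens once at the end. A stand-alone version of the two ssp_get_clk_div() branches, with made-up clock values:

#include <stdio.h>

int main(void)
{
	unsigned int ssp_clk = 13000000, rate = 1000000;
	unsigned int scr_pxa25x = (ssp_clk / (2 * rate) - 1) & 0xff;	/* 5 */
	unsigned int scr_other = (ssp_clk / rate - 1) & 0xfff;		/* 12 */

	printf("SSCR0 SCR fields: %#x, %#x\n",
	       scr_pxa25x << 8, scr_other << 8);
	return 0;
}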
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index 2b2c359f5a50..810a7fae3479 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/pm_runtime.h> | 23 | #include <linux/pm_runtime.h> |
24 | #include <linux/spi/spi.h> | 24 | #include <linux/spi/spi.h> |
25 | #include <linux/dmaengine.h> | ||
26 | #include <linux/dma-mapping.h> | ||
25 | 27 | ||
26 | #define QUP_CONFIG 0x0000 | 28 | #define QUP_CONFIG 0x0000 |
27 | #define QUP_STATE 0x0004 | 29 | #define QUP_STATE 0x0004 |
@@ -116,6 +118,8 @@ | |||
116 | 118 | ||
117 | #define SPI_NUM_CHIPSELECTS 4 | 119 | #define SPI_NUM_CHIPSELECTS 4 |
118 | 120 | ||
121 | #define SPI_MAX_DMA_XFER (SZ_64K - 64) | ||
122 | |||
119 | /* high speed mode is when bus rate is greater than 26MHz */ | 123 | /* high speed mode is when bus rate is greater than 26MHz */ |
120 | #define SPI_HS_MIN_RATE 26000000 | 124 | #define SPI_HS_MIN_RATE 26000000 |
121 | #define SPI_MAX_RATE 50000000 | 125 | #define SPI_MAX_RATE 50000000 |
@@ -140,9 +144,14 @@ struct spi_qup { | |||
140 | struct completion done; | 144 | struct completion done; |
141 | int error; | 145 | int error; |
142 | int w_size; /* bytes per SPI word */ | 146 | int w_size; /* bytes per SPI word */ |
147 | int n_words; | ||
143 | int tx_bytes; | 148 | int tx_bytes; |
144 | int rx_bytes; | 149 | int rx_bytes; |
145 | int qup_v1; | 150 | int qup_v1; |
151 | |||
152 | int use_dma; | ||
153 | struct dma_slave_config rx_conf; | ||
154 | struct dma_slave_config tx_conf; | ||
146 | }; | 155 | }; |
147 | 156 | ||
148 | 157 | ||
@@ -198,7 +207,6 @@ static int spi_qup_set_state(struct spi_qup *controller, u32 state) | |||
198 | return 0; | 207 | return 0; |
199 | } | 208 | } |
200 | 209 | ||
201 | |||
202 | static void spi_qup_fifo_read(struct spi_qup *controller, | 210 | static void spi_qup_fifo_read(struct spi_qup *controller, |
203 | struct spi_transfer *xfer) | 211 | struct spi_transfer *xfer) |
204 | { | 212 | { |
@@ -266,6 +274,107 @@ static void spi_qup_fifo_write(struct spi_qup *controller, | |||
266 | } | 274 | } |
267 | } | 275 | } |
268 | 276 | ||
277 | static void spi_qup_dma_done(void *data) | ||
278 | { | ||
279 | struct spi_qup *qup = data; | ||
280 | |||
281 | complete(&qup->done); | ||
282 | } | ||
283 | |||
284 | static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer, | ||
285 | enum dma_transfer_direction dir, | ||
286 | dma_async_tx_callback callback) | ||
287 | { | ||
288 | struct spi_qup *qup = spi_master_get_devdata(master); | ||
289 | unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE; | ||
290 | struct dma_async_tx_descriptor *desc; | ||
291 | struct scatterlist *sgl; | ||
292 | struct dma_chan *chan; | ||
293 | dma_cookie_t cookie; | ||
294 | unsigned int nents; | ||
295 | |||
296 | if (dir == DMA_MEM_TO_DEV) { | ||
297 | chan = master->dma_tx; | ||
298 | nents = xfer->tx_sg.nents; | ||
299 | sgl = xfer->tx_sg.sgl; | ||
300 | } else { | ||
301 | chan = master->dma_rx; | ||
302 | nents = xfer->rx_sg.nents; | ||
303 | sgl = xfer->rx_sg.sgl; | ||
304 | } | ||
305 | |||
306 | desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags); | ||
307 | if (!desc) | ||
308 | return -EINVAL; | ||
309 | |||
310 | desc->callback = callback; | ||
311 | desc->callback_param = qup; | ||
312 | |||
313 | cookie = dmaengine_submit(desc); | ||
314 | |||
315 | return dma_submit_error(cookie); | ||
316 | } | ||
317 | |||
318 | static void spi_qup_dma_terminate(struct spi_master *master, | ||
319 | struct spi_transfer *xfer) | ||
320 | { | ||
321 | if (xfer->tx_buf) | ||
322 | dmaengine_terminate_all(master->dma_tx); | ||
323 | if (xfer->rx_buf) | ||
324 | dmaengine_terminate_all(master->dma_rx); | ||
325 | } | ||
326 | |||
327 | static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer) | ||
328 | { | ||
329 | dma_async_tx_callback rx_done = NULL, tx_done = NULL; | ||
330 | int ret; | ||
331 | |||
332 | if (xfer->rx_buf) | ||
333 | rx_done = spi_qup_dma_done; | ||
334 | else if (xfer->tx_buf) | ||
335 | tx_done = spi_qup_dma_done; | ||
336 | |||
337 | if (xfer->rx_buf) { | ||
338 | ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done); | ||
339 | if (ret) | ||
340 | return ret; | ||
341 | |||
342 | dma_async_issue_pending(master->dma_rx); | ||
343 | } | ||
344 | |||
345 | if (xfer->tx_buf) { | ||
346 | ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done); | ||
347 | if (ret) | ||
348 | return ret; | ||
349 | |||
350 | dma_async_issue_pending(master->dma_tx); | ||
351 | } | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer) | ||
357 | { | ||
358 | struct spi_qup *qup = spi_master_get_devdata(master); | ||
359 | int ret; | ||
360 | |||
361 | ret = spi_qup_set_state(qup, QUP_STATE_RUN); | ||
362 | if (ret) { | ||
363 | dev_warn(qup->dev, "cannot set RUN state\n"); | ||
364 | return ret; | ||
365 | } | ||
366 | |||
367 | ret = spi_qup_set_state(qup, QUP_STATE_PAUSE); | ||
368 | if (ret) { | ||
369 | dev_warn(qup->dev, "cannot set PAUSE state\n"); | ||
370 | return ret; | ||
371 | } | ||
372 | |||
373 | spi_qup_fifo_write(qup, xfer); | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
269 | static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) | 378 | static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) |
270 | { | 379 | { |
271 | struct spi_qup *controller = dev_id; | 380 | struct spi_qup *controller = dev_id; |
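spi_qup_prep_sg() and spi_qup_do_dma() above follow the standard dmaengine slave sequence; condensed, with the error checks elided:

desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir,
			       DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
desc->callback = spi_qup_dma_done;	/* completes &qup->done */
desc->callback_param = qup;
dmaengine_submit(desc);			/* queue; returns a cookie to check */
dma_async_issue_pending(chan);		/* actually start the channel */

Note that spi_qup_do_dma() prepares and issues the rx channel before tx, so the receive side is armed before the first word is pushed out.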
@@ -315,11 +424,13 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) | |||
315 | error = -EIO; | 424 | error = -EIO; |
316 | } | 425 | } |
317 | 426 | ||
318 | if (opflags & QUP_OP_IN_SERVICE_FLAG) | 427 | if (!controller->use_dma) { |
319 | spi_qup_fifo_read(controller, xfer); | 428 | if (opflags & QUP_OP_IN_SERVICE_FLAG) |
429 | spi_qup_fifo_read(controller, xfer); | ||
320 | 430 | ||
321 | if (opflags & QUP_OP_OUT_SERVICE_FLAG) | 431 | if (opflags & QUP_OP_OUT_SERVICE_FLAG) |
322 | spi_qup_fifo_write(controller, xfer); | 432 | spi_qup_fifo_write(controller, xfer); |
433 | } | ||
323 | 434 | ||
324 | spin_lock_irqsave(&controller->lock, flags); | 435 | spin_lock_irqsave(&controller->lock, flags); |
325 | controller->error = error; | 436 | controller->error = error; |
@@ -332,13 +443,35 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) | |||
332 | return IRQ_HANDLED; | 443 | return IRQ_HANDLED; |
333 | } | 444 | } |
334 | 445 | ||
446 | static u32 | ||
447 | spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer) | ||
448 | { | ||
449 | struct spi_qup *qup = spi_master_get_devdata(master); | ||
450 | u32 mode; | ||
451 | |||
452 | qup->w_size = 4; | ||
453 | |||
454 | if (xfer->bits_per_word <= 8) | ||
455 | qup->w_size = 1; | ||
456 | else if (xfer->bits_per_word <= 16) | ||
457 | qup->w_size = 2; | ||
458 | |||
459 | qup->n_words = xfer->len / qup->w_size; | ||
460 | |||
461 | if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32))) | ||
462 | mode = QUP_IO_M_MODE_FIFO; | ||
463 | else | ||
464 | mode = QUP_IO_M_MODE_BLOCK; | ||
465 | |||
466 | return mode; | ||
467 | } | ||
335 | 468 | ||
336 | /* set clock freq ... bits per word */ | 469 | /* set clock freq ... bits per word */ |
337 | static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) | 470 | static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) |
338 | { | 471 | { |
339 | struct spi_qup *controller = spi_master_get_devdata(spi->master); | 472 | struct spi_qup *controller = spi_master_get_devdata(spi->master); |
340 | u32 config, iomode, mode, control; | 473 | u32 config, iomode, mode, control; |
341 | int ret, n_words, w_size; | 474 | int ret, n_words; |
342 | 475 | ||
343 | if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { | 476 | if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { |
344 | dev_err(controller->dev, "too big size for loopback %d > %d\n", | 477 | dev_err(controller->dev, "too big size for loopback %d > %d\n", |
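A stand-alone version of the word-size and mode selection now factored into spi_qup_get_mode() above, with a made-up FIFO size:

#include <stdio.h>

int main(void)
{
	unsigned int in_fifo_sz = 64;	/* bytes, hypothetical */
	unsigned int bits_per_word = 8, len = 100;
	unsigned int w_size = bits_per_word <= 8 ? 1 :
			      bits_per_word <= 16 ? 2 : 4;
	unsigned int n_words = len / w_size;

	/* FIFO mode only if the whole transfer fits in the input FIFO */
	printf("%s\n", n_words <= in_fifo_sz / 4 ? "FIFO" : "BLOCK");
	return 0;
}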
@@ -358,35 +491,54 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) | |||
358 | return -EIO; | 491 | return -EIO; |
359 | } | 492 | } |
360 | 493 | ||
361 | w_size = 4; | 494 | mode = spi_qup_get_mode(spi->master, xfer); |
362 | if (xfer->bits_per_word <= 8) | 495 | n_words = controller->n_words; |
363 | w_size = 1; | ||
364 | else if (xfer->bits_per_word <= 16) | ||
365 | w_size = 2; | ||
366 | |||
367 | n_words = xfer->len / w_size; | ||
368 | controller->w_size = w_size; | ||
369 | 496 | ||
370 | if (n_words <= (controller->in_fifo_sz / sizeof(u32))) { | 497 | if (mode == QUP_IO_M_MODE_FIFO) { |
371 | mode = QUP_IO_M_MODE_FIFO; | ||
372 | writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); | 498 | writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); |
373 | writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); | 499 | writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); |
374 | /* must be zero for FIFO */ | 500 | /* must be zero for FIFO */ |
375 | writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); | 501 | writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); |
376 | writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); | 502 | writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); |
377 | } else { | 503 | } else if (!controller->use_dma) { |
378 | mode = QUP_IO_M_MODE_BLOCK; | ||
379 | writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); | 504 | writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); |
380 | writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); | 505 | writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); |
381 | /* must be zero for BLOCK and BAM */ | 506 | /* must be zero for BLOCK and BAM */ |
382 | writel_relaxed(0, controller->base + QUP_MX_READ_CNT); | 507 | writel_relaxed(0, controller->base + QUP_MX_READ_CNT); |
383 | writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); | 508 | writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); |
509 | } else { | ||
510 | mode = QUP_IO_M_MODE_BAM; | ||
511 | writel_relaxed(0, controller->base + QUP_MX_READ_CNT); | ||
512 | writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); | ||
513 | |||
514 | if (!controller->qup_v1) { | ||
515 | void __iomem *input_cnt; | ||
516 | |||
517 | input_cnt = controller->base + QUP_MX_INPUT_CNT; | ||
518 | /* | ||
 519 | * For DMA transfers, both QUP_MX_INPUT_CNT and | ||
 520 | * QUP_MX_OUTPUT_CNT must be zero in all cases but one. | ||
 521 | * That case is an unbalanced transfer when there is | ||
 522 | * only an rx_buf. | ||
523 | */ | ||
524 | if (xfer->tx_buf) | ||
525 | writel_relaxed(0, input_cnt); | ||
526 | else | ||
527 | writel_relaxed(n_words, input_cnt); | ||
528 | |||
529 | writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); | ||
530 | } | ||
384 | } | 531 | } |
385 | 532 | ||
386 | iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); | 533 | iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); |
387 | /* Set input and output transfer mode */ | 534 | /* Set input and output transfer mode */ |
388 | iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK); | 535 | iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK); |
389 | iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); | 536 | |
537 | if (!controller->use_dma) | ||
538 | iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); | ||
539 | else | ||
540 | iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN; | ||
541 | |||
390 | iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); | 542 | iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); |
391 | iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); | 543 | iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); |
392 | 544 | ||
@@ -428,11 +580,31 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) | |||
428 | config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N); | 580 | config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N); |
429 | config |= xfer->bits_per_word - 1; | 581 | config |= xfer->bits_per_word - 1; |
430 | config |= QUP_CONFIG_SPI_MODE; | 582 | config |= QUP_CONFIG_SPI_MODE; |
583 | |||
584 | if (controller->use_dma) { | ||
585 | if (!xfer->tx_buf) | ||
586 | config |= QUP_CONFIG_NO_OUTPUT; | ||
587 | if (!xfer->rx_buf) | ||
588 | config |= QUP_CONFIG_NO_INPUT; | ||
589 | } | ||
590 | |||
431 | writel_relaxed(config, controller->base + QUP_CONFIG); | 591 | writel_relaxed(config, controller->base + QUP_CONFIG); |
432 | 592 | ||
433 | /* only write to OPERATIONAL_MASK when register is present */ | 593 | /* only write to OPERATIONAL_MASK when register is present */ |
434 | if (!controller->qup_v1) | 594 | if (!controller->qup_v1) { |
435 | writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK); | 595 | u32 mask = 0; |
596 | |||
597 | /* | ||
598 | * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO | ||
599 | * status change in BAM mode | ||
600 | */ | ||
601 | |||
602 | if (mode == QUP_IO_M_MODE_BAM) | ||
603 | mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG; | ||
604 | |||
605 | writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK); | ||
606 | } | ||
607 | |||
436 | return 0; | 608 | return 0; |
437 | } | 609 | } |
438 | 610 | ||
@@ -461,17 +633,13 @@ static int spi_qup_transfer_one(struct spi_master *master, | |||
461 | controller->tx_bytes = 0; | 633 | controller->tx_bytes = 0; |
462 | spin_unlock_irqrestore(&controller->lock, flags); | 634 | spin_unlock_irqrestore(&controller->lock, flags); |
463 | 635 | ||
464 | if (spi_qup_set_state(controller, QUP_STATE_RUN)) { | 636 | if (controller->use_dma) |
465 | dev_warn(controller->dev, "cannot set RUN state\n"); | 637 | ret = spi_qup_do_dma(master, xfer); |
466 | goto exit; | 638 | else |
467 | } | 639 | ret = spi_qup_do_pio(master, xfer); |
468 | 640 | ||
469 | if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) { | 641 | if (ret) |
470 | dev_warn(controller->dev, "cannot set PAUSE state\n"); | ||
471 | goto exit; | 642 | goto exit; |
472 | } | ||
473 | |||
474 | spi_qup_fifo_write(controller, xfer); | ||
475 | 643 | ||
476 | if (spi_qup_set_state(controller, QUP_STATE_RUN)) { | 644 | if (spi_qup_set_state(controller, QUP_STATE_RUN)) { |
477 | dev_warn(controller->dev, "cannot set EXECUTE state\n"); | 645 | dev_warn(controller->dev, "cannot set EXECUTE state\n"); |
@@ -480,6 +648,7 @@ static int spi_qup_transfer_one(struct spi_master *master, | |||
480 | 648 | ||
481 | if (!wait_for_completion_timeout(&controller->done, timeout)) | 649 | if (!wait_for_completion_timeout(&controller->done, timeout)) |
482 | ret = -ETIMEDOUT; | 650 | ret = -ETIMEDOUT; |
651 | |||
483 | exit: | 652 | exit: |
484 | spi_qup_set_state(controller, QUP_STATE_RESET); | 653 | spi_qup_set_state(controller, QUP_STATE_RESET); |
485 | spin_lock_irqsave(&controller->lock, flags); | 654 | spin_lock_irqsave(&controller->lock, flags); |
@@ -487,6 +656,97 @@ exit: | |||
487 | if (!ret) | 656 | if (!ret) |
488 | ret = controller->error; | 657 | ret = controller->error; |
489 | spin_unlock_irqrestore(&controller->lock, flags); | 658 | spin_unlock_irqrestore(&controller->lock, flags); |
659 | |||
660 | if (ret && controller->use_dma) | ||
661 | spi_qup_dma_terminate(master, xfer); | ||
662 | |||
663 | return ret; | ||
664 | } | ||
665 | |||
666 | static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi, | ||
667 | struct spi_transfer *xfer) | ||
668 | { | ||
669 | struct spi_qup *qup = spi_master_get_devdata(master); | ||
670 | size_t dma_align = dma_get_cache_alignment(); | ||
671 | u32 mode; | ||
672 | |||
673 | qup->use_dma = 0; | ||
674 | |||
675 | if (xfer->rx_buf && (xfer->len % qup->in_blk_sz || | ||
676 | IS_ERR_OR_NULL(master->dma_rx) || | ||
677 | !IS_ALIGNED((size_t)xfer->rx_buf, dma_align))) | ||
678 | return false; | ||
679 | |||
680 | if (xfer->tx_buf && (xfer->len % qup->out_blk_sz || | ||
681 | IS_ERR_OR_NULL(master->dma_tx) || | ||
682 | !IS_ALIGNED((size_t)xfer->tx_buf, dma_align))) | ||
683 | return false; | ||
684 | |||
685 | mode = spi_qup_get_mode(master, xfer); | ||
686 | if (mode == QUP_IO_M_MODE_FIFO) | ||
687 | return false; | ||
688 | |||
689 | qup->use_dma = 1; | ||
690 | |||
691 | return true; | ||
692 | } | ||
693 | |||
694 | static void spi_qup_release_dma(struct spi_master *master) | ||
695 | { | ||
696 | if (!IS_ERR_OR_NULL(master->dma_rx)) | ||
697 | dma_release_channel(master->dma_rx); | ||
698 | if (!IS_ERR_OR_NULL(master->dma_tx)) | ||
699 | dma_release_channel(master->dma_tx); | ||
700 | } | ||
701 | |||
702 | static int spi_qup_init_dma(struct spi_master *master, resource_size_t base) | ||
703 | { | ||
704 | struct spi_qup *spi = spi_master_get_devdata(master); | ||
705 | struct dma_slave_config *rx_conf = &spi->rx_conf, | ||
706 | *tx_conf = &spi->tx_conf; | ||
707 | struct device *dev = spi->dev; | ||
708 | int ret; | ||
709 | |||
710 | /* allocate dma resources, if available */ | ||
711 | master->dma_rx = dma_request_slave_channel_reason(dev, "rx"); | ||
712 | if (IS_ERR(master->dma_rx)) | ||
713 | return PTR_ERR(master->dma_rx); | ||
714 | |||
715 | master->dma_tx = dma_request_slave_channel_reason(dev, "tx"); | ||
716 | if (IS_ERR(master->dma_tx)) { | ||
717 | ret = PTR_ERR(master->dma_tx); | ||
718 | goto err_tx; | ||
719 | } | ||
720 | |||
721 | /* set DMA parameters */ | ||
722 | rx_conf->direction = DMA_DEV_TO_MEM; | ||
723 | rx_conf->device_fc = 1; | ||
724 | rx_conf->src_addr = base + QUP_INPUT_FIFO; | ||
725 | rx_conf->src_maxburst = spi->in_blk_sz; | ||
726 | |||
727 | tx_conf->direction = DMA_MEM_TO_DEV; | ||
728 | tx_conf->device_fc = 1; | ||
729 | tx_conf->dst_addr = base + QUP_OUTPUT_FIFO; | ||
730 | tx_conf->dst_maxburst = spi->out_blk_sz; | ||
731 | |||
732 | ret = dmaengine_slave_config(master->dma_rx, rx_conf); | ||
733 | if (ret) { | ||
734 | dev_err(dev, "failed to configure RX channel\n"); | ||
735 | goto err; | ||
736 | } | ||
737 | |||
738 | ret = dmaengine_slave_config(master->dma_tx, tx_conf); | ||
739 | if (ret) { | ||
740 | dev_err(dev, "failed to configure TX channel\n"); | ||
741 | goto err; | ||
742 | } | ||
743 | |||
744 | return 0; | ||
745 | |||
746 | err: | ||
747 | dma_release_channel(master->dma_tx); | ||
748 | err_tx: | ||
749 | dma_release_channel(master->dma_rx); | ||
490 | return ret; | 750 | return ret; |
491 | } | 751 | } |
492 | 752 | ||
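A stand-alone version of the buffer tests behind spi_qup_can_dma() above: a transfer is DMA-eligible only if its length is a multiple of the block size and its buffer is cache-aligned (both sizes assumed for the demo):

#include <stdio.h>
#include <stdint.h>

static int dma_ok(uintptr_t buf, unsigned int len,
		  unsigned int blk_sz, unsigned int align)
{
	return len % blk_sz == 0 && buf % align == 0;
}

int main(void)
{
	/* 16-byte QUP blocks and 64-byte cache lines, made up for the demo */
	printf("%d\n", dma_ok(0x1000, 256, 16, 64));	/* 1: eligible */
	printf("%d\n", dma_ok(0x1001, 256, 16, 64));	/* 0: misaligned */
	return 0;
}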
@@ -563,6 +823,8 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
563 | master->transfer_one = spi_qup_transfer_one; | 823 | master->transfer_one = spi_qup_transfer_one; |
564 | master->dev.of_node = pdev->dev.of_node; | 824 | master->dev.of_node = pdev->dev.of_node; |
565 | master->auto_runtime_pm = true; | 825 | master->auto_runtime_pm = true; |
826 | master->dma_alignment = dma_get_cache_alignment(); | ||
827 | master->max_dma_len = SPI_MAX_DMA_XFER; | ||
566 | 828 | ||
567 | platform_set_drvdata(pdev, master); | 829 | platform_set_drvdata(pdev, master); |
568 | 830 | ||
@@ -574,6 +836,12 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
574 | controller->cclk = cclk; | 836 | controller->cclk = cclk; |
575 | controller->irq = irq; | 837 | controller->irq = irq; |
576 | 838 | ||
839 | ret = spi_qup_init_dma(master, res->start); | ||
840 | if (ret == -EPROBE_DEFER) | ||
841 | goto error; | ||
842 | else if (!ret) | ||
843 | master->can_dma = spi_qup_can_dma; | ||
844 | |||
577 | /* set v1 flag if device is version 1 */ | 845 | /* set v1 flag if device is version 1 */ |
578 | if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1")) | 846 | if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1")) |
579 | controller->qup_v1 = 1; | 847 | controller->qup_v1 = 1; |
@@ -610,7 +878,7 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
610 | ret = spi_qup_set_state(controller, QUP_STATE_RESET); | 878 | ret = spi_qup_set_state(controller, QUP_STATE_RESET); |
611 | if (ret) { | 879 | if (ret) { |
612 | dev_err(dev, "cannot set RESET state\n"); | 880 | dev_err(dev, "cannot set RESET state\n"); |
613 | goto error; | 881 | goto error_dma; |
614 | } | 882 | } |
615 | 883 | ||
616 | writel_relaxed(0, base + QUP_OPERATIONAL); | 884 | writel_relaxed(0, base + QUP_OPERATIONAL); |
@@ -634,7 +902,7 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
634 | ret = devm_request_irq(dev, irq, spi_qup_qup_irq, | 902 | ret = devm_request_irq(dev, irq, spi_qup_qup_irq, |
635 | IRQF_TRIGGER_HIGH, pdev->name, controller); | 903 | IRQF_TRIGGER_HIGH, pdev->name, controller); |
636 | if (ret) | 904 | if (ret) |
637 | goto error; | 905 | goto error_dma; |
638 | 906 | ||
639 | pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); | 907 | pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); |
640 | pm_runtime_use_autosuspend(dev); | 908 | pm_runtime_use_autosuspend(dev); |
@@ -649,6 +917,8 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
649 | 917 | ||
650 | disable_pm: | 918 | disable_pm: |
651 | pm_runtime_disable(&pdev->dev); | 919 | pm_runtime_disable(&pdev->dev); |
920 | error_dma: | ||
921 | spi_qup_release_dma(master); | ||
652 | error: | 922 | error: |
653 | clk_disable_unprepare(cclk); | 923 | clk_disable_unprepare(cclk); |
654 | clk_disable_unprepare(iclk); | 924 | clk_disable_unprepare(iclk); |
@@ -740,6 +1010,8 @@ static int spi_qup_remove(struct platform_device *pdev) | |||
740 | if (ret) | 1010 | if (ret) |
741 | return ret; | 1011 | return ret; |
742 | 1012 | ||
1013 | spi_qup_release_dma(master); | ||
1014 | |||
743 | clk_disable_unprepare(controller->cclk); | 1015 | clk_disable_unprepare(controller->cclk); |
744 | clk_disable_unprepare(controller->iclk); | 1016 | clk_disable_unprepare(controller->iclk); |
745 | 1017 | ||
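
The spi-qup changes above follow a common pattern for optional DMA: request the channels at probe time, defer the whole probe only when the DMA provider itself is not ready, and silently fall back to PIO on any other failure. A minimal sketch of that pattern, with hypothetical foo_* names (only dma_request_slave_channel_reason() and dma_release_channel() are real dmaengine calls):

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/spi/spi.h>

/* Sketch only: foo_can_dma() stands in for the driver's own predicate
 * that decides, per transfer, whether DMA is worthwhile. */
static int foo_init_dma(struct spi_master *master, struct device *dev)
{
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx))
		return PTR_ERR(master->dma_rx);	/* may be -EPROBE_DEFER */

	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		int ret = PTR_ERR(master->dma_tx);

		dma_release_channel(master->dma_rx);
		return ret;
	}
	return 0;
}

/* In probe():
 *	ret = foo_init_dma(master, dev);
 *	if (ret == -EPROBE_DEFER)
 *		goto error;			// DMA provider not bound yet
 *	else if (!ret)
 *		master->can_dma = foo_can_dma;	// otherwise: plain PIO
 */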
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 1a777dc261d6..68e7efeb9a27 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -179,6 +179,7 @@ struct rockchip_spi {
 	u8 tmode;
 	u8 bpw;
 	u8 n_bytes;
+	u8 rsd_nsecs;
 	unsigned len;
 	u32 speed;
 
@@ -302,8 +303,8 @@ static int rockchip_spi_prepare_message(struct spi_master *master,
 	return 0;
 }
 
-static int rockchip_spi_unprepare_message(struct spi_master *master,
-					  struct spi_message *msg)
+static void rockchip_spi_handle_err(struct spi_master *master,
+				    struct spi_message *msg)
 {
 	unsigned long flags;
 	struct rockchip_spi *rs = spi_master_get_devdata(master);
@@ -313,8 +314,8 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
 	/*
 	 * For DMA mode, we need terminate DMA channel and flush
 	 * fifo for the next transfer if DMA thansfer timeout.
-	 * unprepare_message() was called by core if transfer complete
-	 * or timeout. Maybe it is reasonable for error handling here.
+	 * handle_err() was called by core if transfer failed.
+	 * Maybe it is reasonable for error handling here.
 	 */
 	if (rs->use_dma) {
 		if (rs->state & RXBUSY) {
@@ -327,6 +328,12 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
 	}
 
 	spin_unlock_irqrestore(&rs->lock, flags);
+}
+
+static int rockchip_spi_unprepare_message(struct spi_master *master,
+					  struct spi_message *msg)
+{
+	struct rockchip_spi *rs = spi_master_get_devdata(master);
 
 	spi_enable_chip(rs, 0);
 
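
Consolidated, the refactor above splits the old unprepare_message() in two: handle_err(), which the core now invokes only when a message failed, keeps the DMA-terminate logic, while unprepare_message() shrinks to the unconditional chip disable. A sketch of the resulting shape (locking elided; RXBUSY/TXBUSY and the dma_rx/dma_tx fields follow the driver's own naming):

static void rockchip_spi_handle_err(struct spi_master *master,
				    struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	/* Runs only on failure: kill any in-flight DMA so the next
	 * message starts from a clean FIFO. */
	if (rs->use_dma) {
		if (rs->state & RXBUSY)
			dmaengine_terminate_all(rs->dma_rx.ch);
		if (rs->state & TXBUSY)
			dmaengine_terminate_all(rs->dma_tx.ch);
	}
}

static int rockchip_spi_unprepare_message(struct spi_master *master,
					  struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	/* Runs after every message, successful or not. */
	spi_enable_chip(rs, 0);
	return 0;
}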
@@ -493,6 +500,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
 {
 	u32 div = 0;
 	u32 dmacr = 0;
+	int rsd = 0;
 
 	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
 		| (CR0_SSD_ONE << CR0_SSD_OFFSET);
@@ -519,9 +527,23 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
 	}
 
 	/* div doesn't support odd number */
-	div = max_t(u32, rs->max_freq / rs->speed, 1);
+	div = DIV_ROUND_UP(rs->max_freq, rs->speed);
 	div = (div + 1) & 0xfffe;
 
+	/* Rx sample delay is expressed in parent clock cycles (max 3) */
+	rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
+				1000000000 >> 8);
+	if (!rsd && rs->rsd_nsecs) {
+		pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
+			     rs->max_freq, rs->rsd_nsecs);
+	} else if (rsd > 3) {
+		rsd = 3;
+		pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
+			     rs->max_freq, rs->rsd_nsecs,
+			     rsd * 1000000000U / rs->max_freq);
+	}
+	cr0 |= rsd << CR0_RSD_OFFSET;
+
 	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
 
 	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
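
The fixed-point trick in the rsd calculation deserves a worked example: shifting both the clock rate and the 1 GHz constant right by eight preserves the ratio while keeping the product rsd_nsecs * max_freq inside 32 bits. With illustrative numbers:

/*
 * max_freq = 99000000 Hz, rsd_nsecs = 10:
 *
 *   rsd = DIV_ROUND_CLOSEST(10 * (99000000 >> 8), 1000000000 >> 8)
 *       = DIV_ROUND_CLOSEST(10 * 386718, 3906250)
 *       = DIV_ROUND_CLOSEST(3867180, 3906250) = 1
 *
 * i.e. one parent clock cycle (~10.1 ns at 99 MHz), inside the 0..3
 * range the CR0_RSD field accepts. Without the shifts, already
 * 50 ns * 99 MHz = 4.95e9 would overflow a u32.
 */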
@@ -614,6 +636,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	struct rockchip_spi *rs;
 	struct spi_master *master;
 	struct resource *mem;
+	u32 rsd_nsecs;
 
 	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
 	if (!master)
@@ -665,6 +688,10 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	rs->dev = &pdev->dev;
 	rs->max_freq = clk_get_rate(rs->spiclk);
 
+	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
+				  &rsd_nsecs))
+		rs->rsd_nsecs = rsd_nsecs;
+
 	rs->fifo_len = get_fifo_len(rs);
 	if (!rs->fifo_len) {
 		dev_err(&pdev->dev, "Failed to get fifo length\n");
@@ -688,6 +715,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	master->prepare_message = rockchip_spi_prepare_message;
 	master->unprepare_message = rockchip_spi_unprepare_message;
 	master->transfer_one = rockchip_spi_transfer_one;
+	master->handle_err = rockchip_spi_handle_err;
 
 	rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
 	if (!rs->dma_tx.ch)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 46ce47076e63..186924aa4740 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -177,6 +177,13 @@
 #define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
 #define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
 #define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
+/* QSPI on R-Car Gen2 */
+#define SPBFCR_TXTRG_1B 0x00 /* 31 bytes (1 byte available) */
+#define SPBFCR_TXTRG_32B 0x30 /* 0 byte (32 bytes available) */
+#define SPBFCR_RXTRG_1B 0x00 /* 1 byte (31 bytes available) */
+#define SPBFCR_RXTRG_32B 0x07 /* 32 bytes (0 byte available) */
+
+#define QSPI_BUFFER_SIZE 32u
 
 struct rspi_data {
 	void __iomem *addr;
@@ -366,6 +373,52 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
 	return 0;
 }
 
+static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
+{
+	u8 data;
+
+	data = rspi_read8(rspi, reg);
+	data &= ~mask;
+	data |= (val & mask);
+	rspi_write8(rspi, data, reg);
+}
+
+static int qspi_set_send_trigger(struct rspi_data *rspi, unsigned int len)
+{
+	unsigned int n;
+
+	n = min(len, QSPI_BUFFER_SIZE);
+
+	if (len >= QSPI_BUFFER_SIZE) {
+		/* sets triggering number to 32 bytes */
+		qspi_update(rspi, SPBFCR_TXTRG_MASK,
+			    SPBFCR_TXTRG_32B, QSPI_SPBFCR);
+	} else {
+		/* sets triggering number to 1 byte */
+		qspi_update(rspi, SPBFCR_TXTRG_MASK,
+			    SPBFCR_TXTRG_1B, QSPI_SPBFCR);
+	}
+
+	return n;
+}
+
+static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
+{
+	unsigned int n;
+
+	n = min(len, QSPI_BUFFER_SIZE);
+
+	if (len >= QSPI_BUFFER_SIZE) {
+		/* sets triggering number to 32 bytes */
+		qspi_update(rspi, SPBFCR_RXTRG_MASK,
+			    SPBFCR_RXTRG_32B, QSPI_SPBFCR);
+	} else {
+		/* sets triggering number to 1 byte */
+		qspi_update(rspi, SPBFCR_RXTRG_MASK,
+			    SPBFCR_RXTRG_1B, QSPI_SPBFCR);
+	}
+}
+
 #define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
 
 static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
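
qspi_update() is a plain read-modify-write helper for 8-bit registers; the two trigger setters use it so that changing the TX watermark leaves the RX bits of the shared SPBFCR register untouched, and vice versa. Usage, as a sketch:

/* Switch the TX trigger to a full 32-byte burst without disturbing
 * the RX trigger bits in the same SPBFCR register: */
qspi_update(rspi, SPBFCR_TXTRG_MASK, SPBFCR_TXTRG_32B, QSPI_SPBFCR);

/* qspi_set_send_trigger() additionally reports how many bytes the
 * caller should feed before waiting: min(len, QSPI_BUFFER_SIZE). */
n = qspi_set_send_trigger(rspi, len);	/* 32, or len when len < 32 */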
@@ -609,19 +662,29 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
 	return __rspi_can_dma(rspi, xfer);
 }
 
-static int rspi_common_transfer(struct rspi_data *rspi,
-				struct spi_transfer *xfer)
+static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
+					struct spi_transfer *xfer)
 {
-	int ret;
-
 	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
 		/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
-		ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
-					xfer->rx_buf ? &xfer->rx_sg : NULL);
+		int ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
+					    xfer->rx_buf ? &xfer->rx_sg : NULL);
 		if (ret != -EAGAIN)
-			return ret;
+			return 0;
 	}
 
+	return -EAGAIN;
+}
+
+static int rspi_common_transfer(struct rspi_data *rspi,
+				struct spi_transfer *xfer)
+{
+	int ret;
+
+	ret = rspi_dma_check_then_transfer(rspi, xfer);
+	if (ret != -EAGAIN)
+		return ret;
+
 	ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
 	if (ret < 0)
 		return ret;
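
With the DMA check factored out, -EAGAIN becomes the explicit "no DMA path taken" sentinel: rspi_dma_check_then_transfer() returns 0 or a hard error when DMA ran, and -EAGAIN when the caller should try another method. That lets each transfer_one variant pick its own fallback. Sketch of the contract:

ret = rspi_dma_check_then_transfer(rspi, xfer);
if (ret != -EAGAIN)
	return ret;	/* DMA handled it (0) or failed hard (< 0) */

/* -EAGAIN: no usable DMA for this transfer; fall back to PIO
 * (rspi_common_transfer) or to the QSPI trigger loop below. */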
@@ -661,12 +724,59 @@ static int rspi_rz_transfer_one(struct spi_master *master,
 	return rspi_common_transfer(rspi, xfer);
 }
 
+static int qspi_trigger_transfer_out_int(struct rspi_data *rspi, const u8 *tx,
+					 u8 *rx, unsigned int len)
+{
+	int i, n, ret;
+	int error;
+
+	while (len > 0) {
+		n = qspi_set_send_trigger(rspi, len);
+		qspi_set_receive_trigger(rspi, len);
+		if (n == QSPI_BUFFER_SIZE) {
+			error = rspi_wait_for_tx_empty(rspi);
+			if (error < 0) {
+				dev_err(&rspi->master->dev, "transmit timeout\n");
+				return error;
+			}
+			for (i = 0; i < n; i++)
+				rspi_write_data(rspi, *tx++);
+
+			error = rspi_wait_for_rx_full(rspi);
+			if (error < 0) {
+				dev_err(&rspi->master->dev, "receive timeout\n");
+				return error;
+			}
+			for (i = 0; i < n; i++)
+				*rx++ = rspi_read_data(rspi);
+		} else {
+			ret = rspi_pio_transfer(rspi, tx, rx, n);
+			if (ret < 0)
+				return ret;
+		}
+		len -= n;
+	}
+
+	return 0;
+}
+
 static int qspi_transfer_out_in(struct rspi_data *rspi,
 				struct spi_transfer *xfer)
 {
+	int ret;
+
 	qspi_receive_init(rspi);
 
-	return rspi_common_transfer(rspi, xfer);
+	ret = rspi_dma_check_then_transfer(rspi, xfer);
+	if (ret != -EAGAIN)
+		return ret;
+
+	ret = qspi_trigger_transfer_out_int(rspi, xfer->tx_buf,
+					    xfer->rx_buf, xfer->len);
+	if (ret < 0)
+		return ret;
+
+	return 0;
 }
 
 static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
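
The loop added above drains the transfer in FIFO-sized chunks: full 32-byte bursts ride the TX-empty/RX-full trigger path, and any sub-32-byte tail drops to byte-wise PIO. Reduced to its skeleton (a sketch, with the wait/copy steps abbreviated):

while (len > 0) {
	n = qspi_set_send_trigger(rspi, len);	/* n = min(len, 32) */
	qspi_set_receive_trigger(rspi, len);
	if (n == QSPI_BUFFER_SIZE) {
		/* wait for TX empty, write n bytes,
		 * wait for RX full,  read n bytes */
	} else {
		/* tail < 32 bytes: rspi_pio_transfer(rspi, tx, rx, n) */
	}
	len -= n;
}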
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 9231c34b5a5c..b1c6731fbf27 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -324,7 +324,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 
 	/* Acquire DMA channels */
 	sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
-			   (void *)sdd->rx_dma.dmach, dev, "rx");
+			   (void *)(long)sdd->rx_dma.dmach, dev, "rx");
 	if (!sdd->rx_dma.ch) {
 		dev_err(dev, "Failed to get RX DMA channel\n");
 		ret = -EBUSY;
@@ -333,7 +333,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 	spi->dma_rx = sdd->rx_dma.ch;
 
 	sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
-			   (void *)sdd->tx_dma.dmach, dev, "tx");
+			   (void *)(long)sdd->tx_dma.dmach, dev, "tx");
 	if (!sdd->tx_dma.ch) {
 		dev_err(dev, "Failed to get TX DMA channel\n");
 		ret = -EBUSY;
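
The two one-character fixes above silence a 64-bit build warning: dmach is an unsigned int, and casting a 32-bit integer straight to a 64-bit pointer triggers -Wint-to-pointer-cast. Widening through long (pointer-sized on Linux targets) first makes the conversion well defined. In isolation:

unsigned int dmach = 12;

void *bad  = (void *)dmach;	  /* warning on 64-bit: cast to pointer
				     from integer of different size */
void *good = (void *)(long)dmach; /* widen to pointer-sized int first */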
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 5a56acf8a43e..36af4d48a700 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -286,7 +286,7 @@ static int sc18is602_probe(struct i2c_client *client,
 		hw->freq = SC18IS602_CLOCK;
 		break;
 	}
-	master->bus_num = client->adapter->nr;
+	master->bus_num = np ? -1 : client->adapter->nr;
 	master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
 	master->bits_per_word_mask = SPI_BPW_MASK(8);
 	master->setup = sc18is602_setup;
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 2faeaa7b57a8..f17c0abe299f 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -482,7 +482,7 @@ static const struct dev_pm_ops spi_st_pm = {
 	SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
 };
 
-static struct of_device_id stm_spi_match[] = {
+static const struct of_device_id stm_spi_match[] = {
 	{ .compatible = "st,comms-ssc4-spi", },
 	{},
 };
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 57a195041dc7..50910d85df5a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -16,7 +16,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kmod.h>
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
@@ -129,125 +128,11 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int spi_legacy_suspend(struct device *dev, pm_message_t message)
-{
-	int value = 0;
-	struct spi_driver *drv = to_spi_driver(dev->driver);
-
-	/* suspend will stop irqs and dma; no more i/o */
-	if (drv) {
-		if (drv->suspend)
-			value = drv->suspend(to_spi_device(dev), message);
-		else
-			dev_dbg(dev, "... can't suspend\n");
-	}
-	return value;
-}
-
-static int spi_legacy_resume(struct device *dev)
-{
-	int value = 0;
-	struct spi_driver *drv = to_spi_driver(dev->driver);
-
-	/* resume may restart the i/o queue */
-	if (drv) {
-		if (drv->resume)
-			value = drv->resume(to_spi_device(dev));
-		else
-			dev_dbg(dev, "... can't resume\n");
-	}
-	return value;
-}
-
-static int spi_pm_suspend(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_suspend(dev);
-	else
-		return spi_legacy_suspend(dev, PMSG_SUSPEND);
-}
-
-static int spi_pm_resume(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_resume(dev);
-	else
-		return spi_legacy_resume(dev);
-}
-
-static int spi_pm_freeze(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_freeze(dev);
-	else
-		return spi_legacy_suspend(dev, PMSG_FREEZE);
-}
-
-static int spi_pm_thaw(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_thaw(dev);
-	else
-		return spi_legacy_resume(dev);
-}
-
-static int spi_pm_poweroff(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_poweroff(dev);
-	else
-		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
-}
-
-static int spi_pm_restore(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_restore(dev);
-	else
-		return spi_legacy_resume(dev);
-}
-#else
-#define spi_pm_suspend NULL
-#define spi_pm_resume NULL
-#define spi_pm_freeze NULL
-#define spi_pm_thaw NULL
-#define spi_pm_poweroff NULL
-#define spi_pm_restore NULL
-#endif
-
-static const struct dev_pm_ops spi_pm = {
-	.suspend = spi_pm_suspend,
-	.resume = spi_pm_resume,
-	.freeze = spi_pm_freeze,
-	.thaw = spi_pm_thaw,
-	.poweroff = spi_pm_poweroff,
-	.restore = spi_pm_restore,
-	SET_RUNTIME_PM_OPS(
-		pm_generic_runtime_suspend,
-		pm_generic_runtime_resume,
-		NULL
-	)
-};
-
 struct bus_type spi_bus_type = {
 	.name = "spi",
 	.dev_groups = spi_dev_groups,
 	.match = spi_match_device,
 	.uevent = spi_uevent,
-	.pm = &spi_pm,
 };
 EXPORT_SYMBOL_GPL(spi_bus_type);
 
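
With the legacy bus-level suspend/resume shims gone, an SPI driver that needs system PM expresses it through dev_pm_ops on its device_driver, which the PM core picks up directly. A minimal sketch of the replacement pattern, with hypothetical foo_* names:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

static int foo_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	dev_dbg(&spi->dev, "quiescing; no more I/O after this point\n");
	return 0;
}

static int foo_resume(struct device *dev)
{
	dev_dbg(dev, "restarting I/O\n");
	return 0;
}

/* Compiles away entirely when CONFIG_PM_SLEEP is off: */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct spi_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = &foo_pm_ops,
	},
	/* .probe / .remove as usual; the removed spi_driver
	 * .suspend/.resume hooks need no replacement here. */
};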
@@ -698,6 +583,15 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 		rx_dev = master->dma_rx->device->dev;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/*
+		 * Restore the original value of tx_buf or rx_buf if they are
+		 * NULL.
+		 */
+		if (xfer->tx_buf == master->dummy_tx)
+			xfer->tx_buf = NULL;
+		if (xfer->rx_buf == master->dummy_rx)
+			xfer->rx_buf = NULL;
+
 		if (!master->can_dma(master, msg->spi, xfer))
 			continue;
 
@@ -851,6 +745,9 @@ out:
 	if (msg->status == -EINPROGRESS)
 		msg->status = ret;
 
+	if (msg->status && master->handle_err)
+		master->handle_err(master, msg);
+
 	spi_finalize_current_message(master);
 
 	return ret;
@@ -1360,7 +1257,6 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
 	spi->dev.of_node = nc;
 
 	/* Register the new device */
-	request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
 	rc = spi_add_device(spi);
 	if (rc) {
 		dev_err(&master->dev, "spi_device register error %s\n",
@@ -1894,6 +1790,8 @@ int spi_setup(struct spi_device *spi)
 	if (!spi->max_speed_hz)
 		spi->max_speed_hz = spi->master->max_speed_hz;
 
+	spi_set_cs(spi, false);
+
 	if (spi->master->setup)
 		status = spi->master->setup(spi);
 
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 4eb7a980e670..92c909eed6b5 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -223,7 +223,7 @@ static int spidev_message(struct spidev_data *spidev,
 	struct spi_transfer *k_xfers;
 	struct spi_transfer *k_tmp;
 	struct spi_ioc_transfer *u_tmp;
-	unsigned n, total;
+	unsigned n, total, tx_total, rx_total;
 	u8 *tx_buf, *rx_buf;
 	int status = -EFAULT;
 
@@ -239,33 +239,52 @@ static int spidev_message(struct spidev_data *spidev,
 	tx_buf = spidev->tx_buffer;
 	rx_buf = spidev->rx_buffer;
 	total = 0;
+	tx_total = 0;
+	rx_total = 0;
 	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
 			n;
 			n--, k_tmp++, u_tmp++) {
 		k_tmp->len = u_tmp->len;
 
 		total += k_tmp->len;
-		if (total > bufsiz) {
+		/* Since the function returns the total length of transfers
+		 * on success, restrict the total to positive int values to
+		 * avoid the return value looking like an error. Also check
+		 * each transfer length to avoid arithmetic overflow.
+		 */
+		if (total > INT_MAX || k_tmp->len > INT_MAX) {
 			status = -EMSGSIZE;
 			goto done;
 		}
 
 		if (u_tmp->rx_buf) {
+			/* this transfer needs space in RX bounce buffer */
+			rx_total += k_tmp->len;
+			if (rx_total > bufsiz) {
+				status = -EMSGSIZE;
+				goto done;
+			}
 			k_tmp->rx_buf = rx_buf;
 			if (!access_ok(VERIFY_WRITE, (u8 __user *)
 					(uintptr_t) u_tmp->rx_buf,
 					u_tmp->len))
 				goto done;
+			rx_buf += k_tmp->len;
 		}
 		if (u_tmp->tx_buf) {
+			/* this transfer needs space in TX bounce buffer */
+			tx_total += k_tmp->len;
+			if (tx_total > bufsiz) {
+				status = -EMSGSIZE;
+				goto done;
+			}
 			k_tmp->tx_buf = tx_buf;
 			if (copy_from_user(tx_buf, (const u8 __user *)
 					(uintptr_t) u_tmp->tx_buf,
 					u_tmp->len))
 				goto done;
+			tx_buf += k_tmp->len;
 		}
-		tx_buf += k_tmp->len;
-		rx_buf += k_tmp->len;
 
 		k_tmp->cs_change = !!u_tmp->cs_change;
 		k_tmp->tx_nbits = u_tmp->tx_nbits;
@@ -303,8 +322,8 @@ static int spidev_message(struct spidev_data *spidev,
 				status = -EFAULT;
 				goto done;
 			}
+			rx_buf += u_tmp->len;
 		}
-		rx_buf += u_tmp->len;
 	}
 	status = total;
 
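
The reworked accounting fixes two distinct problems: the old shared `total` check both rejected legal messages and could overflow. A worked case under the default bufsiz of 4096:

/*
 * Two half-duplex transfers in one message:
 *
 *   xfer[0]: tx_buf set, len = 4096  ->  tx_total = 4096, rx_total = 0
 *   xfer[1]: rx_buf set, len = 4096  ->  tx_total = 4096, rx_total = 4096
 *
 * Old code: total = 8192 > bufsiz, rejected with -EMSGSIZE even though
 * TX and RX use distinct bounce buffers and each fits exactly.
 * New code: both per-direction totals pass, and the INT_MAX guard on
 * `total` keeps the returned length from ever looking like a negative
 * errno. Bounce-buffer pointers now advance only for the direction a
 * transfer actually uses.
 */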
@@ -684,6 +703,14 @@ static const struct file_operations spidev_fops = {
 
 static struct class *spidev_class;
 
+#ifdef CONFIG_OF
+static const struct of_device_id spidev_dt_ids[] = {
+	{ .compatible = "rohm,dh2228fv" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 static int spidev_probe(struct spi_device *spi)
@@ -692,6 +719,17 @@ static int spidev_probe(struct spi_device *spi)
 	int status;
 	unsigned long minor;
 
+	/*
+	 * spidev should never be referenced in DT without a specific
+	 * compatbile string, it is a Linux implementation thing
+	 * rather than a description of the hardware.
+	 */
+	if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
+		dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
+		WARN_ON(spi->dev.of_node &&
+			!of_match_device(spidev_dt_ids, &spi->dev));
+	}
+
 	/* Allocate driver data */
 	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
 	if (!spidev)
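
The added probe check evaluates the same condition twice, once for dev_err() and once for WARN_ON(). A slightly tighter equivalent, sketched here rather than taken from the patch, tests it once:

if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
	dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
	WARN_ON(1);	/* condition already established above */
}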
@@ -758,13 +796,6 @@ static int spidev_remove(struct spi_device *spi)
 	return 0;
 }
 
-static const struct of_device_id spidev_dt_ids[] = {
-	{ .compatible = "rohm,dh2228fv" },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, spidev_dt_ids);
-
 static struct spi_driver spidev_spi_driver = {
 	.driver = {
 		.name = "spidev",
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h
deleted file mode 100644
index 10496bd24c5c..000000000000
--- a/include/linux/intel_mid_dma.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * intel_mid_dma.h - Intel MID DMA Drivers
- *
- * Copyright (C) 2008-10 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#ifndef __INTEL_MID_DMA_H__
-#define __INTEL_MID_DMA_H__
-
-#include <linux/dmaengine.h>
-
-#define DMA_PREP_CIRCULAR_LIST (1 << 10)
-
-/*DMA mode configurations*/
-enum intel_mid_dma_mode {
-	LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/
-	LNW_DMA_MEM_TO_PER, /*memory to periphral configuration*/
-	LNW_DMA_MEM_TO_MEM, /*mem to mem confg (testing only)*/
-};
-
-/*DMA handshaking*/
-enum intel_mid_dma_hs_mode {
-	LNW_DMA_HW_HS = 0, /*HW Handshaking only*/
-	LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/
-};
-
-/*Burst size configuration*/
-enum intel_mid_dma_msize {
-	LNW_DMA_MSIZE_1 = 0x0,
-	LNW_DMA_MSIZE_4 = 0x1,
-	LNW_DMA_MSIZE_8 = 0x2,
-	LNW_DMA_MSIZE_16 = 0x3,
-	LNW_DMA_MSIZE_32 = 0x4,
-	LNW_DMA_MSIZE_64 = 0x5,
-};
-
-/**
- * struct intel_mid_dma_slave - DMA slave structure
- *
- * @dirn: DMA trf direction
- * @src_width: tx register width
- * @dst_width: rx register width
- * @hs_mode: HW/SW handshaking mode
- * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
- * @src_msize: Source DMA burst size
- * @dst_msize: Dst DMA burst size
- * @per_addr: Periphral address
- * @device_instance: DMA peripheral device instance, we can have multiple
- *		peripheral device connected to single DMAC
- */
-struct intel_mid_dma_slave {
-	enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/
-	enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
-	unsigned int device_instance; /*0, 1 for periphral instance*/
-	struct dma_slave_config dma_slave;
-};
-
-#endif /*__INTEL_MID_DMA_H__*/
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 856d34dde79b..d673072346f2 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -162,8 +162,6 @@ struct spi_transfer;
  * @remove: Unbinds this driver from the spi device
  * @shutdown: Standard shutdown callback used during system state
  *	transitions such as powerdown/halt and kexec
- * @suspend: Standard suspend callback used during system state transitions
- * @resume: Standard resume callback used during system state transitions
  * @driver: SPI device drivers should initialize the name and owner
  *	field of this structure.
  *
@@ -184,8 +182,6 @@ struct spi_driver {
 	int (*probe)(struct spi_device *spi);
 	int (*remove)(struct spi_device *spi);
 	void (*shutdown)(struct spi_device *spi);
-	int (*suspend)(struct spi_device *spi, pm_message_t mesg);
-	int (*resume)(struct spi_device *spi);
 	struct device_driver driver;
 };
 
@@ -294,6 +290,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *	transfer_one_message are mutually exclusive; when both
  *	are set, the generic subsystem does not call your
  *	transfer_one callback.
+ * @handle_err: the subsystem calls the driver to handle an error that occurs
+ *	in the generic implementation of transfer_one_message().
  * @unprepare_message: undo any work done by prepare_message().
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *	number. Any individual value may be -ENOENT for CS lines that
@@ -448,6 +446,8 @@ struct spi_master {
 	void (*set_cs)(struct spi_device *spi, bool enable);
 	int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
 			    struct spi_transfer *transfer);
+	void (*handle_err)(struct spi_master *master,
+			   struct spi_message *message);
 
 	/* gpio chip select */
 	int *cs_gpios;