aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt22
-rw-r--r--Documentation/devicetree/bindings/spi/spi_nvidia.txt5
-rw-r--r--Documentation/spi/ep93xx_spi10
-rw-r--r--Documentation/spi/pxa2xx5
-rw-r--r--arch/arm/mach-ep93xx/Makefile4
-rw-r--r--arch/arm/mach-ep93xx/core.c6
-rw-r--r--arch/arm/mach-ep93xx/dma-m2p.c411
-rw-r--r--arch/arm/mach-ep93xx/dma.c108
-rw-r--r--arch/arm/mach-ep93xx/include/mach/dma.h190
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h2
-rw-r--r--arch/arm/mach-imx/clock-imx25.c7
-rw-r--r--arch/arm/mach-mx5/clock-mx51-mx53.c11
-rw-r--r--arch/arm/mach-s3c2410/include/mach/spi-gpio.h28
-rw-r--r--arch/arm/mach-s3c2410/mach-qt2410.c34
-rw-r--r--arch/arm/mach-s3c2412/mach-jive.c41
-rw-r--r--arch/arm/mach-s3c2440/mach-gta02.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-spi_imx.c12
-rw-r--r--drivers/dma/Kconfig7
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/ep93xx_dma.c1355
-rw-r--r--drivers/spi/Kconfig48
-rw-r--r--drivers/spi/Makefile111
-rw-r--r--drivers/spi/atmel_spi.h167
-rw-r--r--drivers/spi/spi-altera.c (renamed from drivers/spi/spi_altera.c)0
-rw-r--r--drivers/spi/spi-ath79.c (renamed from drivers/spi/ath79_spi.c)2
-rw-r--r--drivers/spi/spi-atmel.c (renamed from drivers/spi/atmel_spi.c)155
-rw-r--r--drivers/spi/spi-au1550.c (renamed from drivers/spi/au1550_spi.c)2
-rw-r--r--drivers/spi/spi-bfin-sport.c (renamed from drivers/spi/spi_bfin_sport.c)0
-rw-r--r--drivers/spi/spi-bfin5xx.c (renamed from drivers/spi/spi_bfin5xx.c)218
-rw-r--r--drivers/spi/spi-bitbang-txrx.h (renamed from drivers/spi/spi_bitbang_txrx.h)0
-rw-r--r--drivers/spi/spi-bitbang.c (renamed from drivers/spi/spi_bitbang.c)8
-rw-r--r--drivers/spi/spi-butterfly.c (renamed from drivers/spi/spi_butterfly.c)4
-rw-r--r--drivers/spi/spi-coldfire-qspi.c (renamed from drivers/spi/coldfire_qspi.c)0
-rw-r--r--drivers/spi/spi-davinci.c (renamed from drivers/spi/davinci_spi.c)0
-rw-r--r--drivers/spi/spi-dw-mid.c (renamed from drivers/spi/dw_spi_mid.c)4
-rw-r--r--drivers/spi/spi-dw-mmio.c (renamed from drivers/spi/dw_spi_mmio.c)4
-rw-r--r--drivers/spi/spi-dw-pci.c (renamed from drivers/spi/dw_spi_pci.c)4
-rw-r--r--drivers/spi/spi-dw.c (renamed from drivers/spi/dw_spi.c)8
-rw-r--r--drivers/spi/spi-dw.h (renamed from drivers/spi/dw_spi.h)1
-rw-r--r--drivers/spi/spi-ep93xx.c (renamed from drivers/spi/ep93xx_spi.c)303
-rw-r--r--drivers/spi/spi-fsl-espi.c (renamed from drivers/spi/spi_fsl_espi.c)2
-rw-r--r--drivers/spi/spi-fsl-lib.c (renamed from drivers/spi/spi_fsl_lib.c)2
-rw-r--r--drivers/spi/spi-fsl-lib.h (renamed from drivers/spi/spi_fsl_lib.h)0
-rw-r--r--drivers/spi/spi-fsl-spi.c (renamed from drivers/spi/spi_fsl_spi.c)30
-rw-r--r--drivers/spi/spi-gpio.c (renamed from drivers/spi/spi_gpio.c)6
-rw-r--r--drivers/spi/spi-imx.c (renamed from drivers/spi/spi_imx.c)466
-rw-r--r--drivers/spi/spi-lm70llp.c (renamed from drivers/spi/spi_lm70llp.c)4
-rw-r--r--drivers/spi/spi-mpc512x-psc.c (renamed from drivers/spi/mpc512x_psc_spi.c)0
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c (renamed from drivers/spi/mpc52xx_psc_spi.c)0
-rw-r--r--drivers/spi/spi-mpc52xx.c (renamed from drivers/spi/mpc52xx_spi.c)0
-rw-r--r--drivers/spi/spi-nuc900.c (renamed from drivers/spi/spi_nuc900.c)5
-rw-r--r--drivers/spi/spi-oc-tiny.c (renamed from drivers/spi/spi_oc_tiny.c)0
-rw-r--r--drivers/spi/spi-omap-100k.c (renamed from drivers/spi/omap_spi_100k.c)0
-rw-r--r--drivers/spi/spi-omap-uwire.c (renamed from drivers/spi/omap_uwire.c)2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c (renamed from drivers/spi/omap2_mcspi.c)10
-rw-r--r--drivers/spi/spi-orion.c (renamed from drivers/spi/orion_spi.c)8
-rw-r--r--drivers/spi/spi-pl022.c (renamed from drivers/spi/amba-pl022.c)111
-rw-r--r--drivers/spi/spi-ppc4xx.c (renamed from drivers/spi/spi_ppc4xx.c)2
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c (renamed from drivers/spi/pxa2xx_spi_pci.c)0
-rw-r--r--drivers/spi/spi-pxa2xx.c (renamed from drivers/spi/pxa2xx_spi.c)0
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.S (renamed from drivers/spi/spi_s3c24xx_fiq.S)2
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.h (renamed from drivers/spi/spi_s3c24xx_fiq.h)0
-rw-r--r--drivers/spi/spi-s3c24xx.c (renamed from drivers/spi/spi_s3c24xx.c)5
-rw-r--r--drivers/spi/spi-s3c64xx.c (renamed from drivers/spi/spi_s3c64xx.c)3
-rw-r--r--drivers/spi/spi-sh-msiof.c (renamed from drivers/spi/spi_sh_msiof.c)0
-rw-r--r--drivers/spi/spi-sh-sci.c (renamed from drivers/spi/spi_sh_sci.c)2
-rw-r--r--drivers/spi/spi-sh.c (renamed from drivers/spi/spi_sh.c)0
-rw-r--r--drivers/spi/spi-stmp.c (renamed from drivers/spi/spi_stmp.c)0
-rw-r--r--drivers/spi/spi-tegra.c (renamed from drivers/spi/spi_tegra.c)20
-rw-r--r--drivers/spi/spi-ti-ssp.c (renamed from drivers/spi/ti-ssp-spi.c)0
-rw-r--r--drivers/spi/spi-tle62x0.c (renamed from drivers/spi/tle62x0.c)2
-rw-r--r--drivers/spi/spi-topcliff-pch.c (renamed from drivers/spi/spi_topcliff_pch.c)1158
-rw-r--r--drivers/spi/spi-txx9.c (renamed from drivers/spi/spi_txx9.c)2
-rw-r--r--drivers/spi/spi-xilinx.c (renamed from drivers/spi/xilinx_spi.c)0
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c201
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--sound/soc/ep93xx/ep93xx-ac97.c4
-rw-r--r--sound/soc/ep93xx/ep93xx-i2s.c4
-rw-r--r--sound/soc/ep93xx/ep93xx-pcm.c137
80 files changed, 3494 insertions, 1997 deletions
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
new file mode 100644
index 000000000000..9841057d112b
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
@@ -0,0 +1,22 @@
1* Freescale (Enhanced) Configurable Serial Peripheral Interface
2 (CSPI/eCSPI) for i.MX
3
4Required properties:
5- compatible : Should be "fsl,<soc>-cspi" or "fsl,<soc>-ecspi"
6- reg : Offset and length of the register set for the device
7- interrupts : Should contain CSPI/eCSPI interrupt
8- fsl,spi-num-chipselects : Contains the number of the chipselect
9- cs-gpios : Specifies the gpio pins to be used for chipselects.
10
11Example:
12
13ecspi@70010000 {
14 #address-cells = <1>;
15 #size-cells = <0>;
16 compatible = "fsl,imx51-ecspi";
17 reg = <0x70010000 0x4000>;
18 interrupts = <36>;
19 fsl,spi-num-chipselects = <2>;
20 cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
21 <&gpio3 25 0>; /* GPIO4_25 */
22};
diff --git a/Documentation/devicetree/bindings/spi/spi_nvidia.txt b/Documentation/devicetree/bindings/spi/spi_nvidia.txt
new file mode 100644
index 000000000000..6b9e51896693
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi_nvidia.txt
@@ -0,0 +1,5 @@
1NVIDIA Tegra 2 SPI device
2
3Required properties:
4- compatible : should be "nvidia,tegra20-spi".
5- gpios : should specify GPIOs used for chipselect.
diff --git a/Documentation/spi/ep93xx_spi b/Documentation/spi/ep93xx_spi
index 6325f5b48635..d8eb01c15db1 100644
--- a/Documentation/spi/ep93xx_spi
+++ b/Documentation/spi/ep93xx_spi
@@ -88,6 +88,16 @@ static void __init ts72xx_init_machine(void)
88 ARRAY_SIZE(ts72xx_spi_devices)); 88 ARRAY_SIZE(ts72xx_spi_devices));
89} 89}
90 90
91The driver can use DMA for the transfers also. In this case ts72xx_spi_info
92becomes:
93
94static struct ep93xx_spi_info ts72xx_spi_info = {
95 .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices),
96 .use_dma = true;
97};
98
99Note that CONFIG_EP93XX_DMA should be enabled as well.
100
91Thanks to 101Thanks to
92========= 102=========
93Martin Guy, H. Hartley Sweeten and others who helped me during development of 103Martin Guy, H. Hartley Sweeten and others who helped me during development of
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
index 493dada57372..00511e08db78 100644
--- a/Documentation/spi/pxa2xx
+++ b/Documentation/spi/pxa2xx
@@ -22,15 +22,11 @@ Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
22found in include/linux/spi/pxa2xx_spi.h: 22found in include/linux/spi/pxa2xx_spi.h:
23 23
24struct pxa2xx_spi_master { 24struct pxa2xx_spi_master {
25 enum pxa_ssp_type ssp_type;
26 u32 clock_enable; 25 u32 clock_enable;
27 u16 num_chipselect; 26 u16 num_chipselect;
28 u8 enable_dma; 27 u8 enable_dma;
29}; 28};
30 29
31The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and
32informs the driver which features a particular SSP supports.
33
34The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the 30The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the
35corresponding SSP peripheral block in the "Clock Enable Register (CKEN"). See 31corresponding SSP peripheral block in the "Clock Enable Register (CKEN"). See
36the "PXA2xx Developer Manual" section "Clocks and Power Management". 32the "PXA2xx Developer Manual" section "Clocks and Power Management".
@@ -61,7 +57,6 @@ static struct resource pxa_spi_nssp_resources[] = {
61}; 57};
62 58
63static struct pxa2xx_spi_master pxa_nssp_master_info = { 59static struct pxa2xx_spi_master pxa_nssp_master_info = {
64 .ssp_type = PXA25x_NSSP, /* Type of SSP */
65 .clock_enable = CKEN_NSSP, /* NSSP Peripheral clock */ 60 .clock_enable = CKEN_NSSP, /* NSSP Peripheral clock */
66 .num_chipselect = 1, /* Matches the number of chips attached to NSSP */ 61 .num_chipselect = 1, /* Matches the number of chips attached to NSSP */
67 .enable_dma = 1, /* Enables NSSP DMA */ 62 .enable_dma = 1, /* Enables NSSP DMA */
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
index 6b7c41d155df..3cedcf2d39e5 100644
--- a/arch/arm/mach-ep93xx/Makefile
+++ b/arch/arm/mach-ep93xx/Makefile
@@ -1,11 +1,13 @@
1# 1#
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4obj-y := core.o clock.o dma-m2p.o 4obj-y := core.o clock.o
5obj-m := 5obj-m :=
6obj-n := 6obj-n :=
7obj- := 7obj- :=
8 8
9obj-$(CONFIG_EP93XX_DMA) += dma.o
10
9obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o 11obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o
10obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o 12obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o
11obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o 13obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index c488e4bd61e7..c60f081e930b 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -506,11 +506,15 @@ static struct resource ep93xx_spi_resources[] = {
506 }, 506 },
507}; 507};
508 508
509static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
510
509static struct platform_device ep93xx_spi_device = { 511static struct platform_device ep93xx_spi_device = {
510 .name = "ep93xx-spi", 512 .name = "ep93xx-spi",
511 .id = 0, 513 .id = 0,
512 .dev = { 514 .dev = {
513 .platform_data = &ep93xx_spi_master_data, 515 .platform_data = &ep93xx_spi_master_data,
516 .coherent_dma_mask = DMA_BIT_MASK(32),
517 .dma_mask = &ep93xx_spi_dma_mask,
514 }, 518 },
515 .num_resources = ARRAY_SIZE(ep93xx_spi_resources), 519 .num_resources = ARRAY_SIZE(ep93xx_spi_resources),
516 .resource = ep93xx_spi_resources, 520 .resource = ep93xx_spi_resources,
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
deleted file mode 100644
index a696d354b1f8..000000000000
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ /dev/null
@@ -1,411 +0,0 @@
1/*
2 * arch/arm/mach-ep93xx/dma-m2p.c
3 * M2P DMA handling for Cirrus EP93xx chips.
4 *
5 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Copyright (C) 2006 Applied Data Systems
7 *
8 * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16/*
17 * On the EP93xx chip the following peripherals my be allocated to the 10
18 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
19 *
20 * I2S contains 3 Tx and 3 Rx DMA Channels
21 * AAC contains 3 Tx and 3 Rx DMA Channels
22 * UART1 contains 1 Tx and 1 Rx DMA Channels
23 * UART2 contains 1 Tx and 1 Rx DMA Channels
24 * UART3 contains 1 Tx and 1 Rx DMA Channels
25 * IrDA contains 1 Tx and 1 Rx DMA Channels
26 *
27 * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
28 * with this implementation.
29 */
30
31#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
32
33#include <linux/kernel.h>
34#include <linux/clk.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/module.h>
38#include <linux/io.h>
39
40#include <mach/dma.h>
41#include <mach/hardware.h>
42
43#define M2P_CONTROL 0x00
44#define M2P_CONTROL_STALL_IRQ_EN (1 << 0)
45#define M2P_CONTROL_NFB_IRQ_EN (1 << 1)
46#define M2P_CONTROL_ERROR_IRQ_EN (1 << 3)
47#define M2P_CONTROL_ENABLE (1 << 4)
48#define M2P_INTERRUPT 0x04
49#define M2P_INTERRUPT_STALL (1 << 0)
50#define M2P_INTERRUPT_NFB (1 << 1)
51#define M2P_INTERRUPT_ERROR (1 << 3)
52#define M2P_PPALLOC 0x08
53#define M2P_STATUS 0x0c
54#define M2P_REMAIN 0x14
55#define M2P_MAXCNT0 0x20
56#define M2P_BASE0 0x24
57#define M2P_MAXCNT1 0x30
58#define M2P_BASE1 0x34
59
60#define STATE_IDLE 0 /* Channel is inactive. */
61#define STATE_STALL 1 /* Channel is active, no buffers pending. */
62#define STATE_ON 2 /* Channel is active, one buffer pending. */
63#define STATE_NEXT 3 /* Channel is active, two buffers pending. */
64
65struct m2p_channel {
66 char *name;
67 void __iomem *base;
68 int irq;
69
70 struct clk *clk;
71 spinlock_t lock;
72
73 void *client;
74 unsigned next_slot:1;
75 struct ep93xx_dma_buffer *buffer_xfer;
76 struct ep93xx_dma_buffer *buffer_next;
77 struct list_head buffers_pending;
78};
79
80static struct m2p_channel m2p_rx[] = {
81 {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
82 {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
83 {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
84 {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
85 {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
86 {NULL},
87};
88
89static struct m2p_channel m2p_tx[] = {
90 {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
91 {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
92 {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
93 {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
94 {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
95 {NULL},
96};
97
98static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
99{
100 if (ch->next_slot == 0) {
101 writel(buf->size, ch->base + M2P_MAXCNT0);
102 writel(buf->bus_addr, ch->base + M2P_BASE0);
103 } else {
104 writel(buf->size, ch->base + M2P_MAXCNT1);
105 writel(buf->bus_addr, ch->base + M2P_BASE1);
106 }
107 ch->next_slot ^= 1;
108}
109
110static void choose_buffer_xfer(struct m2p_channel *ch)
111{
112 struct ep93xx_dma_buffer *buf;
113
114 ch->buffer_xfer = NULL;
115 if (!list_empty(&ch->buffers_pending)) {
116 buf = list_entry(ch->buffers_pending.next,
117 struct ep93xx_dma_buffer, list);
118 list_del(&buf->list);
119 feed_buf(ch, buf);
120 ch->buffer_xfer = buf;
121 }
122}
123
124static void choose_buffer_next(struct m2p_channel *ch)
125{
126 struct ep93xx_dma_buffer *buf;
127
128 ch->buffer_next = NULL;
129 if (!list_empty(&ch->buffers_pending)) {
130 buf = list_entry(ch->buffers_pending.next,
131 struct ep93xx_dma_buffer, list);
132 list_del(&buf->list);
133 feed_buf(ch, buf);
134 ch->buffer_next = buf;
135 }
136}
137
138static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
139{
140 /*
141 * The control register must be read immediately after being written so
142 * that the internal state machine is correctly updated. See the ep93xx
143 * users' guide for details.
144 */
145 writel(v, ch->base + M2P_CONTROL);
146 readl(ch->base + M2P_CONTROL);
147}
148
149static inline int m2p_channel_state(struct m2p_channel *ch)
150{
151 return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
152}
153
154static irqreturn_t m2p_irq(int irq, void *dev_id)
155{
156 struct m2p_channel *ch = dev_id;
157 struct ep93xx_dma_m2p_client *cl;
158 u32 irq_status, v;
159 int error = 0;
160
161 cl = ch->client;
162
163 spin_lock(&ch->lock);
164 irq_status = readl(ch->base + M2P_INTERRUPT);
165
166 if (irq_status & M2P_INTERRUPT_ERROR) {
167 writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
168 error = 1;
169 }
170
171 if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
172 spin_unlock(&ch->lock);
173 return IRQ_NONE;
174 }
175
176 switch (m2p_channel_state(ch)) {
177 case STATE_IDLE:
178 pr_crit("dma interrupt without a dma buffer\n");
179 BUG();
180 break;
181
182 case STATE_STALL:
183 cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
184 if (ch->buffer_next != NULL) {
185 cl->buffer_finished(cl->cookie, ch->buffer_next,
186 0, error);
187 }
188 choose_buffer_xfer(ch);
189 choose_buffer_next(ch);
190 if (ch->buffer_xfer != NULL)
191 cl->buffer_started(cl->cookie, ch->buffer_xfer);
192 break;
193
194 case STATE_ON:
195 cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
196 ch->buffer_xfer = ch->buffer_next;
197 choose_buffer_next(ch);
198 cl->buffer_started(cl->cookie, ch->buffer_xfer);
199 break;
200
201 case STATE_NEXT:
202 pr_crit("dma interrupt while next\n");
203 BUG();
204 break;
205 }
206
207 v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
208 M2P_CONTROL_NFB_IRQ_EN);
209 if (ch->buffer_xfer != NULL)
210 v |= M2P_CONTROL_STALL_IRQ_EN;
211 if (ch->buffer_next != NULL)
212 v |= M2P_CONTROL_NFB_IRQ_EN;
213 m2p_set_control(ch, v);
214
215 spin_unlock(&ch->lock);
216 return IRQ_HANDLED;
217}
218
219static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
220{
221 struct m2p_channel *ch;
222 int i;
223
224 if (cl->flags & EP93XX_DMA_M2P_RX)
225 ch = m2p_rx;
226 else
227 ch = m2p_tx;
228
229 for (i = 0; ch[i].base; i++) {
230 struct ep93xx_dma_m2p_client *client;
231
232 client = ch[i].client;
233 if (client != NULL) {
234 int port;
235
236 port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
237 if (port == (client->flags &
238 EP93XX_DMA_M2P_PORT_MASK)) {
239 pr_warning("DMA channel already used by %s\n",
240 cl->name ? : "unknown client");
241 return ERR_PTR(-EBUSY);
242 }
243 }
244 }
245
246 for (i = 0; ch[i].base; i++) {
247 if (ch[i].client == NULL)
248 return ch + i;
249 }
250
251 pr_warning("No free DMA channel for %s\n",
252 cl->name ? : "unknown client");
253 return ERR_PTR(-ENODEV);
254}
255
256static void channel_enable(struct m2p_channel *ch)
257{
258 struct ep93xx_dma_m2p_client *cl = ch->client;
259 u32 v;
260
261 clk_enable(ch->clk);
262
263 v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
264 writel(v, ch->base + M2P_PPALLOC);
265
266 v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
267 v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
268 m2p_set_control(ch, v);
269}
270
271static void channel_disable(struct m2p_channel *ch)
272{
273 u32 v;
274
275 v = readl(ch->base + M2P_CONTROL);
276 v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
277 m2p_set_control(ch, v);
278
279 while (m2p_channel_state(ch) >= STATE_ON)
280 cpu_relax();
281
282 m2p_set_control(ch, 0x0);
283
284 while (m2p_channel_state(ch) == STATE_STALL)
285 cpu_relax();
286
287 clk_disable(ch->clk);
288}
289
290int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
291{
292 struct m2p_channel *ch;
293 int err;
294
295 ch = find_free_channel(cl);
296 if (IS_ERR(ch))
297 return PTR_ERR(ch);
298
299 err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
300 if (err)
301 return err;
302
303 ch->client = cl;
304 ch->next_slot = 0;
305 ch->buffer_xfer = NULL;
306 ch->buffer_next = NULL;
307 INIT_LIST_HEAD(&ch->buffers_pending);
308
309 cl->channel = ch;
310
311 channel_enable(ch);
312
313 return 0;
314}
315EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
316
317void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
318{
319 struct m2p_channel *ch = cl->channel;
320
321 channel_disable(ch);
322 free_irq(ch->irq, ch);
323 ch->client = NULL;
324}
325EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);
326
327void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
328 struct ep93xx_dma_buffer *buf)
329{
330 struct m2p_channel *ch = cl->channel;
331 unsigned long flags;
332 u32 v;
333
334 spin_lock_irqsave(&ch->lock, flags);
335 v = readl(ch->base + M2P_CONTROL);
336 if (ch->buffer_xfer == NULL) {
337 ch->buffer_xfer = buf;
338 feed_buf(ch, buf);
339 cl->buffer_started(cl->cookie, buf);
340
341 v |= M2P_CONTROL_STALL_IRQ_EN;
342 m2p_set_control(ch, v);
343
344 } else if (ch->buffer_next == NULL) {
345 ch->buffer_next = buf;
346 feed_buf(ch, buf);
347
348 v |= M2P_CONTROL_NFB_IRQ_EN;
349 m2p_set_control(ch, v);
350 } else {
351 list_add_tail(&buf->list, &ch->buffers_pending);
352 }
353 spin_unlock_irqrestore(&ch->lock, flags);
354}
355EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);
356
357void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
358 struct ep93xx_dma_buffer *buf)
359{
360 struct m2p_channel *ch = cl->channel;
361
362 list_add_tail(&buf->list, &ch->buffers_pending);
363}
364EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);
365
366void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
367{
368 struct m2p_channel *ch = cl->channel;
369
370 channel_disable(ch);
371 ch->next_slot = 0;
372 ch->buffer_xfer = NULL;
373 ch->buffer_next = NULL;
374 INIT_LIST_HEAD(&ch->buffers_pending);
375 channel_enable(ch);
376}
377EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);
378
379static int init_channel(struct m2p_channel *ch)
380{
381 ch->clk = clk_get(NULL, ch->name);
382 if (IS_ERR(ch->clk))
383 return PTR_ERR(ch->clk);
384
385 spin_lock_init(&ch->lock);
386 ch->client = NULL;
387
388 return 0;
389}
390
391static int __init ep93xx_dma_m2p_init(void)
392{
393 int i;
394 int ret;
395
396 for (i = 0; m2p_rx[i].base; i++) {
397 ret = init_channel(m2p_rx + i);
398 if (ret)
399 return ret;
400 }
401
402 for (i = 0; m2p_tx[i].base; i++) {
403 ret = init_channel(m2p_tx + i);
404 if (ret)
405 return ret;
406 }
407
408 pr_info("M2P DMA subsystem initialized\n");
409 return 0;
410}
411arch_initcall(ep93xx_dma_m2p_init);
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
new file mode 100644
index 000000000000..5a2570881255
--- /dev/null
+++ b/arch/arm/mach-ep93xx/dma.c
@@ -0,0 +1,108 @@
1/*
2 * arch/arm/mach-ep93xx/dma.c
3 *
4 * Platform support code for the EP93xx dmaengine driver.
5 *
6 * Copyright (C) 2011 Mika Westerberg
7 *
8 * This work is based on the original dma-m2p implementation with
9 * following copyrights:
10 *
11 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
12 * Copyright (C) 2006 Applied Data Systems
13 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or (at
18 * your option) any later version.
19 */
20
21#include <linux/dmaengine.h>
22#include <linux/dma-mapping.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/kernel.h>
26#include <linux/platform_device.h>
27
28#include <mach/dma.h>
29#include <mach/hardware.h>
30
31#define DMA_CHANNEL(_name, _base, _irq) \
32 { .name = (_name), .base = (_base), .irq = (_irq) }
33
34/*
35 * DMA M2P channels.
36 *
37 * On the EP93xx chip the following peripherals my be allocated to the 10
38 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
39 *
40 * I2S contains 3 Tx and 3 Rx DMA Channels
41 * AAC contains 3 Tx and 3 Rx DMA Channels
42 * UART1 contains 1 Tx and 1 Rx DMA Channels
43 * UART2 contains 1 Tx and 1 Rx DMA Channels
44 * UART3 contains 1 Tx and 1 Rx DMA Channels
45 * IrDA contains 1 Tx and 1 Rx DMA Channels
46 *
47 * Registers are mapped statically in ep93xx_map_io().
48 */
49static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
50 DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
51 DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
52 DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
53 DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
54 DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
55 DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
56 DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
57 DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
58 DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
59 DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
60};
61
62static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
63 .channels = ep93xx_dma_m2p_channels,
64 .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels),
65};
66
67static struct platform_device ep93xx_dma_m2p_device = {
68 .name = "ep93xx-dma-m2p",
69 .id = -1,
70 .dev = {
71 .platform_data = &ep93xx_dma_m2p_data,
72 },
73};
74
75/*
76 * DMA M2M channels.
77 *
78 * There are 2 M2M channels which support memcpy/memset and in addition simple
79 * hardware requests from/to SSP and IDE. We do not implement an external
80 * hardware requests.
81 *
82 * Registers are mapped statically in ep93xx_map_io().
83 */
84static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
85 DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
86 DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
87};
88
89static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
90 .channels = ep93xx_dma_m2m_channels,
91 .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels),
92};
93
94static struct platform_device ep93xx_dma_m2m_device = {
95 .name = "ep93xx-dma-m2m",
96 .id = -1,
97 .dev = {
98 .platform_data = &ep93xx_dma_m2m_data,
99 },
100};
101
102static int __init ep93xx_dma_init(void)
103{
104 platform_device_register(&ep93xx_dma_m2p_device);
105 platform_device_register(&ep93xx_dma_m2m_device);
106 return 0;
107}
108arch_initcall(ep93xx_dma_init);
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 5e31b2b25da9..46d4d876e6fb 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,149 +1,93 @@
1/**
2 * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
3 *
4 * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
5 * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
6 * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
7 * engine.
8 *
9 * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
10 *
11 */
12
13#ifndef __ASM_ARCH_DMA_H 1#ifndef __ASM_ARCH_DMA_H
14#define __ASM_ARCH_DMA_H 2#define __ASM_ARCH_DMA_H
15 3
16#include <linux/list.h>
17#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/dmaengine.h>
6#include <linux/dma-mapping.h>
18 7
19/** 8/*
20 * struct ep93xx_dma_buffer - Information about a buffer to be transferred 9 * M2P channels.
21 * using the DMA M2P engine
22 * 10 *
23 * @list: Entry in DMA buffer list 11 * Note that these values are also directly used for setting the PPALLOC
24 * @bus_addr: Physical address of the buffer 12 * register.
25 * @size: Size of the buffer in bytes
26 */ 13 */
27struct ep93xx_dma_buffer { 14#define EP93XX_DMA_I2S1 0
28 struct list_head list; 15#define EP93XX_DMA_I2S2 1
29 u32 bus_addr; 16#define EP93XX_DMA_AAC1 2
30 u16 size; 17#define EP93XX_DMA_AAC2 3
31}; 18#define EP93XX_DMA_AAC3 4
19#define EP93XX_DMA_I2S3 5
20#define EP93XX_DMA_UART1 6
21#define EP93XX_DMA_UART2 7
22#define EP93XX_DMA_UART3 8
23#define EP93XX_DMA_IRDA 9
24/* M2M channels */
25#define EP93XX_DMA_SSP 10
26#define EP93XX_DMA_IDE 11
32 27
33/** 28/**
34 * struct ep93xx_dma_m2p_client - Information about a DMA M2P client 29 * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
35 * 30 * @port: peripheral which is requesting the channel
36 * @name: Unique name for this client 31 * @direction: TX/RX channel
37 * @flags: Client flags 32 * @name: optional name for the channel, this is displayed in /proc/interrupts
38 * @cookie: User data to pass to callback functions 33 *
39 * @buffer_started: Non NULL function to call when a transfer is started. 34 * This information is passed as private channel parameter in a filter
40 * The arguments are the user data cookie and the DMA 35 * function. Note that this is only needed for slave/cyclic channels. For
41 * buffer which is starting. 36 * memcpy channels %NULL data should be passed.
42 * @buffer_finished: Non NULL function to call when a transfer is completed.
43 * The arguments are the user data cookie, the DMA buffer
44 * which has completed, and a boolean flag indicating if
45 * the transfer had an error.
46 */ 37 */
47struct ep93xx_dma_m2p_client { 38struct ep93xx_dma_data {
48 char *name; 39 int port;
49 u8 flags; 40 enum dma_data_direction direction;
50 void *cookie; 41 const char *name;
51 void (*buffer_started)(void *cookie,
52 struct ep93xx_dma_buffer *buf);
53 void (*buffer_finished)(void *cookie,
54 struct ep93xx_dma_buffer *buf,
55 int bytes, int error);
56
57 /* private: Internal use only */
58 void *channel;
59}; 42};
60 43
61/* DMA M2P ports */
62#define EP93XX_DMA_M2P_PORT_I2S1 0x00
63#define EP93XX_DMA_M2P_PORT_I2S2 0x01
64#define EP93XX_DMA_M2P_PORT_AAC1 0x02
65#define EP93XX_DMA_M2P_PORT_AAC2 0x03
66#define EP93XX_DMA_M2P_PORT_AAC3 0x04
67#define EP93XX_DMA_M2P_PORT_I2S3 0x05
68#define EP93XX_DMA_M2P_PORT_UART1 0x06
69#define EP93XX_DMA_M2P_PORT_UART2 0x07
70#define EP93XX_DMA_M2P_PORT_UART3 0x08
71#define EP93XX_DMA_M2P_PORT_IRDA 0x09
72#define EP93XX_DMA_M2P_PORT_MASK 0x0f
73
74/* DMA M2P client flags */
75#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
76#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
77
78/*
79 * DMA M2P client error handling flags. See the EP93xx users guide
80 * documentation on the DMA M2P CONTROL register for more details
81 */
82#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
83#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
84#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
85
86/** 44/**
87 * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P 45 * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
88 * subsystem 46 * @name: name of the channel, used for getting the right clock for the channel
89 * 47 * @base: mapped registers
90 * @m2p: Client information to register 48 * @irq: interrupt number used by this channel
91 * returns 0 on success
92 *
93 * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
94 * client
95 */ 49 */
96int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); 50struct ep93xx_dma_chan_data {
51 const char *name;
52 void __iomem *base;
53 int irq;
54};
97 55
98/** 56/**
99 * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P 57 * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
100 * subsystem 58 * @channels: array of channels which are passed to the driver
101 * 59 * @num_channels: number of channels in the array
102 * @m2p: Client to unregister
103 * 60 *
104 * Any transfers currently in progress will be completed in hardware, but 61 * This structure is passed to the DMA engine driver via platform data. For
105 * ignored in software. 62 * M2P channels, contract is that even channels are for TX and odd for RX.
63 * There is no requirement for the M2M channels.
106 */ 64 */
107void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); 65struct ep93xx_dma_platform_data {
66 struct ep93xx_dma_chan_data *channels;
67 size_t num_channels;
68};
108 69
109/** 70static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
110 * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer 71{
111 * 72 return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
112 * @m2p: DMA Client to submit the transfer on 73}
113 * @buf: DMA Buffer to submit
114 *
115 * If the current or next transfer positions are free on the M2P client then
116 * the transfer is started immediately. If not, the transfer is added to the
117 * list of pending transfers. This function must not be called from the
118 * buffer_finished callback for an M2P channel.
119 *
120 */
121void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
122 struct ep93xx_dma_buffer *buf);
123 74
124/** 75/**
125 * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list 76 * ep93xx_dma_chan_direction - returns direction the channel can be used
126 * for an M2P channel 77 * @chan: channel
127 * 78 *
128 * @m2p: DMA Client to submit the transfer on 79 * This function can be used in filter functions to find out whether the
129 * @buf: DMA Buffer to submit 80 * channel supports given DMA direction. Only M2P channels have such
130 * 81 * limitation, for M2M channels the direction is configurable.
131 * This function must only be called from the buffer_finished callback for an
132 * M2P channel. It is commonly used to add the next transfer in a chained list
133 * of DMA transfers.
134 */ 82 */
135void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, 83static inline enum dma_data_direction
136 struct ep93xx_dma_buffer *buf); 84ep93xx_dma_chan_direction(struct dma_chan *chan)
85{
86 if (!ep93xx_dma_chan_is_m2p(chan))
87 return DMA_NONE;
137 88
138/** 89 /* even channels are for TX, odd for RX */
139 * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client 90 return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
140 * 91}
141 * @m2p: DMA client to flush transfers on
142 *
143 * Any transfers currently in progress will be completed in hardware, but
144 * ignored in software.
145 *
146 */
147void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
148 92
149#endif /* __ASM_ARCH_DMA_H */ 93#endif /* __ASM_ARCH_DMA_H */
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
index 0a37961b3453..9bb63ac13f04 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
@@ -7,9 +7,11 @@ struct spi_device;
7 * struct ep93xx_spi_info - EP93xx specific SPI descriptor 7 * struct ep93xx_spi_info - EP93xx specific SPI descriptor
8 * @num_chipselect: number of chip selects on this board, must be 8 * @num_chipselect: number of chip selects on this board, must be
9 * at least one 9 * at least one
10 * @use_dma: use DMA for the transfers
10 */ 11 */
11struct ep93xx_spi_info { 12struct ep93xx_spi_info {
12 int num_chipselect; 13 int num_chipselect;
14 bool use_dma;
13}; 15};
14 16
15/** 17/**
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
index a65838fc061c..af1c580b06bc 100644
--- a/arch/arm/mach-imx/clock-imx25.c
+++ b/arch/arm/mach-imx/clock-imx25.c
@@ -282,9 +282,10 @@ static struct clk_lookup lookups[] = {
282 _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk) 282 _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
283 _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk) 283 _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
284 _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk) 284 _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
285 _REGISTER_CLOCK("imx25-cspi.0", NULL, cspi1_clk) 285 /* i.mx25 has the i.mx35 type cspi */
286 _REGISTER_CLOCK("imx25-cspi.1", NULL, cspi2_clk) 286 _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
287 _REGISTER_CLOCK("imx25-cspi.2", NULL, cspi3_clk) 287 _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
288 _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
288 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk) 289 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
289 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk) 290 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
290 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk) 291 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 6b89c1bf4eb2..cd79e3435e28 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1442,7 +1442,8 @@ static struct clk_lookup mx51_lookups[] = {
1442 _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk) 1442 _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
1443 _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk) 1443 _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
1444 _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk) 1444 _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
1445 _REGISTER_CLOCK("imx51-cspi.0", NULL, cspi_clk) 1445 /* i.mx51 has the i.mx35 type cspi */
1446 _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
1446 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk) 1447 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
1447 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk) 1448 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
1448 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk) 1449 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk)
@@ -1471,9 +1472,11 @@ static struct clk_lookup mx53_lookups[] = {
1471 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk) 1472 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk)
1472 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk) 1473 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk)
1473 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk) 1474 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk)
1474 _REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk) 1475 /* i.mx53 has the i.mx51 type ecspi */
1475 _REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk) 1476 _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
1476 _REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk) 1477 _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
1478 /* i.mx53 has the i.mx25 type cspi */
1479 _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
1477 _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk) 1480 _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
1478 _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk) 1481 _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
1479}; 1482};
diff --git a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h b/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
deleted file mode 100644
index dcef2287cb38..000000000000
--- a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/* arch/arm/mach-s3c2410/include/mach/spi-gpio.h
2 *
3 * Copyright (c) 2006 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C2410 - SPI Controller platform_device info
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_SPIGPIO_H
14#define __ASM_ARCH_SPIGPIO_H __FILE__
15
16struct s3c2410_spigpio_info {
17 unsigned long pin_clk;
18 unsigned long pin_mosi;
19 unsigned long pin_miso;
20
21 int num_chipselect;
22 int bus_num;
23
24 void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs);
25};
26
27
28#endif /* __ASM_ARCH_SPIGPIO_H */
diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c
index e8f49feef28c..f44f77531b1e 100644
--- a/arch/arm/mach-s3c2410/mach-qt2410.c
+++ b/arch/arm/mach-s3c2410/mach-qt2410.c
@@ -32,7 +32,7 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/serial_core.h> 33#include <linux/serial_core.h>
34#include <linux/spi/spi.h> 34#include <linux/spi/spi.h>
35#include <linux/spi/spi_bitbang.h> 35#include <linux/spi/spi_gpio.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/mtd/mtd.h> 37#include <linux/mtd/mtd.h>
38#include <linux/mtd/nand.h> 38#include <linux/mtd/nand.h>
@@ -53,8 +53,6 @@
53#include <mach/fb.h> 53#include <mach/fb.h>
54#include <plat/nand.h> 54#include <plat/nand.h>
55#include <plat/udc.h> 55#include <plat/udc.h>
56#include <mach/spi.h>
57#include <mach/spi-gpio.h>
58#include <plat/iic.h> 56#include <plat/iic.h>
59 57
60#include <plat/common-smdk.h> 58#include <plat/common-smdk.h>
@@ -216,32 +214,16 @@ static struct platform_device qt2410_led = {
216 214
217/* SPI */ 215/* SPI */
218 216
219static void spi_gpio_cs(struct s3c2410_spigpio_info *spi, int cs) 217static struct spi_gpio_platform_data spi_gpio_cfg = {
220{ 218 .sck = S3C2410_GPG(7),
221 switch (cs) { 219 .mosi = S3C2410_GPG(6),
222 case BITBANG_CS_ACTIVE: 220 .miso = S3C2410_GPG(5),
223 gpio_set_value(S3C2410_GPB(5), 0);
224 break;
225 case BITBANG_CS_INACTIVE:
226 gpio_set_value(S3C2410_GPB(5), 1);
227 break;
228 }
229}
230
231static struct s3c2410_spigpio_info spi_gpio_cfg = {
232 .pin_clk = S3C2410_GPG(7),
233 .pin_mosi = S3C2410_GPG(6),
234 .pin_miso = S3C2410_GPG(5),
235 .chip_select = &spi_gpio_cs,
236}; 221};
237 222
238
239static struct platform_device qt2410_spi = { 223static struct platform_device qt2410_spi = {
240 .name = "s3c24xx-spi-gpio", 224 .name = "spi-gpio",
241 .id = 1, 225 .id = 1,
242 .dev = { 226 .dev.platform_data = &spi_gpio_cfg,
243 .platform_data = &spi_gpio_cfg,
244 },
245}; 227};
246 228
247/* Board devices */ 229/* Board devices */
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 85dcaeb9e62f..5eeb47580b0c 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -25,6 +25,7 @@
25#include <video/ili9320.h> 25#include <video/ili9320.h>
26 26
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/spi/spi_gpio.h>
28 29
29#include <asm/mach/arch.h> 30#include <asm/mach/arch.h>
30#include <asm/mach/map.h> 31#include <asm/mach/map.h>
@@ -38,7 +39,6 @@
38#include <mach/regs-gpio.h> 39#include <mach/regs-gpio.h>
39#include <mach/regs-mem.h> 40#include <mach/regs-mem.h>
40#include <mach/regs-lcd.h> 41#include <mach/regs-lcd.h>
41#include <mach/spi-gpio.h>
42#include <mach/fb.h> 42#include <mach/fb.h>
43 43
44#include <asm/mach-types.h> 44#include <asm/mach-types.h>
@@ -389,45 +389,30 @@ static struct ili9320_platdata jive_lcm_config = {
389 389
390/* LCD SPI support */ 390/* LCD SPI support */
391 391
392static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs) 392static struct spi_gpio_platform_data jive_lcd_spi = {
393{ 393 .sck = S3C2410_GPG(8),
394 gpio_set_value(S3C2410_GPB(7), cs ? 0 : 1); 394 .mosi = S3C2410_GPB(8),
395} 395 .miso = SPI_GPIO_NO_MISO,
396
397static struct s3c2410_spigpio_info jive_lcd_spi = {
398 .bus_num = 1,
399 .pin_clk = S3C2410_GPG(8),
400 .pin_mosi = S3C2410_GPB(8),
401 .num_chipselect = 1,
402 .chip_select = jive_lcd_spi_chipselect,
403}; 396};
404 397
405static struct platform_device jive_device_lcdspi = { 398static struct platform_device jive_device_lcdspi = {
406 .name = "spi_s3c24xx_gpio", 399 .name = "spi-gpio",
407 .id = 1, 400 .id = 1,
408 .num_resources = 0,
409 .dev.platform_data = &jive_lcd_spi, 401 .dev.platform_data = &jive_lcd_spi,
410}; 402};
411 403
412/* WM8750 audio code SPI definition */
413 404
414static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs) 405/* WM8750 audio code SPI definition */
415{
416 gpio_set_value(S3C2410_GPH(10), cs ? 0 : 1);
417}
418 406
419static struct s3c2410_spigpio_info jive_wm8750_spi = { 407static struct spi_gpio_platform_data jive_wm8750_spi = {
420 .bus_num = 2, 408 .sck = S3C2410_GPB(4),
421 .pin_clk = S3C2410_GPB(4), 409 .mosi = S3C2410_GPB(9),
422 .pin_mosi = S3C2410_GPB(9), 410 .miso = SPI_GPIO_NO_MISO,
423 .num_chipselect = 1,
424 .chip_select = jive_wm8750_chipselect,
425}; 411};
426 412
427static struct platform_device jive_device_wm8750 = { 413static struct platform_device jive_device_wm8750 = {
428 .name = "spi_s3c24xx_gpio", 414 .name = "spi-gpio",
429 .id = 2, 415 .id = 2,
430 .num_resources = 0,
431 .dev.platform_data = &jive_wm8750_spi, 416 .dev.platform_data = &jive_wm8750_spi,
432}; 417};
433 418
@@ -441,12 +426,14 @@ static struct spi_board_info __initdata jive_spi_devs[] = {
441 .mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */ 426 .mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */
442 .max_speed_hz = 100000, 427 .max_speed_hz = 100000,
443 .platform_data = &jive_lcm_config, 428 .platform_data = &jive_lcm_config,
429 .controller_data = (void *)S3C2410_GPB(7),
444 }, { 430 }, {
445 .modalias = "WM8750", 431 .modalias = "WM8750",
446 .bus_num = 2, 432 .bus_num = 2,
447 .chip_select = 0, 433 .chip_select = 0,
448 .mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */ 434 .mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */
449 .max_speed_hz = 100000, 435 .max_speed_hz = 100000,
436 .controller_data = (void *)S3C2410_GPH(10),
450 }, 437 },
451}; 438};
452 439
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 716662008ce2..c10ddf4ed7f1 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -74,7 +74,6 @@
74#include <mach/fb.h> 74#include <mach/fb.h>
75 75
76#include <mach/spi.h> 76#include <mach/spi.h>
77#include <mach/spi-gpio.h>
78#include <plat/usb-control.h> 77#include <plat/usb-control.h>
79#include <mach/regs-mem.h> 78#include <mach/regs-mem.h>
80#include <mach/hardware.h> 79#include <mach/hardware.h>
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index f97eb3615b2c..9bfae8bd5b8d 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -40,9 +40,10 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
40#endif 40#endif
41 41
42#ifdef CONFIG_SOC_IMX25 42#ifdef CONFIG_SOC_IMX25
43/* i.mx25 has the i.mx35 type cspi */
43const struct imx_spi_imx_data imx25_cspi_data[] __initconst = { 44const struct imx_spi_imx_data imx25_cspi_data[] __initconst = {
44#define imx25_cspi_data_entry(_id, _hwid) \ 45#define imx25_cspi_data_entry(_id, _hwid) \
45 imx_spi_imx_data_entry(MX25, CSPI, "imx25-cspi", _id, _hwid, SZ_16K) 46 imx_spi_imx_data_entry(MX25, CSPI, "imx35-cspi", _id, _hwid, SZ_16K)
46 imx25_cspi_data_entry(0, 1), 47 imx25_cspi_data_entry(0, 1),
47 imx25_cspi_data_entry(1, 2), 48 imx25_cspi_data_entry(1, 2),
48 imx25_cspi_data_entry(2, 3), 49 imx25_cspi_data_entry(2, 3),
@@ -79,8 +80,9 @@ const struct imx_spi_imx_data imx35_cspi_data[] __initconst = {
79#endif /* ifdef CONFIG_SOC_IMX35 */ 80#endif /* ifdef CONFIG_SOC_IMX35 */
80 81
81#ifdef CONFIG_SOC_IMX51 82#ifdef CONFIG_SOC_IMX51
83/* i.mx51 has the i.mx35 type cspi */
82const struct imx_spi_imx_data imx51_cspi_data __initconst = 84const struct imx_spi_imx_data imx51_cspi_data __initconst =
83 imx_spi_imx_data_entry_single(MX51, CSPI, "imx51-cspi", 2, , SZ_4K); 85 imx_spi_imx_data_entry_single(MX51, CSPI, "imx35-cspi", 2, , SZ_4K);
84 86
85const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = { 87const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
86#define imx51_ecspi_data_entry(_id, _hwid) \ 88#define imx51_ecspi_data_entry(_id, _hwid) \
@@ -91,12 +93,14 @@ const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
91#endif /* ifdef CONFIG_SOC_IMX51 */ 93#endif /* ifdef CONFIG_SOC_IMX51 */
92 94
93#ifdef CONFIG_SOC_IMX53 95#ifdef CONFIG_SOC_IMX53
96/* i.mx53 has the i.mx35 type cspi */
94const struct imx_spi_imx_data imx53_cspi_data __initconst = 97const struct imx_spi_imx_data imx53_cspi_data __initconst =
95 imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K); 98 imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 0, , SZ_4K);
96 99
100/* i.mx53 has the i.mx51 type ecspi */
97const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = { 101const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = {
98#define imx53_ecspi_data_entry(_id, _hwid) \ 102#define imx53_ecspi_data_entry(_id, _hwid) \
99 imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K) 103 imx_spi_imx_data_entry(MX53, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K)
100 imx53_ecspi_data_entry(0, 1), 104 imx53_ecspi_data_entry(0, 1),
101 imx53_ecspi_data_entry(1, 2), 105 imx53_ecspi_data_entry(1, 2),
102}; 106};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 25cf327cd1cb..2e3b3d38c465 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -237,6 +237,13 @@ config MXS_DMA
237 Support the MXS DMA engine. This engine including APBH-DMA 237 Support the MXS DMA engine. This engine including APBH-DMA
238 and APBX-DMA is integrated into Freescale i.MX23/28 chips. 238 and APBX-DMA is integrated into Freescale i.MX23/28 chips.
239 239
240config EP93XX_DMA
241 bool "Cirrus Logic EP93xx DMA support"
242 depends on ARCH_EP93XX
243 select DMA_ENGINE
244 help
245 Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
246
240config DMA_ENGINE 247config DMA_ENGINE
241 bool 248 bool
242 249
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095ab3c5c..30cf3b1f0c5c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
25obj-$(CONFIG_PL330_DMA) += pl330.o 25obj-$(CONFIG_PL330_DMA) += pl330.o
26obj-$(CONFIG_PCH_DMA) += pch_dma.o 26obj-$(CONFIG_PCH_DMA) += pch_dma.o
27obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o 27obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
28obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
new file mode 100644
index 000000000000..0766c1e53b1d
--- /dev/null
+++ b/drivers/dma/ep93xx_dma.c
@@ -0,0 +1,1355 @@
1/*
2 * Driver for the Cirrus Logic EP93xx DMA Controller
3 *
4 * Copyright (C) 2011 Mika Westerberg
5 *
6 * DMA M2P implementation is based on the original
7 * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
8 *
9 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
10 * Copyright (C) 2006 Applied Data Systems
11 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
12 *
13 * This driver is based on dw_dmac and amba-pl08x drivers.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 */
20
21#include <linux/clk.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
27
28#include <mach/dma.h>
29
30/* M2P registers */
31#define M2P_CONTROL 0x0000
32#define M2P_CONTROL_STALLINT BIT(0)
33#define M2P_CONTROL_NFBINT BIT(1)
34#define M2P_CONTROL_CH_ERROR_INT BIT(3)
35#define M2P_CONTROL_ENABLE BIT(4)
36#define M2P_CONTROL_ICE BIT(6)
37
38#define M2P_INTERRUPT 0x0004
39#define M2P_INTERRUPT_STALL BIT(0)
40#define M2P_INTERRUPT_NFB BIT(1)
41#define M2P_INTERRUPT_ERROR BIT(3)
42
43#define M2P_PPALLOC 0x0008
44#define M2P_STATUS 0x000c
45
46#define M2P_MAXCNT0 0x0020
47#define M2P_BASE0 0x0024
48#define M2P_MAXCNT1 0x0030
49#define M2P_BASE1 0x0034
50
51#define M2P_STATE_IDLE 0
52#define M2P_STATE_STALL 1
53#define M2P_STATE_ON 2
54#define M2P_STATE_NEXT 3
55
56/* M2M registers */
57#define M2M_CONTROL 0x0000
58#define M2M_CONTROL_DONEINT BIT(2)
59#define M2M_CONTROL_ENABLE BIT(3)
60#define M2M_CONTROL_START BIT(4)
61#define M2M_CONTROL_DAH BIT(11)
62#define M2M_CONTROL_SAH BIT(12)
63#define M2M_CONTROL_PW_SHIFT 9
64#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
65#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
66#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
67#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
68#define M2M_CONTROL_TM_SHIFT 13
69#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
70#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
71#define M2M_CONTROL_RSS_SHIFT 22
72#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
73#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
74#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
75#define M2M_CONTROL_NO_HDSK BIT(24)
76#define M2M_CONTROL_PWSC_SHIFT 25
77
78#define M2M_INTERRUPT 0x0004
79#define M2M_INTERRUPT_DONEINT BIT(1)
80
81#define M2M_BCR0 0x0010
82#define M2M_BCR1 0x0014
83#define M2M_SAR_BASE0 0x0018
84#define M2M_SAR_BASE1 0x001c
85#define M2M_DAR_BASE0 0x002c
86#define M2M_DAR_BASE1 0x0030
87
88#define DMA_MAX_CHAN_BYTES 0xffff
89#define DMA_MAX_CHAN_DESCRIPTORS 32
90
91struct ep93xx_dma_engine;
92
93/**
94 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
95 * @src_addr: source address of the transaction
96 * @dst_addr: destination address of the transaction
97 * @size: size of the transaction (in bytes)
98 * @complete: this descriptor is completed
99 * @txd: dmaengine API descriptor
100 * @tx_list: list of linked descriptors
101 * @node: link used for putting this into a channel queue
102 */
103struct ep93xx_dma_desc {
104 u32 src_addr;
105 u32 dst_addr;
106 size_t size;
107 bool complete;
108 struct dma_async_tx_descriptor txd;
109 struct list_head tx_list;
110 struct list_head node;
111};
112
113/**
114 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
115 * @chan: dmaengine API channel
116 * @edma: pointer to to the engine device
117 * @regs: memory mapped registers
118 * @irq: interrupt number of the channel
119 * @clk: clock used by this channel
120 * @tasklet: channel specific tasklet used for callbacks
121 * @lock: lock protecting the fields following
122 * @flags: flags for the channel
123 * @buffer: which buffer to use next (0/1)
124 * @last_completed: last completed cookie value
125 * @active: flattened chain of descriptors currently being processed
126 * @queue: pending descriptors which are handled next
127 * @free_list: list of free descriptors which can be used
128 * @runtime_addr: physical address currently used as dest/src (M2M only). This
129 * is set via %DMA_SLAVE_CONFIG before slave operation is
130 * prepared
131 * @runtime_ctrl: M2M runtime values for the control register.
132 *
133 * As EP93xx DMA controller doesn't support real chained DMA descriptors we
134 * will have slightly different scheme here: @active points to a head of
135 * flattened DMA descriptor chain.
136 *
137 * @queue holds pending transactions. These are linked through the first
138 * descriptor in the chain. When a descriptor is moved to the @active queue,
139 * the first and chained descriptors are flattened into a single list.
140 *
141 * @chan.private holds pointer to &struct ep93xx_dma_data which contains
142 * necessary channel configuration information. For memcpy channels this must
143 * be %NULL.
144 */
145struct ep93xx_dma_chan {
146 struct dma_chan chan;
147 const struct ep93xx_dma_engine *edma;
148 void __iomem *regs;
149 int irq;
150 struct clk *clk;
151 struct tasklet_struct tasklet;
152 /* protects the fields following */
153 spinlock_t lock;
154 unsigned long flags;
155/* Channel is configured for cyclic transfers */
156#define EP93XX_DMA_IS_CYCLIC 0
157
158 int buffer;
159 dma_cookie_t last_completed;
160 struct list_head active;
161 struct list_head queue;
162 struct list_head free_list;
163 u32 runtime_addr;
164 u32 runtime_ctrl;
165};
166
167/**
168 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
169 * @dma_dev: holds the dmaengine device
170 * @m2m: is this an M2M or M2P device
171 * @hw_setup: method which sets the channel up for operation
172 * @hw_shutdown: shuts the channel down and flushes whatever is left
173 * @hw_submit: pushes active descriptor(s) to the hardware
174 * @hw_interrupt: handle the interrupt
175 * @num_channels: number of channels for this instance
176 * @channels: array of channels
177 *
178 * There is one instance of this struct for the M2P channels and one for the
179 * M2M channels. hw_xxx() methods are used to perform operations which are
180 * different on M2M and M2P channels. These methods are called with channel
181 * lock held and interrupts disabled so they cannot sleep.
182 */
183struct ep93xx_dma_engine {
184 struct dma_device dma_dev;
185 bool m2m;
186 int (*hw_setup)(struct ep93xx_dma_chan *);
187 void (*hw_shutdown)(struct ep93xx_dma_chan *);
188 void (*hw_submit)(struct ep93xx_dma_chan *);
189 int (*hw_interrupt)(struct ep93xx_dma_chan *);
190#define INTERRUPT_UNKNOWN 0
191#define INTERRUPT_DONE 1
192#define INTERRUPT_NEXT_BUFFER 2
193
194 size_t num_channels;
195 struct ep93xx_dma_chan channels[];
196};
197
198static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
199{
200 return &edmac->chan.dev->device;
201}
202
203static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
204{
205 return container_of(chan, struct ep93xx_dma_chan, chan);
206}
207
208/**
209 * ep93xx_dma_set_active - set new active descriptor chain
210 * @edmac: channel
211 * @desc: head of the new active descriptor chain
212 *
213 * Sets @desc to be the head of the new active descriptor chain. This is the
214 * chain which is processed next. The active list must be empty before calling
215 * this function.
216 *
217 * Called with @edmac->lock held and interrupts disabled.
218 */
219static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
220 struct ep93xx_dma_desc *desc)
221{
222 BUG_ON(!list_empty(&edmac->active));
223
224 list_add_tail(&desc->node, &edmac->active);
225
226 /* Flatten the @desc->tx_list chain into @edmac->active list */
227 while (!list_empty(&desc->tx_list)) {
228 struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
229 struct ep93xx_dma_desc, node);
230
231 /*
232 * We copy the callback parameters from the first descriptor
233 * to all the chained descriptors. This way we can call the
234 * callback without having to find out the first descriptor in
235 * the chain. Useful for cyclic transfers.
236 */
237 d->txd.callback = desc->txd.callback;
238 d->txd.callback_param = desc->txd.callback_param;
239
240 list_move_tail(&d->node, &edmac->active);
241 }
242}
243
244/* Called with @edmac->lock held and interrupts disabled */
245static struct ep93xx_dma_desc *
246ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
247{
248 return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
249}
250
251/**
252 * ep93xx_dma_advance_active - advances to the next active descriptor
253 * @edmac: channel
254 *
255 * Function advances active descriptor to the next in the @edmac->active and
256 * returns %true if we still have descriptors in the chain to process.
257 * Otherwise returns %false.
258 *
259 * When the channel is in cyclic mode always returns %true.
260 *
261 * Called with @edmac->lock held and interrupts disabled.
262 */
263static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
264{
265 list_rotate_left(&edmac->active);
266
267 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
268 return true;
269
270 /*
271 * If txd.cookie is set it means that we are back in the first
272 * descriptor in the chain and hence done with it.
273 */
274 return !ep93xx_dma_get_active(edmac)->txd.cookie;
275}
276
277/*
278 * M2P DMA implementation
279 */
280
281static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
282{
283 writel(control, edmac->regs + M2P_CONTROL);
284 /*
285 * EP93xx User's Guide states that we must perform a dummy read after
286 * write to the control register.
287 */
288 readl(edmac->regs + M2P_CONTROL);
289}
290
291static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
292{
293 struct ep93xx_dma_data *data = edmac->chan.private;
294 u32 control;
295
296 writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
297
298 control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
299 | M2P_CONTROL_ENABLE;
300 m2p_set_control(edmac, control);
301
302 return 0;
303}
304
305static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
306{
307 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
308}
309
310static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
311{
312 u32 control;
313
314 control = readl(edmac->regs + M2P_CONTROL);
315 control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
316 m2p_set_control(edmac, control);
317
318 while (m2p_channel_state(edmac) >= M2P_STATE_ON)
319 cpu_relax();
320
321 m2p_set_control(edmac, 0);
322
323 while (m2p_channel_state(edmac) == M2P_STATE_STALL)
324 cpu_relax();
325}
326
327static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
328{
329 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
330 u32 bus_addr;
331
332 if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
333 bus_addr = desc->src_addr;
334 else
335 bus_addr = desc->dst_addr;
336
337 if (edmac->buffer == 0) {
338 writel(desc->size, edmac->regs + M2P_MAXCNT0);
339 writel(bus_addr, edmac->regs + M2P_BASE0);
340 } else {
341 writel(desc->size, edmac->regs + M2P_MAXCNT1);
342 writel(bus_addr, edmac->regs + M2P_BASE1);
343 }
344
345 edmac->buffer ^= 1;
346}
347
348static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
349{
350 u32 control = readl(edmac->regs + M2P_CONTROL);
351
352 m2p_fill_desc(edmac);
353 control |= M2P_CONTROL_STALLINT;
354
355 if (ep93xx_dma_advance_active(edmac)) {
356 m2p_fill_desc(edmac);
357 control |= M2P_CONTROL_NFBINT;
358 }
359
360 m2p_set_control(edmac, control);
361}
362
363static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
364{
365 u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
366 u32 control;
367
368 if (irq_status & M2P_INTERRUPT_ERROR) {
369 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
370
371 /* Clear the error interrupt */
372 writel(1, edmac->regs + M2P_INTERRUPT);
373
374 /*
375 * It seems that there is no easy way of reporting errors back
376 * to client so we just report the error here and continue as
377 * usual.
378 *
379 * Revisit this when there is a mechanism to report back the
380 * errors.
381 */
382 dev_err(chan2dev(edmac),
383 "DMA transfer failed! Details:\n"
384 "\tcookie : %d\n"
385 "\tsrc_addr : 0x%08x\n"
386 "\tdst_addr : 0x%08x\n"
387 "\tsize : %zu\n",
388 desc->txd.cookie, desc->src_addr, desc->dst_addr,
389 desc->size);
390 }
391
392 switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
393 case M2P_INTERRUPT_STALL:
394 /* Disable interrupts */
395 control = readl(edmac->regs + M2P_CONTROL);
396 control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
397 m2p_set_control(edmac, control);
398
399 return INTERRUPT_DONE;
400
401 case M2P_INTERRUPT_NFB:
402 if (ep93xx_dma_advance_active(edmac))
403 m2p_fill_desc(edmac);
404
405 return INTERRUPT_NEXT_BUFFER;
406 }
407
408 return INTERRUPT_UNKNOWN;
409}
410
411/*
412 * M2M DMA implementation
413 *
414 * For the M2M transfers we don't use NFB at all. This is because it simply
415 * doesn't work well with memcpy transfers. When you submit both buffers it is
416 * extremely unlikely that you get an NFB interrupt, but it instead reports
417 * DONE interrupt and both buffers are already transferred which means that we
418 * weren't able to update the next buffer.
419 *
420 * So for now we "simulate" NFB by just submitting buffer after buffer
421 * without double buffering.
422 */
423
424static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
425{
426 const struct ep93xx_dma_data *data = edmac->chan.private;
427 u32 control = 0;
428
429 if (!data) {
430 /* This is memcpy channel, nothing to configure */
431 writel(control, edmac->regs + M2M_CONTROL);
432 return 0;
433 }
434
435 switch (data->port) {
436 case EP93XX_DMA_SSP:
437 /*
438 * This was found via experimenting - anything less than 5
439 * causes the channel to perform only a partial transfer which
440 * leads to problems since we don't get DONE interrupt then.
441 */
442 control = (5 << M2M_CONTROL_PWSC_SHIFT);
443 control |= M2M_CONTROL_NO_HDSK;
444
445 if (data->direction == DMA_TO_DEVICE) {
446 control |= M2M_CONTROL_DAH;
447 control |= M2M_CONTROL_TM_TX;
448 control |= M2M_CONTROL_RSS_SSPTX;
449 } else {
450 control |= M2M_CONTROL_SAH;
451 control |= M2M_CONTROL_TM_RX;
452 control |= M2M_CONTROL_RSS_SSPRX;
453 }
454 break;
455
456 case EP93XX_DMA_IDE:
457 /*
458 * This IDE part is totally untested. Values below are taken
459 * from the EP93xx Users's Guide and might not be correct.
460 */
461 control |= M2M_CONTROL_NO_HDSK;
462 control |= M2M_CONTROL_RSS_IDE;
463 control |= M2M_CONTROL_PW_16;
464
465 if (data->direction == DMA_TO_DEVICE) {
466 /* Worst case from the UG */
467 control = (3 << M2M_CONTROL_PWSC_SHIFT);
468 control |= M2M_CONTROL_DAH;
469 control |= M2M_CONTROL_TM_TX;
470 } else {
471 control = (2 << M2M_CONTROL_PWSC_SHIFT);
472 control |= M2M_CONTROL_SAH;
473 control |= M2M_CONTROL_TM_RX;
474 }
475 break;
476
477 default:
478 return -EINVAL;
479 }
480
481 writel(control, edmac->regs + M2M_CONTROL);
482 return 0;
483}
484
485static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
486{
487 /* Just disable the channel */
488 writel(0, edmac->regs + M2M_CONTROL);
489}
490
491static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
492{
493 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
494
495 if (edmac->buffer == 0) {
496 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
497 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
498 writel(desc->size, edmac->regs + M2M_BCR0);
499 } else {
500 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
501 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
502 writel(desc->size, edmac->regs + M2M_BCR1);
503 }
504
505 edmac->buffer ^= 1;
506}
507
/* Push the currently active descriptor chain to the M2M hardware. */
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	/* ->private is NULL for plain memcpy channels (see below) */
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	/* Program the descriptor registers before touching the control bits */
	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
540
/*
 * Handle an M2M channel interrupt. Returns one of the INTERRUPT_* codes
 * consumed by ep93xx_dma_interrupt().
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	/* Not our interrupt if the DONE bit is not set */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
		return INTERRUPT_UNKNOWN;

	/* Clear the DONE bit */
	writel(0, edmac->regs + M2M_INTERRUPT);

	/* Disable interrupts and the channel */
	control = readl(edmac->regs + M2M_CONTROL);
	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
	writel(control, edmac->regs + M2M_CONTROL);

	/*
	 * Since we only get DONE interrupt we have to find out ourselves
	 * whether there still is something to process. So we try to advance
	 * the chain and see whether it succeeds.
	 */
	if (ep93xx_dma_advance_active(edmac)) {
		edmac->edma->hw_submit(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_DONE;
}
568
569/*
570 * DMA engine API implementation
571 */
572
573static struct ep93xx_dma_desc *
574ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
575{
576 struct ep93xx_dma_desc *desc, *_desc;
577 struct ep93xx_dma_desc *ret = NULL;
578 unsigned long flags;
579
580 spin_lock_irqsave(&edmac->lock, flags);
581 list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
582 if (async_tx_test_ack(&desc->txd)) {
583 list_del_init(&desc->node);
584
585 /* Re-initialize the descriptor */
586 desc->src_addr = 0;
587 desc->dst_addr = 0;
588 desc->size = 0;
589 desc->complete = false;
590 desc->txd.cookie = 0;
591 desc->txd.callback = NULL;
592 desc->txd.callback_param = NULL;
593
594 ret = desc;
595 break;
596 }
597 }
598 spin_unlock_irqrestore(&edmac->lock, flags);
599 return ret;
600}
601
602static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
603 struct ep93xx_dma_desc *desc)
604{
605 if (desc) {
606 unsigned long flags;
607
608 spin_lock_irqsave(&edmac->lock, flags);
609 list_splice_init(&desc->tx_list, &edmac->free_list);
610 list_add(&desc->node, &edmac->free_list);
611 spin_unlock_irqrestore(&edmac->lock, flags);
612 }
613}
614
615/**
616 * ep93xx_dma_advance_work - start processing the next pending transaction
617 * @edmac: channel
618 *
619 * If we have pending transactions queued and we are currently idling, this
620 * function takes the next queued transaction from the @edmac->queue and
621 * pushes it to the hardware for execution.
622 */
623static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
624{
625 struct ep93xx_dma_desc *new;
626 unsigned long flags;
627
628 spin_lock_irqsave(&edmac->lock, flags);
629 if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
630 spin_unlock_irqrestore(&edmac->lock, flags);
631 return;
632 }
633
634 /* Take the next descriptor from the pending queue */
635 new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
636 list_del_init(&new->node);
637
638 ep93xx_dma_set_active(edmac, new);
639
640 /* Push it to the hardware */
641 edmac->edma->hw_submit(edmac);
642 spin_unlock_irqrestore(&edmac->lock, flags);
643}
644
645static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
646{
647 struct device *dev = desc->txd.chan->device->dev;
648
649 if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
650 if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
651 dma_unmap_single(dev, desc->src_addr, desc->size,
652 DMA_TO_DEVICE);
653 else
654 dma_unmap_page(dev, desc->src_addr, desc->size,
655 DMA_TO_DEVICE);
656 }
657 if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
658 if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
659 dma_unmap_single(dev, desc->dst_addr, desc->size,
660 DMA_FROM_DEVICE);
661 else
662 dma_unmap_page(dev, desc->dst_addr, desc->size,
663 DMA_FROM_DEVICE);
664 }
665}
666
/*
 * Deferred completion handling: retire the finished descriptor chain,
 * kick the next pending transaction, and invoke the client callback
 * outside of the channel lock.
 */
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback;
	void *callback_param;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * NOTE(review): this assumes the active list is never empty when the
	 * tasklet runs — confirm ep93xx_dma_get_active() cannot return an
	 * invalid pointer here (e.g. after a racing terminate_all).
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc->complete) {
		edmac->last_completed = desc->txd.cookie;
		list_splice_init(&edmac->active, &list);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Save these before 'desc' is reused as the loop cursor below */
	callback = desc->txd.callback;
	callback_param = desc->txd.callback_param;

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	/* Client callback runs with no locks held */
	if (callback)
		callback(callback_param);
}
704
/* Hardware interrupt handler; one instance registered per channel. */
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	/* Let the M2P/M2M specific code inspect and acknowledge the IRQ */
	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		/* Whole chain finished; defer completion to the tasklet */
		ep93xx_dma_get_active(edmac)->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		/* A period elapsed; cyclic clients get a callback per period */
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
732
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute given descriptor on the hardware or if the hardware
 * is busy, queue the descriptor to be executed later on. Returns cookie which
 * can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);

	cookie = edmac->chan.cookie;

	/* Cookies are positive; wrap back to 1 on signed overflow */
	if (++cookie < 0)
		cookie = 1;

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	edmac->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
775
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		/* M2P channels require client-supplied port/direction data */
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		/* M2M channels may be used without data (plain memcpy) */
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_TO_DEVICE &&
				    data->direction != DMA_FROM_DEVICE)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	edmac->last_completed = 1;
	edmac->chan.cookie = 1;
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	/* Preallocate a pool of descriptors; a partial pool is still usable */
	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}
862
863/**
864 * ep93xx_dma_free_chan_resources - release resources for the channel
865 * @chan: channel
866 *
867 * Function releases all the resources allocated for the given channel.
868 * The channel must be idle when this is called.
869 */
870static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
871{
872 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
873 struct ep93xx_dma_desc *desc, *d;
874 unsigned long flags;
875 LIST_HEAD(list);
876
877 BUG_ON(!list_empty(&edmac->active));
878 BUG_ON(!list_empty(&edmac->queue));
879
880 spin_lock_irqsave(&edmac->lock, flags);
881 edmac->edma->hw_shutdown(edmac);
882 edmac->runtime_addr = 0;
883 edmac->runtime_ctrl = 0;
884 edmac->buffer = 0;
885 list_splice_init(&edmac->free_list, &list);
886 spin_unlock_irqrestore(&edmac->lock, flags);
887
888 list_for_each_entry_safe(desc, d, &list, node)
889 kfree(desc);
890
891 clk_disable(edmac->clk);
892 free_irq(edmac->irq, edmac);
893}
894
895/**
896 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
897 * @chan: channel
898 * @dest: destination bus address
899 * @src: source bus address
900 * @len: size of the transaction
901 * @flags: flags for the descriptor
902 *
903 * Returns a valid DMA descriptor or %NULL in case of failure.
904 */
905struct dma_async_tx_descriptor *
906ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
907 dma_addr_t src, size_t len, unsigned long flags)
908{
909 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
910 struct ep93xx_dma_desc *desc, *first;
911 size_t bytes, offset;
912
913 first = NULL;
914 for (offset = 0; offset < len; offset += bytes) {
915 desc = ep93xx_dma_desc_get(edmac);
916 if (!desc) {
917 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
918 goto fail;
919 }
920
921 bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
922
923 desc->src_addr = src + offset;
924 desc->dst_addr = dest + offset;
925 desc->size = bytes;
926
927 if (!first)
928 first = desc;
929 else
930 list_add_tail(&desc->node, &first->tx_list);
931 }
932
933 first->txd.cookie = -EBUSY;
934 first->txd.flags = flags;
935
936 return &first->txd;
937fail:
938 ep93xx_dma_desc_put(edmac, first);
939 return NULL;
940}
941
942/**
943 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
944 * @chan: channel
945 * @sgl: list of buffers to transfer
946 * @sg_len: number of entries in @sgl
947 * @dir: direction of tha DMA transfer
948 * @flags: flags for the descriptor
949 *
950 * Returns a valid DMA descriptor or %NULL in case of failure.
951 */
952static struct dma_async_tx_descriptor *
953ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
954 unsigned int sg_len, enum dma_data_direction dir,
955 unsigned long flags)
956{
957 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
958 struct ep93xx_dma_desc *desc, *first;
959 struct scatterlist *sg;
960 int i;
961
962 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
963 dev_warn(chan2dev(edmac),
964 "channel was configured with different direction\n");
965 return NULL;
966 }
967
968 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
969 dev_warn(chan2dev(edmac),
970 "channel is already used for cyclic transfers\n");
971 return NULL;
972 }
973
974 first = NULL;
975 for_each_sg(sgl, sg, sg_len, i) {
976 size_t sg_len = sg_dma_len(sg);
977
978 if (sg_len > DMA_MAX_CHAN_BYTES) {
979 dev_warn(chan2dev(edmac), "too big transfer size %d\n",
980 sg_len);
981 goto fail;
982 }
983
984 desc = ep93xx_dma_desc_get(edmac);
985 if (!desc) {
986 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
987 goto fail;
988 }
989
990 if (dir == DMA_TO_DEVICE) {
991 desc->src_addr = sg_dma_address(sg);
992 desc->dst_addr = edmac->runtime_addr;
993 } else {
994 desc->src_addr = edmac->runtime_addr;
995 desc->dst_addr = sg_dma_address(sg);
996 }
997 desc->size = sg_len;
998
999 if (!first)
1000 first = desc;
1001 else
1002 list_add_tail(&desc->node, &first->tx_list);
1003 }
1004
1005 first->txd.cookie = -EBUSY;
1006 first->txd.flags = flags;
1007
1008 return &first->txd;
1009
1010fail:
1011 ep93xx_dma_desc_put(edmac, first);
1012 return NULL;
1013}
1014
1015/**
1016 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
1017 * @chan: channel
1018 * @dma_addr: DMA mapped address of the buffer
1019 * @buf_len: length of the buffer (in bytes)
1020 * @period_len: lenght of a single period
1021 * @dir: direction of the operation
1022 *
1023 * Prepares a descriptor for cyclic DMA operation. This means that once the
1024 * descriptor is submitted, we will be submitting in a @period_len sized
1025 * buffers and calling callback once the period has been elapsed. Transfer
1026 * terminates only when client calls dmaengine_terminate_all() for this
1027 * channel.
1028 *
1029 * Returns a valid DMA descriptor or %NULL in case of failure.
1030 */
1031static struct dma_async_tx_descriptor *
1032ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1033 size_t buf_len, size_t period_len,
1034 enum dma_data_direction dir)
1035{
1036 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1037 struct ep93xx_dma_desc *desc, *first;
1038 size_t offset = 0;
1039
1040 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1041 dev_warn(chan2dev(edmac),
1042 "channel was configured with different direction\n");
1043 return NULL;
1044 }
1045
1046 if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1047 dev_warn(chan2dev(edmac),
1048 "channel is already used for cyclic transfers\n");
1049 return NULL;
1050 }
1051
1052 if (period_len > DMA_MAX_CHAN_BYTES) {
1053 dev_warn(chan2dev(edmac), "too big period length %d\n",
1054 period_len);
1055 return NULL;
1056 }
1057
1058 /* Split the buffer into period size chunks */
1059 first = NULL;
1060 for (offset = 0; offset < buf_len; offset += period_len) {
1061 desc = ep93xx_dma_desc_get(edmac);
1062 if (!desc) {
1063 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
1064 goto fail;
1065 }
1066
1067 if (dir == DMA_TO_DEVICE) {
1068 desc->src_addr = dma_addr + offset;
1069 desc->dst_addr = edmac->runtime_addr;
1070 } else {
1071 desc->src_addr = edmac->runtime_addr;
1072 desc->dst_addr = dma_addr + offset;
1073 }
1074
1075 desc->size = period_len;
1076
1077 if (!first)
1078 first = desc;
1079 else
1080 list_add_tail(&desc->node, &first->tx_list);
1081 }
1082
1083 first->txd.cookie = -EBUSY;
1084
1085 return &first->txd;
1086
1087fail:
1088 ep93xx_dma_desc_put(edmac, first);
1089 return NULL;
1090}
1091
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	/* Recycle the collected descriptors outside of the lock */
	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}
1123
1124static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
1125 struct dma_slave_config *config)
1126{
1127 enum dma_slave_buswidth width;
1128 unsigned long flags;
1129 u32 addr, ctrl;
1130
1131 if (!edmac->edma->m2m)
1132 return -EINVAL;
1133
1134 switch (config->direction) {
1135 case DMA_FROM_DEVICE:
1136 width = config->src_addr_width;
1137 addr = config->src_addr;
1138 break;
1139
1140 case DMA_TO_DEVICE:
1141 width = config->dst_addr_width;
1142 addr = config->dst_addr;
1143 break;
1144
1145 default:
1146 return -EINVAL;
1147 }
1148
1149 switch (width) {
1150 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1151 ctrl = 0;
1152 break;
1153 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1154 ctrl = M2M_CONTROL_PW_16;
1155 break;
1156 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1157 ctrl = M2M_CONTROL_PW_32;
1158 break;
1159 default:
1160 return -EINVAL;
1161 }
1162
1163 spin_lock_irqsave(&edmac->lock, flags);
1164 edmac->runtime_addr = addr;
1165 edmac->runtime_ctrl = ctrl;
1166 spin_unlock_irqrestore(&edmac->lock, flags);
1167
1168 return 0;
1169}
1170
1171/**
1172 * ep93xx_dma_control - manipulate all pending operations on a channel
1173 * @chan: channel
1174 * @cmd: control command to perform
1175 * @arg: optional argument
1176 *
1177 * Controls the channel. Function returns %0 in case of success or negative
1178 * error in case of failure.
1179 */
1180static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1181 unsigned long arg)
1182{
1183 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1184 struct dma_slave_config *config;
1185
1186 switch (cmd) {
1187 case DMA_TERMINATE_ALL:
1188 return ep93xx_dma_terminate_all(edmac);
1189
1190 case DMA_SLAVE_CONFIG:
1191 config = (struct dma_slave_config *)arg;
1192 return ep93xx_dma_slave_config(edmac, config);
1193
1194 default:
1195 break;
1196 }
1197
1198 return -ENOSYS;
1199}
1200
1201/**
1202 * ep93xx_dma_tx_status - check if a transaction is completed
1203 * @chan: channel
1204 * @cookie: transaction specific cookie
1205 * @state: state of the transaction is stored here if given
1206 *
1207 * This function can be used to query state of a given transaction.
1208 */
1209static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1210 dma_cookie_t cookie,
1211 struct dma_tx_state *state)
1212{
1213 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1214 dma_cookie_t last_used, last_completed;
1215 enum dma_status ret;
1216 unsigned long flags;
1217
1218 spin_lock_irqsave(&edmac->lock, flags);
1219 last_used = chan->cookie;
1220 last_completed = edmac->last_completed;
1221 spin_unlock_irqrestore(&edmac->lock, flags);
1222
1223 ret = dma_async_is_complete(cookie, last_completed, last_used);
1224 dma_set_tx_state(state, last_completed, last_used, 0);
1225
1226 return ret;
1227}
1228
/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	/* All the real work happens in ep93xx_dma_advance_work() */
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}
1240
1241static int __init ep93xx_dma_probe(struct platform_device *pdev)
1242{
1243 struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1244 struct ep93xx_dma_engine *edma;
1245 struct dma_device *dma_dev;
1246 size_t edma_size;
1247 int ret, i;
1248
1249 edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
1250 edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
1251 if (!edma)
1252 return -ENOMEM;
1253
1254 dma_dev = &edma->dma_dev;
1255 edma->m2m = platform_get_device_id(pdev)->driver_data;
1256 edma->num_channels = pdata->num_channels;
1257
1258 INIT_LIST_HEAD(&dma_dev->channels);
1259 for (i = 0; i < pdata->num_channels; i++) {
1260 const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
1261 struct ep93xx_dma_chan *edmac = &edma->channels[i];
1262
1263 edmac->chan.device = dma_dev;
1264 edmac->regs = cdata->base;
1265 edmac->irq = cdata->irq;
1266 edmac->edma = edma;
1267
1268 edmac->clk = clk_get(NULL, cdata->name);
1269 if (IS_ERR(edmac->clk)) {
1270 dev_warn(&pdev->dev, "failed to get clock for %s\n",
1271 cdata->name);
1272 continue;
1273 }
1274
1275 spin_lock_init(&edmac->lock);
1276 INIT_LIST_HEAD(&edmac->active);
1277 INIT_LIST_HEAD(&edmac->queue);
1278 INIT_LIST_HEAD(&edmac->free_list);
1279 tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
1280 (unsigned long)edmac);
1281
1282 list_add_tail(&edmac->chan.device_node,
1283 &dma_dev->channels);
1284 }
1285
1286 dma_cap_zero(dma_dev->cap_mask);
1287 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1288 dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1289
1290 dma_dev->dev = &pdev->dev;
1291 dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
1292 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1293 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1294 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1295 dma_dev->device_control = ep93xx_dma_control;
1296 dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1297 dma_dev->device_tx_status = ep93xx_dma_tx_status;
1298
1299 dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
1300
1301 if (edma->m2m) {
1302 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1303 dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
1304
1305 edma->hw_setup = m2m_hw_setup;
1306 edma->hw_shutdown = m2m_hw_shutdown;
1307 edma->hw_submit = m2m_hw_submit;
1308 edma->hw_interrupt = m2m_hw_interrupt;
1309 } else {
1310 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1311
1312 edma->hw_setup = m2p_hw_setup;
1313 edma->hw_shutdown = m2p_hw_shutdown;
1314 edma->hw_submit = m2p_hw_submit;
1315 edma->hw_interrupt = m2p_hw_interrupt;
1316 }
1317
1318 ret = dma_async_device_register(dma_dev);
1319 if (unlikely(ret)) {
1320 for (i = 0; i < edma->num_channels; i++) {
1321 struct ep93xx_dma_chan *edmac = &edma->channels[i];
1322 if (!IS_ERR_OR_NULL(edmac->clk))
1323 clk_put(edmac->clk);
1324 }
1325 kfree(edma);
1326 } else {
1327 dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
1328 edma->m2m ? "M" : "P");
1329 }
1330
1331 return ret;
1332}
1333
/*
 * driver_data is the "m2m" flag: 0 for the memory-to-peripheral controller,
 * 1 for the memory-to-memory controller (consumed in ep93xx_dma_probe()).
 */
static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver = {
		.name = "ep93xx-dma",
	},
	.id_table = ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	/* probe is __init, hence platform_driver_probe() over _register() */
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index de35c3ad8a69..52e2900d9d8e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -86,9 +86,6 @@ config SPI_BFIN_SPORT
86 help 86 help
87 Enable support for a SPI bus via the Blackfin SPORT peripheral. 87 Enable support for a SPI bus via the Blackfin SPORT peripheral.
88 88
89 This driver can also be built as a module. If so, the module
90 will be called spi_bfin_sport.
91
92config SPI_AU1550 89config SPI_AU1550
93 tristate "Au1550/Au12x0 SPI Controller" 90 tristate "Au1550/Au12x0 SPI Controller"
94 depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL 91 depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
@@ -97,9 +94,6 @@ config SPI_AU1550
97 If you say yes to this option, support will be included for the 94 If you say yes to this option, support will be included for the
98 Au1550 SPI controller (may also work with Au1200,Au1210,Au1250). 95 Au1550 SPI controller (may also work with Au1200,Au1210,Au1250).
99 96
100 This driver can also be built as a module. If so, the module
101 will be called au1550_spi.
102
103config SPI_BITBANG 97config SPI_BITBANG
104 tristate "Utilities for Bitbanging SPI masters" 98 tristate "Utilities for Bitbanging SPI masters"
105 help 99 help
@@ -130,9 +124,6 @@ config SPI_COLDFIRE_QSPI
130 This enables support for the Coldfire QSPI controller in master 124 This enables support for the Coldfire QSPI controller in master
131 mode. 125 mode.
132 126
133 This driver can also be built as a module. If so, the module
134 will be called coldfire_qspi.
135
136config SPI_DAVINCI 127config SPI_DAVINCI
137 tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" 128 tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
138 depends on SPI_MASTER && ARCH_DAVINCI 129 depends on SPI_MASTER && ARCH_DAVINCI
@@ -140,9 +131,6 @@ config SPI_DAVINCI
140 help 131 help
141 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. 132 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
142 133
143 This driver can also be built as a module. The module will be called
144 davinci_spi.
145
146config SPI_EP93XX 134config SPI_EP93XX
147 tristate "Cirrus Logic EP93xx SPI controller" 135 tristate "Cirrus Logic EP93xx SPI controller"
148 depends on ARCH_EP93XX 136 depends on ARCH_EP93XX
@@ -150,9 +138,6 @@ config SPI_EP93XX
150 This enables using the Cirrus EP93xx SPI controller in master 138 This enables using the Cirrus EP93xx SPI controller in master
151 mode. 139 mode.
152 140
153 To compile this driver as a module, choose M here. The module will be
154 called ep93xx_spi.
155
156config SPI_GPIO 141config SPI_GPIO
157 tristate "GPIO-based bitbanging SPI Master" 142 tristate "GPIO-based bitbanging SPI Master"
158 depends on GENERIC_GPIO 143 depends on GENERIC_GPIO
@@ -169,21 +154,6 @@ config SPI_GPIO
169 GPIO operations, you should be able to leverage that for better 154 GPIO operations, you should be able to leverage that for better
170 speed with a custom version of this driver; see the source code. 155 speed with a custom version of this driver; see the source code.
171 156
172config SPI_IMX_VER_IMX1
173 def_bool y if SOC_IMX1
174
175config SPI_IMX_VER_0_0
176 def_bool y if SOC_IMX21 || SOC_IMX27
177
178config SPI_IMX_VER_0_4
179 def_bool y if SOC_IMX31
180
181config SPI_IMX_VER_0_7
182 def_bool y if ARCH_MX25 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53
183
184config SPI_IMX_VER_2_3
185 def_bool y if SOC_IMX51 || SOC_IMX53
186
187config SPI_IMX 157config SPI_IMX
188 tristate "Freescale i.MX SPI controllers" 158 tristate "Freescale i.MX SPI controllers"
189 depends on ARCH_MXC 159 depends on ARCH_MXC
@@ -328,16 +298,6 @@ config SPI_S3C24XX_FIQ
328 no free DMA channels, or when doing transfers that required both 298 no free DMA channels, or when doing transfers that required both
329 TX and RX data paths. 299 TX and RX data paths.
330 300
331config SPI_S3C24XX_GPIO
332 tristate "Samsung S3C24XX series SPI by GPIO"
333 depends on ARCH_S3C2410 && EXPERIMENTAL
334 select SPI_BITBANG
335 help
336 SPI driver for Samsung S3C24XX series ARM SoCs using
337 GPIO lines to provide the SPI bus. This can be used where
338 the inbuilt hardware cannot provide the transfer mode, or
339 where the board is using non hardware connected pins.
340
341config SPI_S3C64XX 301config SPI_S3C64XX
342 tristate "Samsung S3C64XX series type SPI" 302 tristate "Samsung S3C64XX series type SPI"
343 depends on (ARCH_S3C64XX || ARCH_S5P64X0) 303 depends on (ARCH_S3C64XX || ARCH_S5P64X0)
@@ -385,16 +345,16 @@ config SPI_TI_SSP
385 This selects an SPI master implementation using a TI sequencer 345 This selects an SPI master implementation using a TI sequencer
386 serial port. 346 serial port.
387 347
388 To compile this driver as a module, choose M here: the
389 module will be called ti-ssp-spi.
390
391config SPI_TOPCLIFF_PCH 348config SPI_TOPCLIFF_PCH
392 tristate "Topcliff PCH SPI Controller" 349 tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
393 depends on PCI 350 depends on PCI
394 help 351 help
395 SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus 352 SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
396 used in some x86 embedded processors. 353 used in some x86 embedded processors.
397 354
355 This driver also supports the ML7213, a companion chip for the
356 Atom E6xx series and compatible with the Intel EG20T PCH.
357
398config SPI_TXX9 358config SPI_TXX9
399 tristate "Toshiba TXx9 SPI controller" 359 tristate "Toshiba TXx9 SPI controller"
400 depends on GENERIC_GPIO && CPU_TX49XX 360 depends on GENERIC_GPIO && CPU_TX49XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 0f8c69b6b19e..61c3261c388c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -7,68 +7,55 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
7# small core, mostly translating board-specific 7# small core, mostly translating board-specific
8# config declarations into driver model code 8# config declarations into driver model code
9obj-$(CONFIG_SPI_MASTER) += spi.o 9obj-$(CONFIG_SPI_MASTER) += spi.o
10obj-$(CONFIG_SPI_SPIDEV) += spidev.o
10 11
11# SPI master controller drivers (bus) 12# SPI master controller drivers (bus)
12obj-$(CONFIG_SPI_ALTERA) += spi_altera.o 13obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
13obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o 14obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
14obj-$(CONFIG_SPI_ATH79) += ath79_spi.o 15obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
15obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o 16obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
16obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o 17obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o
17obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o 18obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
18obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 19obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
19obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 20obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
20obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o 21obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
21obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o 22obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
22obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o 23obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
23obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o 24obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
24dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o 25obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
25obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o 26spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
26obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o 27obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
27obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 28obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
28obj-$(CONFIG_SPI_IMX) += spi_imx.o 29obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
29obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 30obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
30obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 31obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
31obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o 32obj-$(CONFIG_SPI_IMX) += spi-imx.o
32obj-$(CONFIG_SPI_OC_TINY) += spi_oc_tiny.o 33obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
33obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 34obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
34obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o 35obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
35obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o 36obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
36obj-$(CONFIG_SPI_ORION) += orion_spi.o 37obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
37obj-$(CONFIG_SPI_PL022) += amba-pl022.o 38obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
38obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o 39obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
39obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 40obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
40obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o 41obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
41obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o 42obj-$(CONFIG_SPI_ORION) += spi-orion.o
42obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o 43obj-$(CONFIG_SPI_PL022) += spi-pl022.o
43obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o 44obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
44obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o 45obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o
45obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 46obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
46obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o 47obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
47obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o 48spi-s3c24xx-hw-y := spi-s3c24xx.o
48obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o 49spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
49obj-$(CONFIG_SPI_TI_SSP) += ti-ssp-spi.o 50obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
50obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o 51obj-$(CONFIG_SPI_SH) += spi-sh.o
51obj-$(CONFIG_SPI_TXX9) += spi_txx9.o 52obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
52obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o 53obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
53obj-$(CONFIG_SPI_SH) += spi_sh.o 54obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o
54obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o 55obj-$(CONFIG_SPI_TEGRA) += spi-tegra.o
55obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o 56obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
56obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o 57obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
57obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o 58obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
59obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
60obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
58 61
59# special build for s3c24xx spi driver with fiq support
60spi_s3c24xx_hw-y := spi_s3c24xx.o
61spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
62
63# ... add above this line ...
64
65# SPI protocol drivers (device/link on bus)
66obj-$(CONFIG_SPI_SPIDEV) += spidev.o
67obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
68# ... add above this line ...
69
70# SPI slave controller drivers (upstream link)
71# ... add above this line ...
72
73# SPI slave drivers (protocol for that link)
74# ... add above this line ...
diff --git a/drivers/spi/atmel_spi.h b/drivers/spi/atmel_spi.h
deleted file mode 100644
index 6e06b6ad3a45..000000000000
--- a/drivers/spi/atmel_spi.h
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * Register definitions for Atmel Serial Peripheral Interface (SPI)
3 *
4 * Copyright (C) 2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __ATMEL_SPI_H__
11#define __ATMEL_SPI_H__
12
13/* SPI register offsets */
14#define SPI_CR 0x0000
15#define SPI_MR 0x0004
16#define SPI_RDR 0x0008
17#define SPI_TDR 0x000c
18#define SPI_SR 0x0010
19#define SPI_IER 0x0014
20#define SPI_IDR 0x0018
21#define SPI_IMR 0x001c
22#define SPI_CSR0 0x0030
23#define SPI_CSR1 0x0034
24#define SPI_CSR2 0x0038
25#define SPI_CSR3 0x003c
26#define SPI_RPR 0x0100
27#define SPI_RCR 0x0104
28#define SPI_TPR 0x0108
29#define SPI_TCR 0x010c
30#define SPI_RNPR 0x0110
31#define SPI_RNCR 0x0114
32#define SPI_TNPR 0x0118
33#define SPI_TNCR 0x011c
34#define SPI_PTCR 0x0120
35#define SPI_PTSR 0x0124
36
37/* Bitfields in CR */
38#define SPI_SPIEN_OFFSET 0
39#define SPI_SPIEN_SIZE 1
40#define SPI_SPIDIS_OFFSET 1
41#define SPI_SPIDIS_SIZE 1
42#define SPI_SWRST_OFFSET 7
43#define SPI_SWRST_SIZE 1
44#define SPI_LASTXFER_OFFSET 24
45#define SPI_LASTXFER_SIZE 1
46
47/* Bitfields in MR */
48#define SPI_MSTR_OFFSET 0
49#define SPI_MSTR_SIZE 1
50#define SPI_PS_OFFSET 1
51#define SPI_PS_SIZE 1
52#define SPI_PCSDEC_OFFSET 2
53#define SPI_PCSDEC_SIZE 1
54#define SPI_FDIV_OFFSET 3
55#define SPI_FDIV_SIZE 1
56#define SPI_MODFDIS_OFFSET 4
57#define SPI_MODFDIS_SIZE 1
58#define SPI_LLB_OFFSET 7
59#define SPI_LLB_SIZE 1
60#define SPI_PCS_OFFSET 16
61#define SPI_PCS_SIZE 4
62#define SPI_DLYBCS_OFFSET 24
63#define SPI_DLYBCS_SIZE 8
64
65/* Bitfields in RDR */
66#define SPI_RD_OFFSET 0
67#define SPI_RD_SIZE 16
68
69/* Bitfields in TDR */
70#define SPI_TD_OFFSET 0
71#define SPI_TD_SIZE 16
72
73/* Bitfields in SR */
74#define SPI_RDRF_OFFSET 0
75#define SPI_RDRF_SIZE 1
76#define SPI_TDRE_OFFSET 1
77#define SPI_TDRE_SIZE 1
78#define SPI_MODF_OFFSET 2
79#define SPI_MODF_SIZE 1
80#define SPI_OVRES_OFFSET 3
81#define SPI_OVRES_SIZE 1
82#define SPI_ENDRX_OFFSET 4
83#define SPI_ENDRX_SIZE 1
84#define SPI_ENDTX_OFFSET 5
85#define SPI_ENDTX_SIZE 1
86#define SPI_RXBUFF_OFFSET 6
87#define SPI_RXBUFF_SIZE 1
88#define SPI_TXBUFE_OFFSET 7
89#define SPI_TXBUFE_SIZE 1
90#define SPI_NSSR_OFFSET 8
91#define SPI_NSSR_SIZE 1
92#define SPI_TXEMPTY_OFFSET 9
93#define SPI_TXEMPTY_SIZE 1
94#define SPI_SPIENS_OFFSET 16
95#define SPI_SPIENS_SIZE 1
96
97/* Bitfields in CSR0 */
98#define SPI_CPOL_OFFSET 0
99#define SPI_CPOL_SIZE 1
100#define SPI_NCPHA_OFFSET 1
101#define SPI_NCPHA_SIZE 1
102#define SPI_CSAAT_OFFSET 3
103#define SPI_CSAAT_SIZE 1
104#define SPI_BITS_OFFSET 4
105#define SPI_BITS_SIZE 4
106#define SPI_SCBR_OFFSET 8
107#define SPI_SCBR_SIZE 8
108#define SPI_DLYBS_OFFSET 16
109#define SPI_DLYBS_SIZE 8
110#define SPI_DLYBCT_OFFSET 24
111#define SPI_DLYBCT_SIZE 8
112
113/* Bitfields in RCR */
114#define SPI_RXCTR_OFFSET 0
115#define SPI_RXCTR_SIZE 16
116
117/* Bitfields in TCR */
118#define SPI_TXCTR_OFFSET 0
119#define SPI_TXCTR_SIZE 16
120
121/* Bitfields in RNCR */
122#define SPI_RXNCR_OFFSET 0
123#define SPI_RXNCR_SIZE 16
124
125/* Bitfields in TNCR */
126#define SPI_TXNCR_OFFSET 0
127#define SPI_TXNCR_SIZE 16
128
129/* Bitfields in PTCR */
130#define SPI_RXTEN_OFFSET 0
131#define SPI_RXTEN_SIZE 1
132#define SPI_RXTDIS_OFFSET 1
133#define SPI_RXTDIS_SIZE 1
134#define SPI_TXTEN_OFFSET 8
135#define SPI_TXTEN_SIZE 1
136#define SPI_TXTDIS_OFFSET 9
137#define SPI_TXTDIS_SIZE 1
138
139/* Constants for BITS */
140#define SPI_BITS_8_BPT 0
141#define SPI_BITS_9_BPT 1
142#define SPI_BITS_10_BPT 2
143#define SPI_BITS_11_BPT 3
144#define SPI_BITS_12_BPT 4
145#define SPI_BITS_13_BPT 5
146#define SPI_BITS_14_BPT 6
147#define SPI_BITS_15_BPT 7
148#define SPI_BITS_16_BPT 8
149
150/* Bit manipulation macros */
151#define SPI_BIT(name) \
152 (1 << SPI_##name##_OFFSET)
153#define SPI_BF(name,value) \
154 (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
155#define SPI_BFEXT(name,value) \
156 (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
157#define SPI_BFINS(name,value,old) \
158 ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
159 | SPI_BF(name,value))
160
161/* Register access macros */
162#define spi_readl(port,reg) \
163 __raw_readl((port)->regs + SPI_##reg)
164#define spi_writel(port,reg,value) \
165 __raw_writel((value), (port)->regs + SPI_##reg)
166
167#endif /* __ATMEL_SPI_H__ */
diff --git a/drivers/spi/spi_altera.c b/drivers/spi/spi-altera.c
index 4813a63ce6fb..4813a63ce6fb 100644
--- a/drivers/spi/spi_altera.c
+++ b/drivers/spi/spi-altera.c
diff --git a/drivers/spi/ath79_spi.c b/drivers/spi/spi-ath79.c
index fcff810ea3b0..03019bf5a5e9 100644
--- a/drivers/spi/ath79_spi.c
+++ b/drivers/spi/spi-ath79.c
@@ -232,7 +232,7 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
232 goto err_put_master; 232 goto err_put_master;
233 } 233 }
234 234
235 sp->base = ioremap(r->start, r->end - r->start + 1); 235 sp->base = ioremap(r->start, resource_size(r));
236 if (!sp->base) { 236 if (!sp->base) {
237 ret = -ENXIO; 237 ret = -ENXIO;
238 goto err_put_master; 238 goto err_put_master;
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/spi-atmel.c
index 08711e9202ab..82dee9a6c0de 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/spi-atmel.c
@@ -25,7 +25,160 @@
25#include <mach/gpio.h> 25#include <mach/gpio.h>
26#include <mach/cpu.h> 26#include <mach/cpu.h>
27 27
28#include "atmel_spi.h" 28/* SPI register offsets */
29#define SPI_CR 0x0000
30#define SPI_MR 0x0004
31#define SPI_RDR 0x0008
32#define SPI_TDR 0x000c
33#define SPI_SR 0x0010
34#define SPI_IER 0x0014
35#define SPI_IDR 0x0018
36#define SPI_IMR 0x001c
37#define SPI_CSR0 0x0030
38#define SPI_CSR1 0x0034
39#define SPI_CSR2 0x0038
40#define SPI_CSR3 0x003c
41#define SPI_RPR 0x0100
42#define SPI_RCR 0x0104
43#define SPI_TPR 0x0108
44#define SPI_TCR 0x010c
45#define SPI_RNPR 0x0110
46#define SPI_RNCR 0x0114
47#define SPI_TNPR 0x0118
48#define SPI_TNCR 0x011c
49#define SPI_PTCR 0x0120
50#define SPI_PTSR 0x0124
51
52/* Bitfields in CR */
53#define SPI_SPIEN_OFFSET 0
54#define SPI_SPIEN_SIZE 1
55#define SPI_SPIDIS_OFFSET 1
56#define SPI_SPIDIS_SIZE 1
57#define SPI_SWRST_OFFSET 7
58#define SPI_SWRST_SIZE 1
59#define SPI_LASTXFER_OFFSET 24
60#define SPI_LASTXFER_SIZE 1
61
62/* Bitfields in MR */
63#define SPI_MSTR_OFFSET 0
64#define SPI_MSTR_SIZE 1
65#define SPI_PS_OFFSET 1
66#define SPI_PS_SIZE 1
67#define SPI_PCSDEC_OFFSET 2
68#define SPI_PCSDEC_SIZE 1
69#define SPI_FDIV_OFFSET 3
70#define SPI_FDIV_SIZE 1
71#define SPI_MODFDIS_OFFSET 4
72#define SPI_MODFDIS_SIZE 1
73#define SPI_LLB_OFFSET 7
74#define SPI_LLB_SIZE 1
75#define SPI_PCS_OFFSET 16
76#define SPI_PCS_SIZE 4
77#define SPI_DLYBCS_OFFSET 24
78#define SPI_DLYBCS_SIZE 8
79
80/* Bitfields in RDR */
81#define SPI_RD_OFFSET 0
82#define SPI_RD_SIZE 16
83
84/* Bitfields in TDR */
85#define SPI_TD_OFFSET 0
86#define SPI_TD_SIZE 16
87
88/* Bitfields in SR */
89#define SPI_RDRF_OFFSET 0
90#define SPI_RDRF_SIZE 1
91#define SPI_TDRE_OFFSET 1
92#define SPI_TDRE_SIZE 1
93#define SPI_MODF_OFFSET 2
94#define SPI_MODF_SIZE 1
95#define SPI_OVRES_OFFSET 3
96#define SPI_OVRES_SIZE 1
97#define SPI_ENDRX_OFFSET 4
98#define SPI_ENDRX_SIZE 1
99#define SPI_ENDTX_OFFSET 5
100#define SPI_ENDTX_SIZE 1
101#define SPI_RXBUFF_OFFSET 6
102#define SPI_RXBUFF_SIZE 1
103#define SPI_TXBUFE_OFFSET 7
104#define SPI_TXBUFE_SIZE 1
105#define SPI_NSSR_OFFSET 8
106#define SPI_NSSR_SIZE 1
107#define SPI_TXEMPTY_OFFSET 9
108#define SPI_TXEMPTY_SIZE 1
109#define SPI_SPIENS_OFFSET 16
110#define SPI_SPIENS_SIZE 1
111
112/* Bitfields in CSR0 */
113#define SPI_CPOL_OFFSET 0
114#define SPI_CPOL_SIZE 1
115#define SPI_NCPHA_OFFSET 1
116#define SPI_NCPHA_SIZE 1
117#define SPI_CSAAT_OFFSET 3
118#define SPI_CSAAT_SIZE 1
119#define SPI_BITS_OFFSET 4
120#define SPI_BITS_SIZE 4
121#define SPI_SCBR_OFFSET 8
122#define SPI_SCBR_SIZE 8
123#define SPI_DLYBS_OFFSET 16
124#define SPI_DLYBS_SIZE 8
125#define SPI_DLYBCT_OFFSET 24
126#define SPI_DLYBCT_SIZE 8
127
128/* Bitfields in RCR */
129#define SPI_RXCTR_OFFSET 0
130#define SPI_RXCTR_SIZE 16
131
132/* Bitfields in TCR */
133#define SPI_TXCTR_OFFSET 0
134#define SPI_TXCTR_SIZE 16
135
136/* Bitfields in RNCR */
137#define SPI_RXNCR_OFFSET 0
138#define SPI_RXNCR_SIZE 16
139
140/* Bitfields in TNCR */
141#define SPI_TXNCR_OFFSET 0
142#define SPI_TXNCR_SIZE 16
143
144/* Bitfields in PTCR */
145#define SPI_RXTEN_OFFSET 0
146#define SPI_RXTEN_SIZE 1
147#define SPI_RXTDIS_OFFSET 1
148#define SPI_RXTDIS_SIZE 1
149#define SPI_TXTEN_OFFSET 8
150#define SPI_TXTEN_SIZE 1
151#define SPI_TXTDIS_OFFSET 9
152#define SPI_TXTDIS_SIZE 1
153
154/* Constants for BITS */
155#define SPI_BITS_8_BPT 0
156#define SPI_BITS_9_BPT 1
157#define SPI_BITS_10_BPT 2
158#define SPI_BITS_11_BPT 3
159#define SPI_BITS_12_BPT 4
160#define SPI_BITS_13_BPT 5
161#define SPI_BITS_14_BPT 6
162#define SPI_BITS_15_BPT 7
163#define SPI_BITS_16_BPT 8
164
165/* Bit manipulation macros */
166#define SPI_BIT(name) \
167 (1 << SPI_##name##_OFFSET)
168#define SPI_BF(name,value) \
169 (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
170#define SPI_BFEXT(name,value) \
171 (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
172#define SPI_BFINS(name,value,old) \
173 ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
174 | SPI_BF(name,value))
175
176/* Register access macros */
177#define spi_readl(port,reg) \
178 __raw_readl((port)->regs + SPI_##reg)
179#define spi_writel(port,reg,value) \
180 __raw_writel((value), (port)->regs + SPI_##reg)
181
29 182
30/* 183/*
31 * The core SPI transfer engine just talks to a register bank to set up 184 * The core SPI transfer engine just talks to a register bank to set up
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/spi-au1550.c
index b50563d320e1..bddee5f516b2 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/spi-au1550.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * au1550_spi.c - au1550 psc spi controller driver 2 * au1550 psc spi controller driver
3 * may work also with au1200, au1210, au1250 3 * may work also with au1200, au1210, au1250
4 * will not work on au1000, au1100 and au1500 (no full spi controller there) 4 * will not work on au1000, au1100 and au1500 (no full spi controller there)
5 * 5 *
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi-bfin-sport.c
index e557ff617b11..e557ff617b11 100644
--- a/drivers/spi/spi_bfin_sport.c
+++ b/drivers/spi/spi-bfin-sport.c
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index cc880c95e7de..b8d25f2b7038 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -58,7 +58,7 @@ struct bfin_spi_master_data {
58 struct spi_master *master; 58 struct spi_master *master;
59 59
60 /* Regs base of SPI controller */ 60 /* Regs base of SPI controller */
61 void __iomem *regs_base; 61 struct bfin_spi_regs __iomem *regs;
62 62
63 /* Pin request list */ 63 /* Pin request list */
64 u16 *pin_req; 64 u16 *pin_req;
@@ -122,34 +122,14 @@ struct bfin_spi_slave_data {
122 const struct bfin_spi_transfer_ops *ops; 122 const struct bfin_spi_transfer_ops *ops;
123}; 123};
124 124
125#define DEFINE_SPI_REG(reg, off) \
126static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \
127 { return bfin_read16(drv_data->regs_base + off); } \
128static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \
129 { bfin_write16(drv_data->regs_base + off, v); }
130
131DEFINE_SPI_REG(CTRL, 0x00)
132DEFINE_SPI_REG(FLAG, 0x04)
133DEFINE_SPI_REG(STAT, 0x08)
134DEFINE_SPI_REG(TDBR, 0x0C)
135DEFINE_SPI_REG(RDBR, 0x10)
136DEFINE_SPI_REG(BAUD, 0x14)
137DEFINE_SPI_REG(SHAW, 0x18)
138
139static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) 125static void bfin_spi_enable(struct bfin_spi_master_data *drv_data)
140{ 126{
141 u16 cr; 127 bfin_write_or(&drv_data->regs->ctl, BIT_CTL_ENABLE);
142
143 cr = read_CTRL(drv_data);
144 write_CTRL(drv_data, (cr | BIT_CTL_ENABLE));
145} 128}
146 129
147static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) 130static void bfin_spi_disable(struct bfin_spi_master_data *drv_data)
148{ 131{
149 u16 cr; 132 bfin_write_and(&drv_data->regs->ctl, ~BIT_CTL_ENABLE);
150
151 cr = read_CTRL(drv_data);
152 write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE)));
153} 133}
154 134
155/* Caculate the SPI_BAUD register value based on input HZ */ 135/* Caculate the SPI_BAUD register value based on input HZ */
@@ -172,10 +152,10 @@ static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
172 unsigned long limit = loops_per_jiffy << 1; 152 unsigned long limit = loops_per_jiffy << 1;
173 153
174 /* wait for stop and clear stat */ 154 /* wait for stop and clear stat */
175 while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit) 155 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF) && --limit)
176 cpu_relax(); 156 cpu_relax();
177 157
178 write_STAT(drv_data, BIT_STAT_CLR); 158 bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
179 159
180 return limit; 160 return limit;
181} 161}
@@ -183,29 +163,19 @@ static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
183/* Chip select operation functions for cs_change flag */ 163/* Chip select operation functions for cs_change flag */
184static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) 164static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip)
185{ 165{
186 if (likely(chip->chip_select_num < MAX_CTRL_CS)) { 166 if (likely(chip->chip_select_num < MAX_CTRL_CS))
187 u16 flag = read_FLAG(drv_data); 167 bfin_write_and(&drv_data->regs->flg, ~chip->flag);
188 168 else
189 flag &= ~chip->flag;
190
191 write_FLAG(drv_data, flag);
192 } else {
193 gpio_set_value(chip->cs_gpio, 0); 169 gpio_set_value(chip->cs_gpio, 0);
194 }
195} 170}
196 171
197static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, 172static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
198 struct bfin_spi_slave_data *chip) 173 struct bfin_spi_slave_data *chip)
199{ 174{
200 if (likely(chip->chip_select_num < MAX_CTRL_CS)) { 175 if (likely(chip->chip_select_num < MAX_CTRL_CS))
201 u16 flag = read_FLAG(drv_data); 176 bfin_write_or(&drv_data->regs->flg, chip->flag);
202 177 else
203 flag |= chip->flag;
204
205 write_FLAG(drv_data, flag);
206 } else {
207 gpio_set_value(chip->cs_gpio, 1); 178 gpio_set_value(chip->cs_gpio, 1);
208 }
209 179
210 /* Move delay here for consistency */ 180 /* Move delay here for consistency */
211 if (chip->cs_chg_udelay) 181 if (chip->cs_chg_udelay)
@@ -216,25 +186,15 @@ static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
216static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, 186static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data,
217 struct bfin_spi_slave_data *chip) 187 struct bfin_spi_slave_data *chip)
218{ 188{
219 if (chip->chip_select_num < MAX_CTRL_CS) { 189 if (chip->chip_select_num < MAX_CTRL_CS)
220 u16 flag = read_FLAG(drv_data); 190 bfin_write_or(&drv_data->regs->flg, chip->flag >> 8);
221
222 flag |= (chip->flag >> 8);
223
224 write_FLAG(drv_data, flag);
225 }
226} 191}
227 192
228static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, 193static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data,
229 struct bfin_spi_slave_data *chip) 194 struct bfin_spi_slave_data *chip)
230{ 195{
231 if (chip->chip_select_num < MAX_CTRL_CS) { 196 if (chip->chip_select_num < MAX_CTRL_CS)
232 u16 flag = read_FLAG(drv_data); 197 bfin_write_and(&drv_data->regs->flg, ~(chip->flag >> 8));
233
234 flag &= ~(chip->flag >> 8);
235
236 write_FLAG(drv_data, flag);
237 }
238} 198}
239 199
240/* stop controller and re-config current chip*/ 200/* stop controller and re-config current chip*/
@@ -243,15 +203,15 @@ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
243 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 203 struct bfin_spi_slave_data *chip = drv_data->cur_chip;
244 204
245 /* Clear status and disable clock */ 205 /* Clear status and disable clock */
246 write_STAT(drv_data, BIT_STAT_CLR); 206 bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
247 bfin_spi_disable(drv_data); 207 bfin_spi_disable(drv_data);
248 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); 208 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
249 209
250 SSYNC(); 210 SSYNC();
251 211
252 /* Load the registers */ 212 /* Load the registers */
253 write_CTRL(drv_data, chip->ctl_reg); 213 bfin_write(&drv_data->regs->ctl, chip->ctl_reg);
254 write_BAUD(drv_data, chip->baud); 214 bfin_write(&drv_data->regs->baud, chip->baud);
255 215
256 bfin_spi_enable(drv_data); 216 bfin_spi_enable(drv_data);
257 bfin_spi_cs_active(drv_data, chip); 217 bfin_spi_cs_active(drv_data, chip);
@@ -260,7 +220,7 @@ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
260/* used to kick off transfer in rx mode and read unwanted RX data */ 220/* used to kick off transfer in rx mode and read unwanted RX data */
261static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) 221static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data)
262{ 222{
263 (void) read_RDBR(drv_data); 223 (void) bfin_read(&drv_data->regs->rdbr);
264} 224}
265 225
266static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) 226static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
@@ -269,10 +229,10 @@ static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
269 bfin_spi_dummy_read(drv_data); 229 bfin_spi_dummy_read(drv_data);
270 230
271 while (drv_data->tx < drv_data->tx_end) { 231 while (drv_data->tx < drv_data->tx_end) {
272 write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); 232 bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++)));
273 /* wait until transfer finished. 233 /* wait until transfer finished.
274 checking SPIF or TXS may not guarantee transfer completion */ 234 checking SPIF or TXS may not guarantee transfer completion */
275 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 235 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
276 cpu_relax(); 236 cpu_relax();
277 /* discard RX data and clear RXS */ 237 /* discard RX data and clear RXS */
278 bfin_spi_dummy_read(drv_data); 238 bfin_spi_dummy_read(drv_data);
@@ -287,10 +247,10 @@ static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data)
287 bfin_spi_dummy_read(drv_data); 247 bfin_spi_dummy_read(drv_data);
288 248
289 while (drv_data->rx < drv_data->rx_end) { 249 while (drv_data->rx < drv_data->rx_end) {
290 write_TDBR(drv_data, tx_val); 250 bfin_write(&drv_data->regs->tdbr, tx_val);
291 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 251 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
292 cpu_relax(); 252 cpu_relax();
293 *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); 253 *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr);
294 } 254 }
295} 255}
296 256
@@ -300,10 +260,10 @@ static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data)
300 bfin_spi_dummy_read(drv_data); 260 bfin_spi_dummy_read(drv_data);
301 261
302 while (drv_data->rx < drv_data->rx_end) { 262 while (drv_data->rx < drv_data->rx_end) {
303 write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); 263 bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++)));
304 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 264 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
305 cpu_relax(); 265 cpu_relax();
306 *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); 266 *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr);
307 } 267 }
308} 268}
309 269
@@ -319,11 +279,11 @@ static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data)
319 bfin_spi_dummy_read(drv_data); 279 bfin_spi_dummy_read(drv_data);
320 280
321 while (drv_data->tx < drv_data->tx_end) { 281 while (drv_data->tx < drv_data->tx_end) {
322 write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 282 bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx)));
323 drv_data->tx += 2; 283 drv_data->tx += 2;
324 /* wait until transfer finished. 284 /* wait until transfer finished.
325 checking SPIF or TXS may not guarantee transfer completion */ 285 checking SPIF or TXS may not guarantee transfer completion */
326 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 286 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
327 cpu_relax(); 287 cpu_relax();
328 /* discard RX data and clear RXS */ 288 /* discard RX data and clear RXS */
329 bfin_spi_dummy_read(drv_data); 289 bfin_spi_dummy_read(drv_data);
@@ -338,10 +298,10 @@ static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data)
338 bfin_spi_dummy_read(drv_data); 298 bfin_spi_dummy_read(drv_data);
339 299
340 while (drv_data->rx < drv_data->rx_end) { 300 while (drv_data->rx < drv_data->rx_end) {
341 write_TDBR(drv_data, tx_val); 301 bfin_write(&drv_data->regs->tdbr, tx_val);
342 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 302 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
343 cpu_relax(); 303 cpu_relax();
344 *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 304 *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr);
345 drv_data->rx += 2; 305 drv_data->rx += 2;
346 } 306 }
347} 307}
@@ -352,11 +312,11 @@ static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data)
352 bfin_spi_dummy_read(drv_data); 312 bfin_spi_dummy_read(drv_data);
353 313
354 while (drv_data->rx < drv_data->rx_end) { 314 while (drv_data->rx < drv_data->rx_end) {
355 write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 315 bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx)));
356 drv_data->tx += 2; 316 drv_data->tx += 2;
357 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 317 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
358 cpu_relax(); 318 cpu_relax();
359 *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 319 *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr);
360 drv_data->rx += 2; 320 drv_data->rx += 2;
361 } 321 }
362} 322}
@@ -428,7 +388,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
428 int loop = 0; 388 int loop = 0;
429 389
430 /* wait until transfer finished. */ 390 /* wait until transfer finished. */
431 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 391 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
432 cpu_relax(); 392 cpu_relax();
433 393
434 if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || 394 if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) ||
@@ -439,11 +399,11 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
439 if (n_bytes % 2) { 399 if (n_bytes % 2) {
440 u16 *buf = (u16 *)drv_data->rx; 400 u16 *buf = (u16 *)drv_data->rx;
441 for (loop = 0; loop < n_bytes / 2; loop++) 401 for (loop = 0; loop < n_bytes / 2; loop++)
442 *buf++ = read_RDBR(drv_data); 402 *buf++ = bfin_read(&drv_data->regs->rdbr);
443 } else { 403 } else {
444 u8 *buf = (u8 *)drv_data->rx; 404 u8 *buf = (u8 *)drv_data->rx;
445 for (loop = 0; loop < n_bytes; loop++) 405 for (loop = 0; loop < n_bytes; loop++)
446 *buf++ = read_RDBR(drv_data); 406 *buf++ = bfin_read(&drv_data->regs->rdbr);
447 } 407 }
448 drv_data->rx += n_bytes; 408 drv_data->rx += n_bytes;
449 } 409 }
@@ -468,15 +428,15 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
468 u16 *buf = (u16 *)drv_data->rx; 428 u16 *buf = (u16 *)drv_data->rx;
469 u16 *buf2 = (u16 *)drv_data->tx; 429 u16 *buf2 = (u16 *)drv_data->tx;
470 for (loop = 0; loop < n_bytes / 2; loop++) { 430 for (loop = 0; loop < n_bytes / 2; loop++) {
471 *buf++ = read_RDBR(drv_data); 431 *buf++ = bfin_read(&drv_data->regs->rdbr);
472 write_TDBR(drv_data, *buf2++); 432 bfin_write(&drv_data->regs->tdbr, *buf2++);
473 } 433 }
474 } else { 434 } else {
475 u8 *buf = (u8 *)drv_data->rx; 435 u8 *buf = (u8 *)drv_data->rx;
476 u8 *buf2 = (u8 *)drv_data->tx; 436 u8 *buf2 = (u8 *)drv_data->tx;
477 for (loop = 0; loop < n_bytes; loop++) { 437 for (loop = 0; loop < n_bytes; loop++) {
478 *buf++ = read_RDBR(drv_data); 438 *buf++ = bfin_read(&drv_data->regs->rdbr);
479 write_TDBR(drv_data, *buf2++); 439 bfin_write(&drv_data->regs->tdbr, *buf2++);
480 } 440 }
481 } 441 }
482 } else if (drv_data->rx) { 442 } else if (drv_data->rx) {
@@ -485,14 +445,14 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
485 if (n_bytes % 2) { 445 if (n_bytes % 2) {
486 u16 *buf = (u16 *)drv_data->rx; 446 u16 *buf = (u16 *)drv_data->rx;
487 for (loop = 0; loop < n_bytes / 2; loop++) { 447 for (loop = 0; loop < n_bytes / 2; loop++) {
488 *buf++ = read_RDBR(drv_data); 448 *buf++ = bfin_read(&drv_data->regs->rdbr);
489 write_TDBR(drv_data, chip->idle_tx_val); 449 bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
490 } 450 }
491 } else { 451 } else {
492 u8 *buf = (u8 *)drv_data->rx; 452 u8 *buf = (u8 *)drv_data->rx;
493 for (loop = 0; loop < n_bytes; loop++) { 453 for (loop = 0; loop < n_bytes; loop++) {
494 *buf++ = read_RDBR(drv_data); 454 *buf++ = bfin_read(&drv_data->regs->rdbr);
495 write_TDBR(drv_data, chip->idle_tx_val); 455 bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
496 } 456 }
497 } 457 }
498 } else if (drv_data->tx) { 458 } else if (drv_data->tx) {
@@ -501,14 +461,14 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
501 if (n_bytes % 2) { 461 if (n_bytes % 2) {
502 u16 *buf = (u16 *)drv_data->tx; 462 u16 *buf = (u16 *)drv_data->tx;
503 for (loop = 0; loop < n_bytes / 2; loop++) { 463 for (loop = 0; loop < n_bytes / 2; loop++) {
504 read_RDBR(drv_data); 464 bfin_read(&drv_data->regs->rdbr);
505 write_TDBR(drv_data, *buf++); 465 bfin_write(&drv_data->regs->tdbr, *buf++);
506 } 466 }
507 } else { 467 } else {
508 u8 *buf = (u8 *)drv_data->tx; 468 u8 *buf = (u8 *)drv_data->tx;
509 for (loop = 0; loop < n_bytes; loop++) { 469 for (loop = 0; loop < n_bytes; loop++) {
510 read_RDBR(drv_data); 470 bfin_read(&drv_data->regs->rdbr);
511 write_TDBR(drv_data, *buf++); 471 bfin_write(&drv_data->regs->tdbr, *buf++);
512 } 472 }
513 } 473 }
514 } 474 }
@@ -528,19 +488,19 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
528 struct spi_message *msg = drv_data->cur_msg; 488 struct spi_message *msg = drv_data->cur_msg;
529 unsigned long timeout; 489 unsigned long timeout;
530 unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); 490 unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel);
531 u16 spistat = read_STAT(drv_data); 491 u16 spistat = bfin_read(&drv_data->regs->stat);
532 492
533 dev_dbg(&drv_data->pdev->dev, 493 dev_dbg(&drv_data->pdev->dev,
534 "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", 494 "in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
535 dmastat, spistat); 495 dmastat, spistat);
536 496
537 if (drv_data->rx != NULL) { 497 if (drv_data->rx != NULL) {
538 u16 cr = read_CTRL(drv_data); 498 u16 cr = bfin_read(&drv_data->regs->ctl);
539 /* discard old RX data and clear RXS */ 499 /* discard old RX data and clear RXS */
540 bfin_spi_dummy_read(drv_data); 500 bfin_spi_dummy_read(drv_data);
541 write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ 501 bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
542 write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */ 502 bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_TIMOD); /* Restore State */
543 write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */ 503 bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); /* Clear Status */
544 } 504 }
545 505
546 clear_dma_irqstat(drv_data->dma_channel); 506 clear_dma_irqstat(drv_data->dma_channel);
@@ -552,17 +512,17 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
552 * register until it goes low for 2 successive reads 512 * register until it goes low for 2 successive reads
553 */ 513 */
554 if (drv_data->tx != NULL) { 514 if (drv_data->tx != NULL) {
555 while ((read_STAT(drv_data) & BIT_STAT_TXS) || 515 while ((bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS) ||
556 (read_STAT(drv_data) & BIT_STAT_TXS)) 516 (bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS))
557 cpu_relax(); 517 cpu_relax();
558 } 518 }
559 519
560 dev_dbg(&drv_data->pdev->dev, 520 dev_dbg(&drv_data->pdev->dev,
561 "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", 521 "in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
562 dmastat, read_STAT(drv_data)); 522 dmastat, bfin_read(&drv_data->regs->stat));
563 523
564 timeout = jiffies + HZ; 524 timeout = jiffies + HZ;
565 while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) 525 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
566 if (!time_before(jiffies, timeout)) { 526 if (!time_before(jiffies, timeout)) {
567 dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); 527 dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF");
568 break; 528 break;
@@ -699,9 +659,9 @@ static void bfin_spi_pump_transfers(unsigned long data)
699 bfin_spi_giveback(drv_data); 659 bfin_spi_giveback(drv_data);
700 return; 660 return;
701 } 661 }
702 cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); 662 cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
703 cr |= cr_width; 663 cr |= cr_width;
704 write_CTRL(drv_data, cr); 664 bfin_write(&drv_data->regs->ctl, cr);
705 665
706 dev_dbg(&drv_data->pdev->dev, 666 dev_dbg(&drv_data->pdev->dev,
707 "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", 667 "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n",
@@ -712,11 +672,11 @@ static void bfin_spi_pump_transfers(unsigned long data)
712 672
713 /* Speed setup (surely valid because already checked) */ 673 /* Speed setup (surely valid because already checked) */
714 if (transfer->speed_hz) 674 if (transfer->speed_hz)
715 write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz)); 675 bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz));
716 else 676 else
717 write_BAUD(drv_data, chip->baud); 677 bfin_write(&drv_data->regs->baud, chip->baud);
718 678
719 write_STAT(drv_data, BIT_STAT_CLR); 679 bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
720 bfin_spi_cs_active(drv_data, chip); 680 bfin_spi_cs_active(drv_data, chip);
721 681
722 dev_dbg(&drv_data->pdev->dev, 682 dev_dbg(&drv_data->pdev->dev,
@@ -749,7 +709,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
749 } 709 }
750 710
751 /* poll for SPI completion before start */ 711 /* poll for SPI completion before start */
752 while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) 712 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
753 cpu_relax(); 713 cpu_relax();
754 714
755 /* dirty hack for autobuffer DMA mode */ 715 /* dirty hack for autobuffer DMA mode */
@@ -766,7 +726,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
766 enable_dma(drv_data->dma_channel); 726 enable_dma(drv_data->dma_channel);
767 727
768 /* start SPI transfer */ 728 /* start SPI transfer */
769 write_CTRL(drv_data, cr | BIT_CTL_TIMOD_DMA_TX); 729 bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TIMOD_DMA_TX);
770 730
771 /* just return here, there can only be one transfer 731 /* just return here, there can only be one transfer
772 * in this mode 732 * in this mode
@@ -821,7 +781,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
821 set_dma_config(drv_data->dma_channel, dma_config); 781 set_dma_config(drv_data->dma_channel, dma_config);
822 local_irq_save(flags); 782 local_irq_save(flags);
823 SSYNC(); 783 SSYNC();
824 write_CTRL(drv_data, cr); 784 bfin_write(&drv_data->regs->ctl, cr);
825 enable_dma(drv_data->dma_channel); 785 enable_dma(drv_data->dma_channel);
826 dma_enable_irq(drv_data->dma_channel); 786 dma_enable_irq(drv_data->dma_channel);
827 local_irq_restore(flags); 787 local_irq_restore(flags);
@@ -835,7 +795,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
835 * problems with setting up the output value in TDBR prior to the 795 * problems with setting up the output value in TDBR prior to the
836 * start of the transfer. 796 * start of the transfer.
837 */ 797 */
838 write_CTRL(drv_data, cr | BIT_CTL_TXMOD); 798 bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TXMOD);
839 799
840 if (chip->pio_interrupt) { 800 if (chip->pio_interrupt) {
841 /* SPI irq should have been disabled by now */ 801 /* SPI irq should have been disabled by now */
@@ -845,19 +805,19 @@ static void bfin_spi_pump_transfers(unsigned long data)
845 805
846 /* start transfer */ 806 /* start transfer */
847 if (drv_data->tx == NULL) 807 if (drv_data->tx == NULL)
848 write_TDBR(drv_data, chip->idle_tx_val); 808 bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
849 else { 809 else {
850 int loop; 810 int loop;
851 if (bits_per_word % 16 == 0) { 811 if (bits_per_word % 16 == 0) {
852 u16 *buf = (u16 *)drv_data->tx; 812 u16 *buf = (u16 *)drv_data->tx;
853 for (loop = 0; loop < bits_per_word / 16; 813 for (loop = 0; loop < bits_per_word / 16;
854 loop++) { 814 loop++) {
855 write_TDBR(drv_data, *buf++); 815 bfin_write(&drv_data->regs->tdbr, *buf++);
856 } 816 }
857 } else if (bits_per_word % 8 == 0) { 817 } else if (bits_per_word % 8 == 0) {
858 u8 *buf = (u8 *)drv_data->tx; 818 u8 *buf = (u8 *)drv_data->tx;
859 for (loop = 0; loop < bits_per_word / 8; loop++) 819 for (loop = 0; loop < bits_per_word / 8; loop++)
860 write_TDBR(drv_data, *buf++); 820 bfin_write(&drv_data->regs->tdbr, *buf++);
861 } 821 }
862 822
863 drv_data->tx += drv_data->n_bytes; 823 drv_data->tx += drv_data->n_bytes;
@@ -1005,7 +965,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1005 965
1006#define MAX_SPI_SSEL 7 966#define MAX_SPI_SSEL 7
1007 967
1008static u16 ssel[][MAX_SPI_SSEL] = { 968static const u16 ssel[][MAX_SPI_SSEL] = {
1009 {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, 969 {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
1010 P_SPI0_SSEL4, P_SPI0_SSEL5, 970 P_SPI0_SSEL4, P_SPI0_SSEL5,
1011 P_SPI0_SSEL6, P_SPI0_SSEL7}, 971 P_SPI0_SSEL6, P_SPI0_SSEL7},
@@ -1226,7 +1186,7 @@ static void bfin_spi_cleanup(struct spi_device *spi)
1226 spi_set_ctldata(spi, NULL); 1186 spi_set_ctldata(spi, NULL);
1227} 1187}
1228 1188
1229static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) 1189static int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
1230{ 1190{
1231 INIT_LIST_HEAD(&drv_data->queue); 1191 INIT_LIST_HEAD(&drv_data->queue);
1232 spin_lock_init(&drv_data->lock); 1192 spin_lock_init(&drv_data->lock);
@@ -1248,7 +1208,7 @@ static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
1248 return 0; 1208 return 0;
1249} 1209}
1250 1210
1251static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) 1211static int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
1252{ 1212{
1253 unsigned long flags; 1213 unsigned long flags;
1254 1214
@@ -1270,7 +1230,7 @@ static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
1270 return 0; 1230 return 0;
1271} 1231}
1272 1232
1273static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) 1233static int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
1274{ 1234{
1275 unsigned long flags; 1235 unsigned long flags;
1276 unsigned limit = 500; 1236 unsigned limit = 500;
@@ -1299,7 +1259,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
1299 return status; 1259 return status;
1300} 1260}
1301 1261
1302static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) 1262static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
1303{ 1263{
1304 int status; 1264 int status;
1305 1265
@@ -1353,8 +1313,8 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1353 goto out_error_get_res; 1313 goto out_error_get_res;
1354 } 1314 }
1355 1315
1356 drv_data->regs_base = ioremap(res->start, resource_size(res)); 1316 drv_data->regs = ioremap(res->start, resource_size(res));
1357 if (drv_data->regs_base == NULL) { 1317 if (drv_data->regs == NULL) {
1358 dev_err(dev, "Cannot map IO\n"); 1318 dev_err(dev, "Cannot map IO\n");
1359 status = -ENXIO; 1319 status = -ENXIO;
1360 goto out_error_ioremap; 1320 goto out_error_ioremap;
@@ -1397,8 +1357,8 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1397 /* Reset SPI registers. If these registers were used by the boot loader, 1357 /* Reset SPI registers. If these registers were used by the boot loader,
1398 * the sky may fall on your head if you enable the dma controller. 1358 * the sky may fall on your head if you enable the dma controller.
1399 */ 1359 */
1400 write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); 1360 bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
1401 write_FLAG(drv_data, 0xFF00); 1361 bfin_write(&drv_data->regs->flg, 0xFF00);
1402 1362
1403 /* Register with the SPI framework */ 1363 /* Register with the SPI framework */
1404 platform_set_drvdata(pdev, drv_data); 1364 platform_set_drvdata(pdev, drv_data);
@@ -1408,15 +1368,15 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1408 goto out_error_queue_alloc; 1368 goto out_error_queue_alloc;
1409 } 1369 }
1410 1370
1411 dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n", 1371 dev_info(dev, "%s, Version %s, regs@%p, dma channel@%d\n",
1412 DRV_DESC, DRV_VERSION, drv_data->regs_base, 1372 DRV_DESC, DRV_VERSION, drv_data->regs,
1413 drv_data->dma_channel); 1373 drv_data->dma_channel);
1414 return status; 1374 return status;
1415 1375
1416out_error_queue_alloc: 1376out_error_queue_alloc:
1417 bfin_spi_destroy_queue(drv_data); 1377 bfin_spi_destroy_queue(drv_data);
1418out_error_free_io: 1378out_error_free_io:
1419 iounmap((void *) drv_data->regs_base); 1379 iounmap(drv_data->regs);
1420out_error_ioremap: 1380out_error_ioremap:
1421out_error_get_res: 1381out_error_get_res:
1422 spi_master_put(master); 1382 spi_master_put(master);
@@ -1473,14 +1433,14 @@ static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
1473 if (status != 0) 1433 if (status != 0)
1474 return status; 1434 return status;
1475 1435
1476 drv_data->ctrl_reg = read_CTRL(drv_data); 1436 drv_data->ctrl_reg = bfin_read(&drv_data->regs->ctl);
1477 drv_data->flag_reg = read_FLAG(drv_data); 1437 drv_data->flag_reg = bfin_read(&drv_data->regs->flg);
1478 1438
1479 /* 1439 /*
1480 * reset SPI_CTL and SPI_FLG registers 1440 * reset SPI_CTL and SPI_FLG registers
1481 */ 1441 */
1482 write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); 1442 bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
1483 write_FLAG(drv_data, 0xFF00); 1443 bfin_write(&drv_data->regs->flg, 0xFF00);
1484 1444
1485 return 0; 1445 return 0;
1486} 1446}
@@ -1490,8 +1450,8 @@ static int bfin_spi_resume(struct platform_device *pdev)
1490 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1450 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
1491 int status = 0; 1451 int status = 0;
1492 1452
1493 write_CTRL(drv_data, drv_data->ctrl_reg); 1453 bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg);
1494 write_FLAG(drv_data, drv_data->flag_reg); 1454 bfin_write(&drv_data->regs->flg, drv_data->flag_reg);
1495 1455
1496 /* Start the queue running */ 1456 /* Start the queue running */
1497 status = bfin_spi_start_queue(drv_data); 1457 status = bfin_spi_start_queue(drv_data);
diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi-bitbang-txrx.h
index c16bf853c3eb..c16bf853c3eb 100644
--- a/drivers/spi/spi_bitbang_txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi-bitbang.c
index 14a63f6010d1..02d57fbba295 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * spi_bitbang.c - polling/bitbanging SPI master controller driver utilities 2 * polling/bitbanging SPI master controller driver utilities
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ static unsigned bitbang_txrx_8(
68 unsigned ns, 68 unsigned ns,
69 struct spi_transfer *t 69 struct spi_transfer *t
70) { 70) {
71 unsigned bits = spi->bits_per_word; 71 unsigned bits = t->bits_per_word ? : spi->bits_per_word;
72 unsigned count = t->len; 72 unsigned count = t->len;
73 const u8 *tx = t->tx_buf; 73 const u8 *tx = t->tx_buf;
74 u8 *rx = t->rx_buf; 74 u8 *rx = t->rx_buf;
@@ -94,7 +94,7 @@ static unsigned bitbang_txrx_16(
94 unsigned ns, 94 unsigned ns,
95 struct spi_transfer *t 95 struct spi_transfer *t
96) { 96) {
97 unsigned bits = spi->bits_per_word; 97 unsigned bits = t->bits_per_word ? : spi->bits_per_word;
98 unsigned count = t->len; 98 unsigned count = t->len;
99 const u16 *tx = t->tx_buf; 99 const u16 *tx = t->tx_buf;
100 u16 *rx = t->rx_buf; 100 u16 *rx = t->rx_buf;
@@ -120,7 +120,7 @@ static unsigned bitbang_txrx_32(
120 unsigned ns, 120 unsigned ns,
121 struct spi_transfer *t 121 struct spi_transfer *t
122) { 122) {
123 unsigned bits = spi->bits_per_word; 123 unsigned bits = t->bits_per_word ? : spi->bits_per_word;
124 unsigned count = t->len; 124 unsigned count = t->len;
125 const u32 *tx = t->tx_buf; 125 const u32 *tx = t->tx_buf;
126 u32 *rx = t->rx_buf; 126 u32 *rx = t->rx_buf;
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi-butterfly.c
index 0d4ceba3b590..9f907ec52def 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * spi_butterfly.c - parport-to-butterfly adapter 2 * parport-to-butterfly adapter
3 * 3 *
4 * Copyright (C) 2005 David Brownell 4 * Copyright (C) 2005 David Brownell
5 * 5 *
@@ -149,7 +149,7 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
149#define spidelay(X) do{}while(0) 149#define spidelay(X) do{}while(0)
150//#define spidelay ndelay 150//#define spidelay ndelay
151 151
152#include "spi_bitbang_txrx.h" 152#include "spi-bitbang-txrx.h"
153 153
154static u32 154static u32
155butterfly_txrx_word_mode0(struct spi_device *spi, 155butterfly_txrx_word_mode0(struct spi_device *spi,
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/spi-coldfire-qspi.c
index ae2cd1c1fda8..ae2cd1c1fda8 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/spi-davinci.c
index 1f0ed8005c91..1f0ed8005c91 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/spi-davinci.c
diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/spi-dw-mid.c
index 489178243d88..130e55537db6 100644
--- a/drivers/spi/dw_spi_mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * dw_spi_mid.c - special handling for DW core on Intel MID platform 2 * Special handling for DW core on Intel MID platform
3 * 3 *
4 * Copyright (c) 2009, Intel Corporation. 4 * Copyright (c) 2009, Intel Corporation.
5 * 5 *
@@ -23,7 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
25 25
26#include "dw_spi.h" 26#include "spi-dw.h"
27 27
28#ifdef CONFIG_SPI_DW_MID_DMA 28#ifdef CONFIG_SPI_DW_MID_DMA
29#include <linux/intel_mid_dma.h> 29#include <linux/intel_mid_dma.h>
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/spi-dw-mmio.c
index e0e813dad150..34eb66501dbf 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core 2 * Memory-mapped interface driver for DW SPI Core
3 * 3 *
4 * Copyright (c) 2010, Octasic semiconductor. 4 * Copyright (c) 2010, Octasic semiconductor.
5 * 5 *
@@ -16,7 +16,7 @@
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18 18
19#include "dw_spi.h" 19#include "spi-dw.h"
20 20
21#define DRIVER_NAME "dw_spi_mmio" 21#define DRIVER_NAME "dw_spi_mmio"
22 22
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/spi-dw-pci.c
index ad260aa5e526..c5f37f03ac8b 100644
--- a/drivers/spi/dw_spi_pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * dw_spi_pci.c - PCI interface driver for DW SPI Core 2 * PCI interface driver for DW SPI Core
3 * 3 *
4 * Copyright (c) 2009, Intel Corporation. 4 * Copyright (c) 2009, Intel Corporation.
5 * 5 *
@@ -22,7 +22,7 @@
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
24 24
25#include "dw_spi.h" 25#include "spi-dw.h"
26 26
27#define DRIVER_NAME "dw_spi_pci" 27#define DRIVER_NAME "dw_spi_pci"
28 28
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/spi-dw.c
index 919fa9d9e16b..857cd30b44bb 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/spi-dw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c) 2 * Designware SPI core controller driver (refer pxa2xx_spi.c)
3 * 3 *
4 * Copyright (c) 2009, Intel Corporation. 4 * Copyright (c) 2009, Intel Corporation.
5 * 5 *
@@ -24,7 +24,7 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/spi/spi.h> 25#include <linux/spi/spi.h>
26 26
27#include "dw_spi.h" 27#include "spi-dw.h"
28 28
29#ifdef CONFIG_DEBUG_FS 29#ifdef CONFIG_DEBUG_FS
30#include <linux/debugfs.h> 30#include <linux/debugfs.h>
@@ -818,9 +818,11 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
818 dws->prev_chip = NULL; 818 dws->prev_chip = NULL;
819 dws->dma_inited = 0; 819 dws->dma_inited = 0;
820 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); 820 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
821 snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
822 dws->bus_num);
821 823
822 ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, 824 ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
823 "dw_spi", dws); 825 dws->name, dws);
824 if (ret < 0) { 826 if (ret < 0) {
825 dev_err(&master->dev, "can not get IRQ\n"); 827 dev_err(&master->dev, "can not get IRQ\n");
826 goto err_free_master; 828 goto err_free_master;
diff --git a/drivers/spi/dw_spi.h b/drivers/spi/spi-dw.h
index 7a5e78d2a5cb..8b7b07bf6c3f 100644
--- a/drivers/spi/dw_spi.h
+++ b/drivers/spi/spi-dw.h
@@ -96,6 +96,7 @@ struct dw_spi {
96 struct spi_device *cur_dev; 96 struct spi_device *cur_dev;
97 struct device *parent_dev; 97 struct device *parent_dev;
98 enum dw_ssi_type type; 98 enum dw_ssi_type type;
99 char name[16];
99 100
100 void __iomem *regs; 101 void __iomem *regs;
101 unsigned long paddr; 102 unsigned long paddr;
diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/spi-ep93xx.c
index d3570071e98f..1cf645479bfe 100644
--- a/drivers/spi/ep93xx_spi.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for Cirrus Logic EP93xx SPI controller. 2 * Driver for Cirrus Logic EP93xx SPI controller.
3 * 3 *
4 * Copyright (c) 2010 Mika Westerberg 4 * Copyright (C) 2010-2011 Mika Westerberg
5 * 5 *
6 * Explicit FIFO handling code was inspired by amba-pl022 driver. 6 * Explicit FIFO handling code was inspired by amba-pl022 driver.
7 * 7 *
@@ -21,13 +21,16 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/dmaengine.h>
24#include <linux/bitops.h> 25#include <linux/bitops.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/workqueue.h> 28#include <linux/workqueue.h>
28#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/scatterlist.h>
29#include <linux/spi/spi.h> 31#include <linux/spi/spi.h>
30 32
33#include <mach/dma.h>
31#include <mach/ep93xx_spi.h> 34#include <mach/ep93xx_spi.h>
32 35
33#define SSPCR0 0x0000 36#define SSPCR0 0x0000
@@ -71,6 +74,7 @@
71 * @pdev: pointer to platform device 74 * @pdev: pointer to platform device
72 * @clk: clock for the controller 75 * @clk: clock for the controller
73 * @regs_base: pointer to ioremap()'d registers 76 * @regs_base: pointer to ioremap()'d registers
77 * @sspdr_phys: physical address of the SSPDR register
74 * @irq: IRQ number used by the driver 78 * @irq: IRQ number used by the driver
75 * @min_rate: minimum clock rate (in Hz) supported by the controller 79 * @min_rate: minimum clock rate (in Hz) supported by the controller
76 * @max_rate: maximum clock rate (in Hz) supported by the controller 80 * @max_rate: maximum clock rate (in Hz) supported by the controller
@@ -84,6 +88,14 @@
84 * @rx: current byte in transfer to receive 88 * @rx: current byte in transfer to receive
85 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one 89 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
86 * frame decreases this level and sending one frame increases it. 90 * frame decreases this level and sending one frame increases it.
91 * @dma_rx: RX DMA channel
92 * @dma_tx: TX DMA channel
93 * @dma_rx_data: RX parameters passed to the DMA engine
94 * @dma_tx_data: TX parameters passed to the DMA engine
95 * @rx_sgt: sg table for RX transfers
96 * @tx_sgt: sg table for TX transfers
97 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
98 * the client
87 * 99 *
88 * This structure holds EP93xx SPI controller specific information. When 100 * This structure holds EP93xx SPI controller specific information. When
89 * @running is %true, driver accepts transfer requests from protocol drivers. 101 * @running is %true, driver accepts transfer requests from protocol drivers.
@@ -100,6 +112,7 @@ struct ep93xx_spi {
100 const struct platform_device *pdev; 112 const struct platform_device *pdev;
101 struct clk *clk; 113 struct clk *clk;
102 void __iomem *regs_base; 114 void __iomem *regs_base;
115 unsigned long sspdr_phys;
103 int irq; 116 int irq;
104 unsigned long min_rate; 117 unsigned long min_rate;
105 unsigned long max_rate; 118 unsigned long max_rate;
@@ -112,6 +125,13 @@ struct ep93xx_spi {
112 size_t tx; 125 size_t tx;
113 size_t rx; 126 size_t rx;
114 size_t fifo_level; 127 size_t fifo_level;
128 struct dma_chan *dma_rx;
129 struct dma_chan *dma_tx;
130 struct ep93xx_dma_data dma_rx_data;
131 struct ep93xx_dma_data dma_tx_data;
132 struct sg_table rx_sgt;
133 struct sg_table tx_sgt;
134 void *zeropage;
115}; 135};
116 136
117/** 137/**
@@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
496 espi->fifo_level++; 516 espi->fifo_level++;
497 } 517 }
498 518
499 if (espi->rx == t->len) { 519 if (espi->rx == t->len)
500 msg->actual_length += t->len;
501 return 0; 520 return 0;
502 }
503 521
504 return -EINPROGRESS; 522 return -EINPROGRESS;
505} 523}
506 524
525static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
526{
527 /*
528 * Now everything is set up for the current transfer. We prime the TX
529 * FIFO, enable interrupts, and wait for the transfer to complete.
530 */
531 if (ep93xx_spi_read_write(espi)) {
532 ep93xx_spi_enable_interrupts(espi);
533 wait_for_completion(&espi->wait);
534 }
535}
536
537/**
538 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
539 * @espi: ep93xx SPI controller struct
540 * @dir: DMA transfer direction
541 *
542 * Function configures the DMA, maps the buffer and prepares the DMA
543 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
544 * in case of failure.
545 */
546static struct dma_async_tx_descriptor *
547ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
548{
549 struct spi_transfer *t = espi->current_msg->state;
550 struct dma_async_tx_descriptor *txd;
551 enum dma_slave_buswidth buswidth;
552 struct dma_slave_config conf;
553 struct scatterlist *sg;
554 struct sg_table *sgt;
555 struct dma_chan *chan;
556 const void *buf, *pbuf;
557 size_t len = t->len;
558 int i, ret, nents;
559
560 if (bits_per_word(espi) > 8)
561 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
562 else
563 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
564
565 memset(&conf, 0, sizeof(conf));
566 conf.direction = dir;
567
568 if (dir == DMA_FROM_DEVICE) {
569 chan = espi->dma_rx;
570 buf = t->rx_buf;
571 sgt = &espi->rx_sgt;
572
573 conf.src_addr = espi->sspdr_phys;
574 conf.src_addr_width = buswidth;
575 } else {
576 chan = espi->dma_tx;
577 buf = t->tx_buf;
578 sgt = &espi->tx_sgt;
579
580 conf.dst_addr = espi->sspdr_phys;
581 conf.dst_addr_width = buswidth;
582 }
583
584 ret = dmaengine_slave_config(chan, &conf);
585 if (ret)
586 return ERR_PTR(ret);
587
588 /*
589 * We need to split the transfer into PAGE_SIZE'd chunks. This is
590 * because we are using @espi->zeropage to provide a zero RX buffer
591 * for the TX transfers and we have only allocated one page for that.
592 *
593 * For performance reasons we allocate a new sg_table only when
594 * needed. Otherwise we will re-use the current one. Eventually the
595 * last sg_table is released in ep93xx_spi_release_dma().
596 */
597
598 nents = DIV_ROUND_UP(len, PAGE_SIZE);
599 if (nents != sgt->nents) {
600 sg_free_table(sgt);
601
602 ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
603 if (ret)
604 return ERR_PTR(ret);
605 }
606
607 pbuf = buf;
608 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
609 size_t bytes = min_t(size_t, len, PAGE_SIZE);
610
611 if (buf) {
612 sg_set_page(sg, virt_to_page(pbuf), bytes,
613 offset_in_page(pbuf));
614 } else {
615 sg_set_page(sg, virt_to_page(espi->zeropage),
616 bytes, 0);
617 }
618
619 pbuf += bytes;
620 len -= bytes;
621 }
622
623 if (WARN_ON(len)) {
624 dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
625 return ERR_PTR(-EINVAL);
626 }
627
628 nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
629 if (!nents)
630 return ERR_PTR(-ENOMEM);
631
632 txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
633 dir, DMA_CTRL_ACK);
634 if (!txd) {
635 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
636 return ERR_PTR(-ENOMEM);
637 }
638 return txd;
639}
640
641/**
642 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
643 * @espi: ep93xx SPI controller struct
644 * @dir: DMA transfer direction
645 *
646 * Function finishes with the DMA transfer. After this, the DMA buffer is
647 * unmapped.
648 */
649static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
650 enum dma_data_direction dir)
651{
652 struct dma_chan *chan;
653 struct sg_table *sgt;
654
655 if (dir == DMA_FROM_DEVICE) {
656 chan = espi->dma_rx;
657 sgt = &espi->rx_sgt;
658 } else {
659 chan = espi->dma_tx;
660 sgt = &espi->tx_sgt;
661 }
662
663 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
664}
665
666static void ep93xx_spi_dma_callback(void *callback_param)
667{
668 complete(callback_param);
669}
670
671static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
672{
673 struct spi_message *msg = espi->current_msg;
674 struct dma_async_tx_descriptor *rxd, *txd;
675
676 rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
677 if (IS_ERR(rxd)) {
678 dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
679 msg->status = PTR_ERR(rxd);
680 return;
681 }
682
683 txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
684 if (IS_ERR(txd)) {
685 ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
686 dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
687 msg->status = PTR_ERR(txd);
688 return;
689 }
690
691 /* We are ready when RX is done */
692 rxd->callback = ep93xx_spi_dma_callback;
693 rxd->callback_param = &espi->wait;
694
695 /* Now submit both descriptors and wait while they finish */
696 dmaengine_submit(rxd);
697 dmaengine_submit(txd);
698
699 dma_async_issue_pending(espi->dma_rx);
700 dma_async_issue_pending(espi->dma_tx);
701
702 wait_for_completion(&espi->wait);
703
704 ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
705 ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
706}
707
507/** 708/**
508 * ep93xx_spi_process_transfer() - processes one SPI transfer 709 * ep93xx_spi_process_transfer() - processes one SPI transfer
509 * @espi: ep93xx SPI controller struct 710 * @espi: ep93xx SPI controller struct
@@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
556 espi->tx = 0; 757 espi->tx = 0;
557 758
558 /* 759 /*
559 * Now everything is set up for the current transfer. We prime the TX 760 * There is no point of setting up DMA for the transfers which will
560 * FIFO, enable interrupts, and wait for the transfer to complete. 761 * fit into the FIFO and can be transferred with a single interrupt.
762 * So in these cases we will be using PIO and don't bother for DMA.
561 */ 763 */
562 if (ep93xx_spi_read_write(espi)) { 764 if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
563 ep93xx_spi_enable_interrupts(espi); 765 ep93xx_spi_dma_transfer(espi);
564 wait_for_completion(&espi->wait); 766 else
565 } 767 ep93xx_spi_pio_transfer(espi);
566 768
567 /* 769 /*
568 * In case of error during transmit, we bail out from processing 770 * In case of error during transmit, we bail out from processing
@@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
571 if (msg->status) 773 if (msg->status)
572 return; 774 return;
573 775
776 msg->actual_length += t->len;
777
574 /* 778 /*
575 * After this transfer is finished, perform any possible 779 * After this transfer is finished, perform any possible
576 * post-transfer actions requested by the protocol driver. 780 * post-transfer actions requested by the protocol driver.
@@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
752 return IRQ_HANDLED; 956 return IRQ_HANDLED;
753} 957}
754 958
959static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
960{
961 if (ep93xx_dma_chan_is_m2p(chan))
962 return false;
963
964 chan->private = filter_param;
965 return true;
966}
967
968static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
969{
970 dma_cap_mask_t mask;
971 int ret;
972
973 espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
974 if (!espi->zeropage)
975 return -ENOMEM;
976
977 dma_cap_zero(mask);
978 dma_cap_set(DMA_SLAVE, mask);
979
980 espi->dma_rx_data.port = EP93XX_DMA_SSP;
981 espi->dma_rx_data.direction = DMA_FROM_DEVICE;
982 espi->dma_rx_data.name = "ep93xx-spi-rx";
983
984 espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
985 &espi->dma_rx_data);
986 if (!espi->dma_rx) {
987 ret = -ENODEV;
988 goto fail_free_page;
989 }
990
991 espi->dma_tx_data.port = EP93XX_DMA_SSP;
992 espi->dma_tx_data.direction = DMA_TO_DEVICE;
993 espi->dma_tx_data.name = "ep93xx-spi-tx";
994
995 espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
996 &espi->dma_tx_data);
997 if (!espi->dma_tx) {
998 ret = -ENODEV;
999 goto fail_release_rx;
1000 }
1001
1002 return 0;
1003
1004fail_release_rx:
1005 dma_release_channel(espi->dma_rx);
1006 espi->dma_rx = NULL;
1007fail_free_page:
1008 free_page((unsigned long)espi->zeropage);
1009
1010 return ret;
1011}
1012
1013static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
1014{
1015 if (espi->dma_rx) {
1016 dma_release_channel(espi->dma_rx);
1017 sg_free_table(&espi->rx_sgt);
1018 }
1019 if (espi->dma_tx) {
1020 dma_release_channel(espi->dma_tx);
1021 sg_free_table(&espi->tx_sgt);
1022 }
1023
1024 if (espi->zeropage)
1025 free_page((unsigned long)espi->zeropage);
1026}
1027
755static int __init ep93xx_spi_probe(struct platform_device *pdev) 1028static int __init ep93xx_spi_probe(struct platform_device *pdev)
756{ 1029{
757 struct spi_master *master; 1030 struct spi_master *master;
@@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
818 goto fail_put_clock; 1091 goto fail_put_clock;
819 } 1092 }
820 1093
1094 espi->sspdr_phys = res->start + SSPDR;
821 espi->regs_base = ioremap(res->start, resource_size(res)); 1095 espi->regs_base = ioremap(res->start, resource_size(res));
822 if (!espi->regs_base) { 1096 if (!espi->regs_base) {
823 dev_err(&pdev->dev, "failed to map resources\n"); 1097 dev_err(&pdev->dev, "failed to map resources\n");
@@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
832 goto fail_unmap_regs; 1106 goto fail_unmap_regs;
833 } 1107 }
834 1108
1109 if (info->use_dma && ep93xx_spi_setup_dma(espi))
1110 dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
1111
835 espi->wq = create_singlethread_workqueue("ep93xx_spid"); 1112 espi->wq = create_singlethread_workqueue("ep93xx_spid");
836 if (!espi->wq) { 1113 if (!espi->wq) {
837 dev_err(&pdev->dev, "unable to create workqueue\n"); 1114 dev_err(&pdev->dev, "unable to create workqueue\n");
838 goto fail_free_irq; 1115 goto fail_free_dma;
839 } 1116 }
840 INIT_WORK(&espi->msg_work, ep93xx_spi_work); 1117 INIT_WORK(&espi->msg_work, ep93xx_spi_work);
841 INIT_LIST_HEAD(&espi->msg_queue); 1118 INIT_LIST_HEAD(&espi->msg_queue);
@@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
857 1134
858fail_free_queue: 1135fail_free_queue:
859 destroy_workqueue(espi->wq); 1136 destroy_workqueue(espi->wq);
860fail_free_irq: 1137fail_free_dma:
1138 ep93xx_spi_release_dma(espi);
861 free_irq(espi->irq, espi); 1139 free_irq(espi->irq, espi);
862fail_unmap_regs: 1140fail_unmap_regs:
863 iounmap(espi->regs_base); 1141 iounmap(espi->regs_base);
@@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev)
901 } 1179 }
902 spin_unlock_irq(&espi->lock); 1180 spin_unlock_irq(&espi->lock);
903 1181
1182 ep93xx_spi_release_dma(espi);
904 free_irq(espi->irq, espi); 1183 free_irq(espi->irq, espi);
905 iounmap(espi->regs_base); 1184 iounmap(espi->regs_base);
906 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1185 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi-fsl-espi.c
index 496f895a0024..54e499d5f92c 100644
--- a/drivers/spi/spi_fsl_espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -22,7 +22,7 @@
22#include <linux/err.h> 22#include <linux/err.h>
23#include <sysdev/fsl_soc.h> 23#include <sysdev/fsl_soc.h>
24 24
25#include "spi_fsl_lib.h" 25#include "spi-fsl-lib.h"
26 26
27/* eSPI Controller registers */ 27/* eSPI Controller registers */
28struct fsl_espi_reg { 28struct fsl_espi_reg {
diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi-fsl-lib.c
index ff59f42ae990..2674fad7f68a 100644
--- a/drivers/spi/spi_fsl_lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -25,7 +25,7 @@
25#include <linux/of_spi.h> 25#include <linux/of_spi.h>
26#include <sysdev/fsl_soc.h> 26#include <sysdev/fsl_soc.h>
27 27
28#include "spi_fsl_lib.h" 28#include "spi-fsl-lib.h"
29 29
30#define MPC8XXX_SPI_RX_BUF(type) \ 30#define MPC8XXX_SPI_RX_BUF(type) \
31void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ 31void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi-fsl-lib.h
index cbe881b9ea76..cbe881b9ea76 100644
--- a/drivers/spi/spi_fsl_lib.h
+++ b/drivers/spi/spi-fsl-lib.h
diff --git a/drivers/spi/spi_fsl_spi.c b/drivers/spi/spi-fsl-spi.c
index 7963c9b49566..d2407558773f 100644
--- a/drivers/spi/spi_fsl_spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -37,7 +37,7 @@
37#include <asm/cpm.h> 37#include <asm/cpm.h>
38#include <asm/qe.h> 38#include <asm/qe.h>
39 39
40#include "spi_fsl_lib.h" 40#include "spi-fsl-lib.h"
41 41
42/* CPM1 and CPM2 are mutually exclusive. */ 42/* CPM1 and CPM2 are mutually exclusive. */
43#ifdef CONFIG_CPM1 43#ifdef CONFIG_CPM1
@@ -684,7 +684,7 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
684 struct device_node *np = dev->of_node; 684 struct device_node *np = dev->of_node;
685 const u32 *iprop; 685 const u32 *iprop;
686 int size; 686 int size;
687 unsigned long spi_base_ofs; 687 void __iomem *spi_base;
688 unsigned long pram_ofs = -ENOMEM; 688 unsigned long pram_ofs = -ENOMEM;
689 689
690 /* Can't use of_address_to_resource(), QE muram isn't at 0. */ 690 /* Can't use of_address_to_resource(), QE muram isn't at 0. */
@@ -702,33 +702,27 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
702 return pram_ofs; 702 return pram_ofs;
703 } 703 }
704 704
705 /* CPM1 and CPM2 pram must be at a fixed addr. */ 705 spi_base = of_iomap(np, 1);
706 if (!iprop || size != sizeof(*iprop) * 4) 706 if (spi_base == NULL)
707 return -ENOMEM; 707 return -EINVAL;
708
709 spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
710 if (IS_ERR_VALUE(spi_base_ofs))
711 return -ENOMEM;
712 708
713 if (mspi->flags & SPI_CPM2) { 709 if (mspi->flags & SPI_CPM2) {
714 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); 710 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
715 if (!IS_ERR_VALUE(pram_ofs)) { 711 out_be16(spi_base, pram_ofs);
716 u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);
717
718 out_be16(spi_base, pram_ofs);
719 }
720 } else { 712 } else {
721 struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs); 713 struct spi_pram __iomem *pram = spi_base;
722 u16 rpbase = in_be16(&pram->rpbase); 714 u16 rpbase = in_be16(&pram->rpbase);
723 715
724 /* Microcode relocation patch applied? */ 716 /* Microcode relocation patch applied? */
725 if (rpbase) 717 if (rpbase)
726 pram_ofs = rpbase; 718 pram_ofs = rpbase;
727 else 719 else {
728 return spi_base_ofs; 720 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
721 out_be16(spi_base, pram_ofs);
722 }
729 } 723 }
730 724
731 cpm_muram_free(spi_base_ofs); 725 iounmap(spi_base);
732 return pram_ofs; 726 return pram_ofs;
733} 727}
734 728
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi-gpio.c
index 63e51b011d50..0e88ab745490 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * spi_gpio.c - SPI master driver using generic bitbanged GPIO 2 * SPI master driver using generic bitbanged GPIO
3 * 3 *
4 * Copyright (C) 2006,2008 David Brownell 4 * Copyright (C) 2006,2008 David Brownell
5 * 5 *
@@ -69,7 +69,7 @@ struct spi_gpio {
69 * #define SPI_MOSI_GPIO 120 69 * #define SPI_MOSI_GPIO 120
70 * #define SPI_SCK_GPIO 121 70 * #define SPI_SCK_GPIO 121
71 * #define SPI_N_CHIPSEL 4 71 * #define SPI_N_CHIPSEL 4
72 * #include "spi_gpio.c" 72 * #include "spi-gpio.c"
73 */ 73 */
74 74
75#ifndef DRIVER_NAME 75#ifndef DRIVER_NAME
@@ -127,7 +127,7 @@ static inline int getmiso(const struct spi_device *spi)
127 */ 127 */
128#define spidelay(nsecs) do {} while (0) 128#define spidelay(nsecs) do {} while (0)
129 129
130#include "spi_bitbang_txrx.h" 130#include "spi-bitbang-txrx.h"
131 131
132/* 132/*
133 * These functions can leverage inline expansion of GPIO calls to shrink 133 * These functions can leverage inline expansion of GPIO calls to shrink
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi-imx.c
index 69d6dba67c19..8ac6542aedcd 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi-imx.c
@@ -34,6 +34,9 @@
34#include <linux/spi/spi.h> 34#include <linux/spi/spi.h>
35#include <linux/spi/spi_bitbang.h> 35#include <linux/spi/spi_bitbang.h>
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/of.h>
38#include <linux/of_device.h>
39#include <linux/of_gpio.h>
37 40
38#include <mach/spi.h> 41#include <mach/spi.h>
39 42
@@ -45,9 +48,6 @@
45#define MXC_CSPIINT 0x0c 48#define MXC_CSPIINT 0x0c
46#define MXC_RESET 0x1c 49#define MXC_RESET 0x1c
47 50
48#define MX3_CSPISTAT 0x14
49#define MX3_CSPISTAT_RR (1 << 3)
50
51/* generic defines to abstract from the different register layouts */ 51/* generic defines to abstract from the different register layouts */
52#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ 52#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
53#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ 53#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
@@ -60,12 +60,12 @@ struct spi_imx_config {
60}; 60};
61 61
62enum spi_imx_devtype { 62enum spi_imx_devtype {
63 SPI_IMX_VER_IMX1, 63 IMX1_CSPI,
64 SPI_IMX_VER_0_0, 64 IMX21_CSPI,
65 SPI_IMX_VER_0_4, 65 IMX27_CSPI,
66 SPI_IMX_VER_0_5, 66 IMX31_CSPI,
67 SPI_IMX_VER_0_7, 67 IMX35_CSPI, /* CSPI on all i.mx except above */
68 SPI_IMX_VER_2_3, 68 IMX51_ECSPI, /* ECSPI on i.mx51 and later */
69}; 69};
70 70
71struct spi_imx_data; 71struct spi_imx_data;
@@ -76,7 +76,7 @@ struct spi_imx_devtype_data {
76 void (*trigger)(struct spi_imx_data *); 76 void (*trigger)(struct spi_imx_data *);
77 int (*rx_available)(struct spi_imx_data *); 77 int (*rx_available)(struct spi_imx_data *);
78 void (*reset)(struct spi_imx_data *); 78 void (*reset)(struct spi_imx_data *);
79 unsigned int fifosize; 79 enum spi_imx_devtype devtype;
80}; 80};
81 81
82struct spi_imx_data { 82struct spi_imx_data {
@@ -87,7 +87,6 @@ struct spi_imx_data {
87 int irq; 87 int irq;
88 struct clk *clk; 88 struct clk *clk;
89 unsigned long spi_clk; 89 unsigned long spi_clk;
90 int *chipselect;
91 90
92 unsigned int count; 91 unsigned int count;
93 void (*tx)(struct spi_imx_data *); 92 void (*tx)(struct spi_imx_data *);
@@ -96,9 +95,25 @@ struct spi_imx_data {
96 const void *tx_buf; 95 const void *tx_buf;
97 unsigned int txfifo; /* number of words pushed in tx FIFO */ 96 unsigned int txfifo; /* number of words pushed in tx FIFO */
98 97
99 struct spi_imx_devtype_data devtype_data; 98 struct spi_imx_devtype_data *devtype_data;
99 int chipselect[0];
100}; 100};
101 101
102static inline int is_imx27_cspi(struct spi_imx_data *d)
103{
104 return d->devtype_data->devtype == IMX27_CSPI;
105}
106
107static inline int is_imx35_cspi(struct spi_imx_data *d)
108{
109 return d->devtype_data->devtype == IMX35_CSPI;
110}
111
112static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
113{
114 return (d->devtype_data->devtype == IMX51_ECSPI) ? 64 : 8;
115}
116
102#define MXC_SPI_BUF_RX(type) \ 117#define MXC_SPI_BUF_RX(type) \
103static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \ 118static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
104{ \ 119{ \
@@ -140,14 +155,9 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
140 155
141/* MX21, MX27 */ 156/* MX21, MX27 */
142static unsigned int spi_imx_clkdiv_1(unsigned int fin, 157static unsigned int spi_imx_clkdiv_1(unsigned int fin,
143 unsigned int fspi) 158 unsigned int fspi, unsigned int max)
144{ 159{
145 int i, max; 160 int i;
146
147 if (cpu_is_mx21())
148 max = 18;
149 else
150 max = 16;
151 161
152 for (i = 2; i < max; i++) 162 for (i = 2; i < max; i++)
153 if (fspi * mxc_clkdivs[i] >= fin) 163 if (fspi * mxc_clkdivs[i] >= fin)
@@ -171,30 +181,30 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
171 return 7; 181 return 7;
172} 182}
173 183
174#define SPI_IMX2_3_CTRL 0x08 184#define MX51_ECSPI_CTRL 0x08
175#define SPI_IMX2_3_CTRL_ENABLE (1 << 0) 185#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
176#define SPI_IMX2_3_CTRL_XCH (1 << 2) 186#define MX51_ECSPI_CTRL_XCH (1 << 2)
177#define SPI_IMX2_3_CTRL_MODE_MASK (0xf << 4) 187#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
178#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8 188#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
179#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12 189#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
180#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18) 190#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
181#define SPI_IMX2_3_CTRL_BL_OFFSET 20 191#define MX51_ECSPI_CTRL_BL_OFFSET 20
182 192
183#define SPI_IMX2_3_CONFIG 0x0c 193#define MX51_ECSPI_CONFIG 0x0c
184#define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) 194#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
185#define SPI_IMX2_3_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4)) 195#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
186#define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) 196#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
187#define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) 197#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
188 198
189#define SPI_IMX2_3_INT 0x10 199#define MX51_ECSPI_INT 0x10
190#define SPI_IMX2_3_INT_TEEN (1 << 0) 200#define MX51_ECSPI_INT_TEEN (1 << 0)
191#define SPI_IMX2_3_INT_RREN (1 << 3) 201#define MX51_ECSPI_INT_RREN (1 << 3)
192 202
193#define SPI_IMX2_3_STAT 0x18 203#define MX51_ECSPI_STAT 0x18
194#define SPI_IMX2_3_STAT_RR (1 << 3) 204#define MX51_ECSPI_STAT_RR (1 << 3)
195 205
196/* MX51 eCSPI */ 206/* MX51 eCSPI */
197static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi) 207static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
198{ 208{
199 /* 209 /*
200 * there are two 4-bit dividers, the pre-divider divides by 210 * there are two 4-bit dividers, the pre-divider divides by
@@ -222,36 +232,36 @@ static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi)
222 232
223 pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", 233 pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
224 __func__, fin, fspi, post, pre); 234 __func__, fin, fspi, post, pre);
225 return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) | 235 return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
226 (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET); 236 (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
227} 237}
228 238
229static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable) 239static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
230{ 240{
231 unsigned val = 0; 241 unsigned val = 0;
232 242
233 if (enable & MXC_INT_TE) 243 if (enable & MXC_INT_TE)
234 val |= SPI_IMX2_3_INT_TEEN; 244 val |= MX51_ECSPI_INT_TEEN;
235 245
236 if (enable & MXC_INT_RR) 246 if (enable & MXC_INT_RR)
237 val |= SPI_IMX2_3_INT_RREN; 247 val |= MX51_ECSPI_INT_RREN;
238 248
239 writel(val, spi_imx->base + SPI_IMX2_3_INT); 249 writel(val, spi_imx->base + MX51_ECSPI_INT);
240} 250}
241 251
242static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx) 252static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
243{ 253{
244 u32 reg; 254 u32 reg;
245 255
246 reg = readl(spi_imx->base + SPI_IMX2_3_CTRL); 256 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
247 reg |= SPI_IMX2_3_CTRL_XCH; 257 reg |= MX51_ECSPI_CTRL_XCH;
248 writel(reg, spi_imx->base + SPI_IMX2_3_CTRL); 258 writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
249} 259}
250 260
251static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx, 261static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
252 struct spi_imx_config *config) 262 struct spi_imx_config *config)
253{ 263{
254 u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0; 264 u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
255 265
256 /* 266 /*
257 * The hardware seems to have a race condition when changing modes. The 267 * The hardware seems to have a race condition when changing modes. The
@@ -260,42 +270,42 @@ static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx,
260 * the same time. 270 * the same time.
261 * So set master mode for all channels as we do not support slave mode. 271 * So set master mode for all channels as we do not support slave mode.
262 */ 272 */
263 ctrl |= SPI_IMX2_3_CTRL_MODE_MASK; 273 ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
264 274
265 /* set clock speed */ 275 /* set clock speed */
266 ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz); 276 ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz);
267 277
268 /* set chip select to use */ 278 /* set chip select to use */
269 ctrl |= SPI_IMX2_3_CTRL_CS(config->cs); 279 ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
270 280
271 ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET; 281 ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
272 282
273 cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs); 283 cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);
274 284
275 if (config->mode & SPI_CPHA) 285 if (config->mode & SPI_CPHA)
276 cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs); 286 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
277 287
278 if (config->mode & SPI_CPOL) 288 if (config->mode & SPI_CPOL)
279 cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs); 289 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
280 290
281 if (config->mode & SPI_CS_HIGH) 291 if (config->mode & SPI_CS_HIGH)
282 cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs); 292 cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
283 293
284 writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL); 294 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
285 writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG); 295 writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
286 296
287 return 0; 297 return 0;
288} 298}
289 299
290static int __maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx) 300static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
291{ 301{
292 return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR; 302 return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
293} 303}
294 304
295static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx) 305static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
296{ 306{
297 /* drain receive buffer */ 307 /* drain receive buffer */
298 while (spi_imx2_3_rx_available(spi_imx)) 308 while (mx51_ecspi_rx_available(spi_imx))
299 readl(spi_imx->base + MXC_CSPIRXDATA); 309 readl(spi_imx->base + MXC_CSPIRXDATA);
300} 310}
301 311
@@ -343,32 +353,7 @@ static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
343 writel(reg, spi_imx->base + MXC_CSPICTRL); 353 writel(reg, spi_imx->base + MXC_CSPICTRL);
344} 354}
345 355
346static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx, 356static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
347 struct spi_imx_config *config)
348{
349 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
350 int cs = spi_imx->chipselect[config->cs];
351
352 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
353 MX31_CSPICTRL_DR_SHIFT;
354
355 reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
356
357 if (config->mode & SPI_CPHA)
358 reg |= MX31_CSPICTRL_PHA;
359 if (config->mode & SPI_CPOL)
360 reg |= MX31_CSPICTRL_POL;
361 if (config->mode & SPI_CS_HIGH)
362 reg |= MX31_CSPICTRL_SSPOL;
363 if (cs < 0)
364 reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT;
365
366 writel(reg, spi_imx->base + MXC_CSPICTRL);
367
368 return 0;
369}
370
371static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
372 struct spi_imx_config *config) 357 struct spi_imx_config *config)
373{ 358{
374 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; 359 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
@@ -377,8 +362,12 @@ static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
377 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << 362 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
378 MX31_CSPICTRL_DR_SHIFT; 363 MX31_CSPICTRL_DR_SHIFT;
379 364
380 reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; 365 if (is_imx35_cspi(spi_imx)) {
381 reg |= MX31_CSPICTRL_SSCTL; 366 reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
367 reg |= MX31_CSPICTRL_SSCTL;
368 } else {
369 reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
370 }
382 371
383 if (config->mode & SPI_CPHA) 372 if (config->mode & SPI_CPHA)
384 reg |= MX31_CSPICTRL_PHA; 373 reg |= MX31_CSPICTRL_PHA;
@@ -387,7 +376,9 @@ static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
387 if (config->mode & SPI_CS_HIGH) 376 if (config->mode & SPI_CS_HIGH)
388 reg |= MX31_CSPICTRL_SSPOL; 377 reg |= MX31_CSPICTRL_SSPOL;
389 if (cs < 0) 378 if (cs < 0)
390 reg |= (cs + 32) << MX35_CSPICTRL_CS_SHIFT; 379 reg |= (cs + 32) <<
380 (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
381 MX31_CSPICTRL_CS_SHIFT);
391 382
392 writel(reg, spi_imx->base + MXC_CSPICTRL); 383 writel(reg, spi_imx->base + MXC_CSPICTRL);
393 384
@@ -399,77 +390,78 @@ static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
399 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; 390 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
400} 391}
401 392
402static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx) 393static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
403{ 394{
404 /* drain receive buffer */ 395 /* drain receive buffer */
405 while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) 396 while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
406 readl(spi_imx->base + MXC_CSPIRXDATA); 397 readl(spi_imx->base + MXC_CSPIRXDATA);
407} 398}
408 399
409#define MX27_INTREG_RR (1 << 4) 400#define MX21_INTREG_RR (1 << 4)
410#define MX27_INTREG_TEEN (1 << 9) 401#define MX21_INTREG_TEEN (1 << 9)
411#define MX27_INTREG_RREN (1 << 13) 402#define MX21_INTREG_RREN (1 << 13)
412 403
413#define MX27_CSPICTRL_POL (1 << 5) 404#define MX21_CSPICTRL_POL (1 << 5)
414#define MX27_CSPICTRL_PHA (1 << 6) 405#define MX21_CSPICTRL_PHA (1 << 6)
415#define MX27_CSPICTRL_SSPOL (1 << 8) 406#define MX21_CSPICTRL_SSPOL (1 << 8)
416#define MX27_CSPICTRL_XCH (1 << 9) 407#define MX21_CSPICTRL_XCH (1 << 9)
417#define MX27_CSPICTRL_ENABLE (1 << 10) 408#define MX21_CSPICTRL_ENABLE (1 << 10)
418#define MX27_CSPICTRL_MASTER (1 << 11) 409#define MX21_CSPICTRL_MASTER (1 << 11)
419#define MX27_CSPICTRL_DR_SHIFT 14 410#define MX21_CSPICTRL_DR_SHIFT 14
420#define MX27_CSPICTRL_CS_SHIFT 19 411#define MX21_CSPICTRL_CS_SHIFT 19
421 412
422static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable) 413static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
423{ 414{
424 unsigned int val = 0; 415 unsigned int val = 0;
425 416
426 if (enable & MXC_INT_TE) 417 if (enable & MXC_INT_TE)
427 val |= MX27_INTREG_TEEN; 418 val |= MX21_INTREG_TEEN;
428 if (enable & MXC_INT_RR) 419 if (enable & MXC_INT_RR)
429 val |= MX27_INTREG_RREN; 420 val |= MX21_INTREG_RREN;
430 421
431 writel(val, spi_imx->base + MXC_CSPIINT); 422 writel(val, spi_imx->base + MXC_CSPIINT);
432} 423}
433 424
434static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx) 425static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
435{ 426{
436 unsigned int reg; 427 unsigned int reg;
437 428
438 reg = readl(spi_imx->base + MXC_CSPICTRL); 429 reg = readl(spi_imx->base + MXC_CSPICTRL);
439 reg |= MX27_CSPICTRL_XCH; 430 reg |= MX21_CSPICTRL_XCH;
440 writel(reg, spi_imx->base + MXC_CSPICTRL); 431 writel(reg, spi_imx->base + MXC_CSPICTRL);
441} 432}
442 433
443static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx, 434static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
444 struct spi_imx_config *config) 435 struct spi_imx_config *config)
445{ 436{
446 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; 437 unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
447 int cs = spi_imx->chipselect[config->cs]; 438 int cs = spi_imx->chipselect[config->cs];
439 unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
448 440
449 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << 441 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
450 MX27_CSPICTRL_DR_SHIFT; 442 MX21_CSPICTRL_DR_SHIFT;
451 reg |= config->bpw - 1; 443 reg |= config->bpw - 1;
452 444
453 if (config->mode & SPI_CPHA) 445 if (config->mode & SPI_CPHA)
454 reg |= MX27_CSPICTRL_PHA; 446 reg |= MX21_CSPICTRL_PHA;
455 if (config->mode & SPI_CPOL) 447 if (config->mode & SPI_CPOL)
456 reg |= MX27_CSPICTRL_POL; 448 reg |= MX21_CSPICTRL_POL;
457 if (config->mode & SPI_CS_HIGH) 449 if (config->mode & SPI_CS_HIGH)
458 reg |= MX27_CSPICTRL_SSPOL; 450 reg |= MX21_CSPICTRL_SSPOL;
459 if (cs < 0) 451 if (cs < 0)
460 reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT; 452 reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;
461 453
462 writel(reg, spi_imx->base + MXC_CSPICTRL); 454 writel(reg, spi_imx->base + MXC_CSPICTRL);
463 455
464 return 0; 456 return 0;
465} 457}
466 458
467static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx) 459static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
468{ 460{
469 return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; 461 return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
470} 462}
471 463
472static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx) 464static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
473{ 465{
474 writel(1, spi_imx->base + MXC_RESET); 466 writel(1, spi_imx->base + MXC_RESET);
475} 467}
@@ -535,61 +527,94 @@ static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
535 writel(1, spi_imx->base + MXC_RESET); 527 writel(1, spi_imx->base + MXC_RESET);
536} 528}
537 529
538/* 530static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
539 * These version numbers are taken from the Freescale driver. Unfortunately it 531 .intctrl = mx1_intctrl,
540 * doesn't support i.MX1, so this entry doesn't match the scheme. :-( 532 .config = mx1_config,
541 */ 533 .trigger = mx1_trigger,
542static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = { 534 .rx_available = mx1_rx_available,
543#ifdef CONFIG_SPI_IMX_VER_IMX1 535 .reset = mx1_reset,
544 [SPI_IMX_VER_IMX1] = { 536 .devtype = IMX1_CSPI,
545 .intctrl = mx1_intctrl, 537};
546 .config = mx1_config, 538
547 .trigger = mx1_trigger, 539static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
548 .rx_available = mx1_rx_available, 540 .intctrl = mx21_intctrl,
549 .reset = mx1_reset, 541 .config = mx21_config,
550 .fifosize = 8, 542 .trigger = mx21_trigger,
551 }, 543 .rx_available = mx21_rx_available,
552#endif 544 .reset = mx21_reset,
553#ifdef CONFIG_SPI_IMX_VER_0_0 545 .devtype = IMX21_CSPI,
554 [SPI_IMX_VER_0_0] = { 546};
555 .intctrl = mx27_intctrl, 547
556 .config = mx27_config, 548static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
557 .trigger = mx27_trigger, 549 /* i.mx27 cspi shares the functions with i.mx21 one */
558 .rx_available = mx27_rx_available, 550 .intctrl = mx21_intctrl,
559 .reset = spi_imx0_0_reset, 551 .config = mx21_config,
560 .fifosize = 8, 552 .trigger = mx21_trigger,
561 }, 553 .rx_available = mx21_rx_available,
562#endif 554 .reset = mx21_reset,
563#ifdef CONFIG_SPI_IMX_VER_0_4 555 .devtype = IMX27_CSPI,
564 [SPI_IMX_VER_0_4] = { 556};
565 .intctrl = mx31_intctrl, 557
566 .config = spi_imx0_4_config, 558static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
567 .trigger = mx31_trigger, 559 .intctrl = mx31_intctrl,
568 .rx_available = mx31_rx_available, 560 .config = mx31_config,
569 .reset = spi_imx0_4_reset, 561 .trigger = mx31_trigger,
570 .fifosize = 8, 562 .rx_available = mx31_rx_available,
571 }, 563 .reset = mx31_reset,
572#endif 564 .devtype = IMX31_CSPI,
573#ifdef CONFIG_SPI_IMX_VER_0_7 565};
574 [SPI_IMX_VER_0_7] = { 566
575 .intctrl = mx31_intctrl, 567static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
576 .config = spi_imx0_7_config, 568 /* i.mx35 and later cspi shares the functions with i.mx31 one */
577 .trigger = mx31_trigger, 569 .intctrl = mx31_intctrl,
578 .rx_available = mx31_rx_available, 570 .config = mx31_config,
579 .reset = spi_imx0_4_reset, 571 .trigger = mx31_trigger,
580 .fifosize = 8, 572 .rx_available = mx31_rx_available,
581 }, 573 .reset = mx31_reset,
582#endif 574 .devtype = IMX35_CSPI,
583#ifdef CONFIG_SPI_IMX_VER_2_3 575};
584 [SPI_IMX_VER_2_3] = { 576
585 .intctrl = spi_imx2_3_intctrl, 577static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
586 .config = spi_imx2_3_config, 578 .intctrl = mx51_ecspi_intctrl,
587 .trigger = spi_imx2_3_trigger, 579 .config = mx51_ecspi_config,
588 .rx_available = spi_imx2_3_rx_available, 580 .trigger = mx51_ecspi_trigger,
589 .reset = spi_imx2_3_reset, 581 .rx_available = mx51_ecspi_rx_available,
590 .fifosize = 64, 582 .reset = mx51_ecspi_reset,
591 }, 583 .devtype = IMX51_ECSPI,
592#endif 584};
585
586static struct platform_device_id spi_imx_devtype[] = {
587 {
588 .name = "imx1-cspi",
589 .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
590 }, {
591 .name = "imx21-cspi",
592 .driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
593 }, {
594 .name = "imx27-cspi",
595 .driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
596 }, {
597 .name = "imx31-cspi",
598 .driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
599 }, {
600 .name = "imx35-cspi",
601 .driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
602 }, {
603 .name = "imx51-ecspi",
604 .driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
605 }, {
606 /* sentinel */
607 }
608};
609
610static const struct of_device_id spi_imx_dt_ids[] = {
611 { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
612 { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
613 { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
614 { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
615 { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
616 { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
617 { /* sentinel */ }
593}; 618};
594 619
595static void spi_imx_chipselect(struct spi_device *spi, int is_active) 620static void spi_imx_chipselect(struct spi_device *spi, int is_active)
@@ -607,21 +632,21 @@ static void spi_imx_chipselect(struct spi_device *spi, int is_active)
607 632
608static void spi_imx_push(struct spi_imx_data *spi_imx) 633static void spi_imx_push(struct spi_imx_data *spi_imx)
609{ 634{
610 while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) { 635 while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
611 if (!spi_imx->count) 636 if (!spi_imx->count)
612 break; 637 break;
613 spi_imx->tx(spi_imx); 638 spi_imx->tx(spi_imx);
614 spi_imx->txfifo++; 639 spi_imx->txfifo++;
615 } 640 }
616 641
617 spi_imx->devtype_data.trigger(spi_imx); 642 spi_imx->devtype_data->trigger(spi_imx);
618} 643}
619 644
620static irqreturn_t spi_imx_isr(int irq, void *dev_id) 645static irqreturn_t spi_imx_isr(int irq, void *dev_id)
621{ 646{
622 struct spi_imx_data *spi_imx = dev_id; 647 struct spi_imx_data *spi_imx = dev_id;
623 648
624 while (spi_imx->devtype_data.rx_available(spi_imx)) { 649 while (spi_imx->devtype_data->rx_available(spi_imx)) {
625 spi_imx->rx(spi_imx); 650 spi_imx->rx(spi_imx);
626 spi_imx->txfifo--; 651 spi_imx->txfifo--;
627 } 652 }
@@ -635,12 +660,12 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
635 /* No data left to push, but still waiting for rx data, 660 /* No data left to push, but still waiting for rx data,
636 * enable receive data available interrupt. 661 * enable receive data available interrupt.
637 */ 662 */
638 spi_imx->devtype_data.intctrl( 663 spi_imx->devtype_data->intctrl(
639 spi_imx, MXC_INT_RR); 664 spi_imx, MXC_INT_RR);
640 return IRQ_HANDLED; 665 return IRQ_HANDLED;
641 } 666 }
642 667
643 spi_imx->devtype_data.intctrl(spi_imx, 0); 668 spi_imx->devtype_data->intctrl(spi_imx, 0);
644 complete(&spi_imx->xfer_done); 669 complete(&spi_imx->xfer_done);
645 670
646 return IRQ_HANDLED; 671 return IRQ_HANDLED;
@@ -677,7 +702,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
677 } else 702 } else
678 BUG(); 703 BUG();
679 704
680 spi_imx->devtype_data.config(spi_imx, &config); 705 spi_imx->devtype_data->config(spi_imx, &config);
681 706
682 return 0; 707 return 0;
683} 708}
@@ -696,7 +721,7 @@ static int spi_imx_transfer(struct spi_device *spi,
696 721
697 spi_imx_push(spi_imx); 722 spi_imx_push(spi_imx);
698 723
699 spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE); 724 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
700 725
701 wait_for_completion(&spi_imx->xfer_done); 726 wait_for_completion(&spi_imx->xfer_done);
702 727
@@ -723,72 +748,47 @@ static void spi_imx_cleanup(struct spi_device *spi)
723{ 748{
724} 749}
725 750
726static struct platform_device_id spi_imx_devtype[] = {
727 {
728 .name = "imx1-cspi",
729 .driver_data = SPI_IMX_VER_IMX1,
730 }, {
731 .name = "imx21-cspi",
732 .driver_data = SPI_IMX_VER_0_0,
733 }, {
734 .name = "imx25-cspi",
735 .driver_data = SPI_IMX_VER_0_7,
736 }, {
737 .name = "imx27-cspi",
738 .driver_data = SPI_IMX_VER_0_0,
739 }, {
740 .name = "imx31-cspi",
741 .driver_data = SPI_IMX_VER_0_4,
742 }, {
743 .name = "imx35-cspi",
744 .driver_data = SPI_IMX_VER_0_7,
745 }, {
746 .name = "imx51-cspi",
747 .driver_data = SPI_IMX_VER_0_7,
748 }, {
749 .name = "imx51-ecspi",
750 .driver_data = SPI_IMX_VER_2_3,
751 }, {
752 .name = "imx53-cspi",
753 .driver_data = SPI_IMX_VER_0_7,
754 }, {
755 .name = "imx53-ecspi",
756 .driver_data = SPI_IMX_VER_2_3,
757 }, {
758 /* sentinel */
759 }
760};
761
762static int __devinit spi_imx_probe(struct platform_device *pdev) 751static int __devinit spi_imx_probe(struct platform_device *pdev)
763{ 752{
764 struct spi_imx_master *mxc_platform_info; 753 struct device_node *np = pdev->dev.of_node;
754 const struct of_device_id *of_id =
755 of_match_device(spi_imx_dt_ids, &pdev->dev);
756 struct spi_imx_master *mxc_platform_info =
757 dev_get_platdata(&pdev->dev);
765 struct spi_master *master; 758 struct spi_master *master;
766 struct spi_imx_data *spi_imx; 759 struct spi_imx_data *spi_imx;
767 struct resource *res; 760 struct resource *res;
768 int i, ret; 761 int i, ret, num_cs;
769 762
770 mxc_platform_info = dev_get_platdata(&pdev->dev); 763 if (!np && !mxc_platform_info) {
771 if (!mxc_platform_info) {
772 dev_err(&pdev->dev, "can't get the platform data\n"); 764 dev_err(&pdev->dev, "can't get the platform data\n");
773 return -EINVAL; 765 return -EINVAL;
774 } 766 }
775 767
776 master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data)); 768 ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
769 if (ret < 0)
770 num_cs = mxc_platform_info->num_chipselect;
771
772 master = spi_alloc_master(&pdev->dev,
773 sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
777 if (!master) 774 if (!master)
778 return -ENOMEM; 775 return -ENOMEM;
779 776
780 platform_set_drvdata(pdev, master); 777 platform_set_drvdata(pdev, master);
781 778
782 master->bus_num = pdev->id; 779 master->bus_num = pdev->id;
783 master->num_chipselect = mxc_platform_info->num_chipselect; 780 master->num_chipselect = num_cs;
784 781
785 spi_imx = spi_master_get_devdata(master); 782 spi_imx = spi_master_get_devdata(master);
786 spi_imx->bitbang.master = spi_master_get(master); 783 spi_imx->bitbang.master = spi_master_get(master);
787 spi_imx->chipselect = mxc_platform_info->chipselect;
788 784
789 for (i = 0; i < master->num_chipselect; i++) { 785 for (i = 0; i < master->num_chipselect; i++) {
790 if (spi_imx->chipselect[i] < 0) 786 int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
787 if (cs_gpio < 0)
788 cs_gpio = mxc_platform_info->chipselect[i];
789 if (cs_gpio < 0)
791 continue; 790 continue;
791 spi_imx->chipselect[i] = cs_gpio;
792 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); 792 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
793 if (ret) { 793 if (ret) {
794 while (i > 0) { 794 while (i > 0) {
@@ -810,8 +810,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
810 810
811 init_completion(&spi_imx->xfer_done); 811 init_completion(&spi_imx->xfer_done);
812 812
813 spi_imx->devtype_data = 813 spi_imx->devtype_data = of_id ? of_id->data :
814 spi_imx_devtype_data[pdev->id_entry->driver_data]; 814 (struct spi_imx_devtype_data *) pdev->id_entry->driver_data;
815 815
816 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 816 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
817 if (!res) { 817 if (!res) {
@@ -854,10 +854,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
854 clk_enable(spi_imx->clk); 854 clk_enable(spi_imx->clk);
855 spi_imx->spi_clk = clk_get_rate(spi_imx->clk); 855 spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
856 856
857 spi_imx->devtype_data.reset(spi_imx); 857 spi_imx->devtype_data->reset(spi_imx);
858 858
859 spi_imx->devtype_data.intctrl(spi_imx, 0); 859 spi_imx->devtype_data->intctrl(spi_imx, 0);
860 860
861 master->dev.of_node = pdev->dev.of_node;
861 ret = spi_bitbang_start(&spi_imx->bitbang); 862 ret = spi_bitbang_start(&spi_imx->bitbang);
862 if (ret) { 863 if (ret) {
863 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); 864 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
@@ -920,6 +921,7 @@ static struct platform_driver spi_imx_driver = {
920 .driver = { 921 .driver = {
921 .name = DRIVER_NAME, 922 .name = DRIVER_NAME,
922 .owner = THIS_MODULE, 923 .owner = THIS_MODULE,
924 .of_match_table = spi_imx_dt_ids,
923 }, 925 },
924 .id_table = spi_imx_devtype, 926 .id_table = spi_imx_devtype,
925 .probe = spi_imx_probe, 927 .probe = spi_imx_probe,
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi-lm70llp.c
index 7746a41ab6d6..933eb9d9ddd4 100644
--- a/drivers/spi/spi_lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * spi_lm70llp.c - driver for LM70EVAL-LLP board for the LM70 sensor 2 * Driver for LM70EVAL-LLP board for the LM70 sensor
3 * 3 *
4 * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com> 4 * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
5 * 5 *
@@ -174,7 +174,7 @@ static inline int getmiso(struct spi_device *s)
174} 174}
175/*--------------------------------------------------------------------*/ 175/*--------------------------------------------------------------------*/
176 176
177#include "spi_bitbang_txrx.h" 177#include "spi-bitbang-txrx.h"
178 178
179static void lm70_chipselect(struct spi_device *spi, int value) 179static void lm70_chipselect(struct spi_device *spi, int value)
180{ 180{
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/spi-mpc512x-psc.c
index 6a5b4238fb6b..6a5b4238fb6b 100644
--- a/drivers/spi/mpc512x_psc_spi.c
+++ b/drivers/spi/spi-mpc512x-psc.c
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/spi-mpc52xx-psc.c
index e30baf0852ac..e30baf0852ac 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/spi-mpc52xx.c
index 015a974bed72..015a974bed72 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/spi-mpc52xx.c
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi-nuc900.c
index 3cd15f690f16..c0a6ce81f9c0 100644
--- a/drivers/spi/spi_nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -1,5 +1,4 @@
1/* linux/drivers/spi/spi_nuc900.c 1/*
2 *
3 * Copyright (c) 2009 Nuvoton technology. 2 * Copyright (c) 2009 Nuvoton technology.
4 * Wan ZongShun <mcuos.com@gmail.com> 3 * Wan ZongShun <mcuos.com@gmail.com>
5 * 4 *
@@ -7,7 +6,7 @@
7 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
9 * 8 *
10*/ 9 */
11 10
12#include <linux/init.h> 11#include <linux/init.h>
13#include <linux/spinlock.h> 12#include <linux/spinlock.h>
diff --git a/drivers/spi/spi_oc_tiny.c b/drivers/spi/spi-oc-tiny.c
index f1bde66cea19..f1bde66cea19 100644
--- a/drivers/spi/spi_oc_tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/spi-omap-100k.c
index 9bd1c92ad96e..9bd1c92ad96e 100644
--- a/drivers/spi/omap_spi_100k.c
+++ b/drivers/spi/spi-omap-100k.c
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/spi-omap-uwire.c
index 160d3266205f..00a8e9d7dbe4 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * omap_uwire.c -- MicroWire interface driver for OMAP 2 * MicroWire interface driver for OMAP
3 * 3 *
4 * Copyright 2003 MontaVista Software Inc. <source@mvista.com> 4 * Copyright 2003 MontaVista Software Inc. <source@mvista.com>
5 * 5 *
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 969cdd2fe124..fde3a2d4f120 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1116,8 +1116,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
1116 status = -ENODEV; 1116 status = -ENODEV;
1117 goto err1; 1117 goto err1;
1118 } 1118 }
1119 if (!request_mem_region(r->start, (r->end - r->start) + 1, 1119 if (!request_mem_region(r->start, resource_size(r),
1120 dev_name(&pdev->dev))) { 1120 dev_name(&pdev->dev))) {
1121 status = -EBUSY; 1121 status = -EBUSY;
1122 goto err1; 1122 goto err1;
1123 } 1123 }
@@ -1125,7 +1125,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
1125 r->start += pdata->regs_offset; 1125 r->start += pdata->regs_offset;
1126 r->end += pdata->regs_offset; 1126 r->end += pdata->regs_offset;
1127 mcspi->phys = r->start; 1127 mcspi->phys = r->start;
1128 mcspi->base = ioremap(r->start, r->end - r->start + 1); 1128 mcspi->base = ioremap(r->start, resource_size(r));
1129 if (!mcspi->base) { 1129 if (!mcspi->base) {
1130 dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); 1130 dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
1131 status = -ENOMEM; 1131 status = -ENOMEM;
@@ -1190,7 +1190,7 @@ err4:
1190err3: 1190err3:
1191 kfree(mcspi->dma_channels); 1191 kfree(mcspi->dma_channels);
1192err2: 1192err2:
1193 release_mem_region(r->start, (r->end - r->start) + 1); 1193 release_mem_region(r->start, resource_size(r));
1194 iounmap(mcspi->base); 1194 iounmap(mcspi->base);
1195err1: 1195err1:
1196 return status; 1196 return status;
@@ -1210,7 +1210,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
1210 1210
1211 omap2_mcspi_disable_clocks(mcspi); 1211 omap2_mcspi_disable_clocks(mcspi);
1212 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1212 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1213 release_mem_region(r->start, (r->end - r->start) + 1); 1213 release_mem_region(r->start, resource_size(r));
1214 1214
1215 base = mcspi->base; 1215 base = mcspi->base;
1216 spi_unregister_master(master); 1216 spi_unregister_master(master);
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/spi-orion.c
index 0b677dc041ad..9421a390a5e3 100644
--- a/drivers/spi/orion_spi.c
+++ b/drivers/spi/spi-orion.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * orion_spi.c -- Marvell Orion SPI controller driver 2 * Marvell Orion SPI controller driver
3 * 3 *
4 * Author: Shadi Ammouri <shadi@marvell.com> 4 * Author: Shadi Ammouri <shadi@marvell.com>
5 * Copyright (C) 2007-2008 Marvell Ltd. 5 * Copyright (C) 2007-2008 Marvell Ltd.
@@ -489,7 +489,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
489 goto out; 489 goto out;
490 } 490 }
491 491
492 if (!request_mem_region(r->start, (r->end - r->start) + 1, 492 if (!request_mem_region(r->start, resource_size(r),
493 dev_name(&pdev->dev))) { 493 dev_name(&pdev->dev))) {
494 status = -EBUSY; 494 status = -EBUSY;
495 goto out; 495 goto out;
@@ -511,7 +511,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
511 return status; 511 return status;
512 512
513out_rel_mem: 513out_rel_mem:
514 release_mem_region(r->start, (r->end - r->start) + 1); 514 release_mem_region(r->start, resource_size(r));
515 515
516out: 516out:
517 spi_master_put(master); 517 spi_master_put(master);
@@ -531,7 +531,7 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
531 cancel_work_sync(&spi->work); 531 cancel_work_sync(&spi->work);
532 532
533 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 533 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
534 release_mem_region(r->start, (r->end - r->start) + 1); 534 release_mem_region(r->start, resource_size(r));
535 535
536 spi_unregister_master(master); 536 spi_unregister_master(master);
537 537
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/spi-pl022.c
index d18ce9e946d8..eba88c749fb1 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/spi/amba-pl022.c
3 *
4 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master. 2 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
5 * 3 *
6 * Copyright (C) 2008-2009 ST-Ericsson AB 4 * Copyright (C) 2008-2009 ST-Ericsson AB
@@ -42,6 +40,7 @@
42#include <linux/dmaengine.h> 40#include <linux/dmaengine.h>
43#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
44#include <linux/scatterlist.h> 42#include <linux/scatterlist.h>
43#include <linux/pm_runtime.h>
45 44
46/* 45/*
47 * This macro is used to define some register default values. 46 * This macro is used to define some register default values.
@@ -383,6 +382,8 @@ struct pl022 {
383 enum ssp_reading read; 382 enum ssp_reading read;
384 enum ssp_writing write; 383 enum ssp_writing write;
385 u32 exp_fifo_level; 384 u32 exp_fifo_level;
385 enum ssp_rx_level_trig rx_lev_trig;
386 enum ssp_tx_level_trig tx_lev_trig;
386 /* DMA settings */ 387 /* DMA settings */
387#ifdef CONFIG_DMA_ENGINE 388#ifdef CONFIG_DMA_ENGINE
388 struct dma_chan *dma_rx_channel; 389 struct dma_chan *dma_rx_channel;
@@ -517,6 +518,7 @@ static void giveback(struct pl022 *pl022)
517 clk_disable(pl022->clk); 518 clk_disable(pl022->clk);
518 amba_pclk_disable(pl022->adev); 519 amba_pclk_disable(pl022->adev);
519 amba_vcore_disable(pl022->adev); 520 amba_vcore_disable(pl022->adev);
521 pm_runtime_put(&pl022->adev->dev);
520} 522}
521 523
522/** 524/**
@@ -909,12 +911,10 @@ static int configure_dma(struct pl022 *pl022)
909 struct dma_slave_config rx_conf = { 911 struct dma_slave_config rx_conf = {
910 .src_addr = SSP_DR(pl022->phybase), 912 .src_addr = SSP_DR(pl022->phybase),
911 .direction = DMA_FROM_DEVICE, 913 .direction = DMA_FROM_DEVICE,
912 .src_maxburst = pl022->vendor->fifodepth >> 1,
913 }; 914 };
914 struct dma_slave_config tx_conf = { 915 struct dma_slave_config tx_conf = {
915 .dst_addr = SSP_DR(pl022->phybase), 916 .dst_addr = SSP_DR(pl022->phybase),
916 .direction = DMA_TO_DEVICE, 917 .direction = DMA_TO_DEVICE,
917 .dst_maxburst = pl022->vendor->fifodepth >> 1,
918 }; 918 };
919 unsigned int pages; 919 unsigned int pages;
920 int ret; 920 int ret;
@@ -928,6 +928,54 @@ static int configure_dma(struct pl022 *pl022)
928 if (!rxchan || !txchan) 928 if (!rxchan || !txchan)
929 return -ENODEV; 929 return -ENODEV;
930 930
931 /*
932 * If supplied, the DMA burstsize should equal the FIFO trigger level.
933 * Notice that the DMA engine uses one-to-one mapping. Since we can
934 * not trigger on 2 elements this needs explicit mapping rather than
935 * calculation.
936 */
937 switch (pl022->rx_lev_trig) {
938 case SSP_RX_1_OR_MORE_ELEM:
939 rx_conf.src_maxburst = 1;
940 break;
941 case SSP_RX_4_OR_MORE_ELEM:
942 rx_conf.src_maxburst = 4;
943 break;
944 case SSP_RX_8_OR_MORE_ELEM:
945 rx_conf.src_maxburst = 8;
946 break;
947 case SSP_RX_16_OR_MORE_ELEM:
948 rx_conf.src_maxburst = 16;
949 break;
950 case SSP_RX_32_OR_MORE_ELEM:
951 rx_conf.src_maxburst = 32;
952 break;
953 default:
954 rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
955 break;
956 }
957
958 switch (pl022->tx_lev_trig) {
959 case SSP_TX_1_OR_MORE_EMPTY_LOC:
960 tx_conf.dst_maxburst = 1;
961 break;
962 case SSP_TX_4_OR_MORE_EMPTY_LOC:
963 tx_conf.dst_maxburst = 4;
964 break;
965 case SSP_TX_8_OR_MORE_EMPTY_LOC:
966 tx_conf.dst_maxburst = 8;
967 break;
968 case SSP_TX_16_OR_MORE_EMPTY_LOC:
969 tx_conf.dst_maxburst = 16;
970 break;
971 case SSP_TX_32_OR_MORE_EMPTY_LOC:
972 tx_conf.dst_maxburst = 32;
973 break;
974 default:
975 tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
976 break;
977 }
978
931 switch (pl022->read) { 979 switch (pl022->read) {
932 case READING_NULL: 980 case READING_NULL:
933 /* Use the same as for writing */ 981 /* Use the same as for writing */
@@ -1496,6 +1544,7 @@ static void pump_messages(struct work_struct *work)
1496 * and core will be disabled when giveback() is called in each method 1544 * and core will be disabled when giveback() is called in each method
1497 * (poll/interrupt/DMA) 1545 * (poll/interrupt/DMA)
1498 */ 1546 */
1547 pm_runtime_get_sync(&pl022->adev->dev);
1499 amba_vcore_enable(pl022->adev); 1548 amba_vcore_enable(pl022->adev);
1500 amba_pclk_enable(pl022->adev); 1549 amba_pclk_enable(pl022->adev);
1501 clk_enable(pl022->clk); 1550 clk_enable(pl022->clk);
@@ -1629,17 +1678,57 @@ static int verify_controller_parameters(struct pl022 *pl022,
1629 "Communication mode is configured incorrectly\n"); 1678 "Communication mode is configured incorrectly\n");
1630 return -EINVAL; 1679 return -EINVAL;
1631 } 1680 }
1632 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) 1681 switch (chip_info->rx_lev_trig) {
1633 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { 1682 case SSP_RX_1_OR_MORE_ELEM:
1683 case SSP_RX_4_OR_MORE_ELEM:
1684 case SSP_RX_8_OR_MORE_ELEM:
1685 /* These are always OK, all variants can handle this */
1686 break;
1687 case SSP_RX_16_OR_MORE_ELEM:
1688 if (pl022->vendor->fifodepth < 16) {
1689 dev_err(&pl022->adev->dev,
1690 "RX FIFO Trigger Level is configured incorrectly\n");
1691 return -EINVAL;
1692 }
1693 break;
1694 case SSP_RX_32_OR_MORE_ELEM:
1695 if (pl022->vendor->fifodepth < 32) {
1696 dev_err(&pl022->adev->dev,
1697 "RX FIFO Trigger Level is configured incorrectly\n");
1698 return -EINVAL;
1699 }
1700 break;
1701 default:
1634 dev_err(&pl022->adev->dev, 1702 dev_err(&pl022->adev->dev,
1635 "RX FIFO Trigger Level is configured incorrectly\n"); 1703 "RX FIFO Trigger Level is configured incorrectly\n");
1636 return -EINVAL; 1704 return -EINVAL;
1705 break;
1637 } 1706 }
1638 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) 1707 switch (chip_info->tx_lev_trig) {
1639 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { 1708 case SSP_TX_1_OR_MORE_EMPTY_LOC:
1709 case SSP_TX_4_OR_MORE_EMPTY_LOC:
1710 case SSP_TX_8_OR_MORE_EMPTY_LOC:
1711 /* These are always OK, all variants can handle this */
1712 break;
1713 case SSP_TX_16_OR_MORE_EMPTY_LOC:
1714 if (pl022->vendor->fifodepth < 16) {
1715 dev_err(&pl022->adev->dev,
1716 "TX FIFO Trigger Level is configured incorrectly\n");
1717 return -EINVAL;
1718 }
1719 break;
1720 case SSP_TX_32_OR_MORE_EMPTY_LOC:
1721 if (pl022->vendor->fifodepth < 32) {
1722 dev_err(&pl022->adev->dev,
1723 "TX FIFO Trigger Level is configured incorrectly\n");
1724 return -EINVAL;
1725 }
1726 break;
1727 default:
1640 dev_err(&pl022->adev->dev, 1728 dev_err(&pl022->adev->dev,
1641 "TX FIFO Trigger Level is configured incorrectly\n"); 1729 "TX FIFO Trigger Level is configured incorrectly\n");
1642 return -EINVAL; 1730 return -EINVAL;
1731 break;
1643 } 1732 }
1644 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { 1733 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1645 if ((chip_info->ctrl_len < SSP_BITS_4) 1734 if ((chip_info->ctrl_len < SSP_BITS_4)
@@ -1874,6 +1963,9 @@ static int pl022_setup(struct spi_device *spi)
1874 goto err_config_params; 1963 goto err_config_params;
1875 } 1964 }
1876 1965
1966 pl022->rx_lev_trig = chip_info->rx_lev_trig;
1967 pl022->tx_lev_trig = chip_info->tx_lev_trig;
1968
1877 /* Now set controller state based on controller data */ 1969 /* Now set controller state based on controller data */
1878 chip->xfer_type = chip_info->com_mode; 1970 chip->xfer_type = chip_info->com_mode;
1879 if (!chip_info->cs_control) { 1971 if (!chip_info->cs_control) {
@@ -2094,6 +2186,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2094 } 2186 }
2095 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", 2187 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
2096 adev->res.start, pl022->virtbase); 2188 adev->res.start, pl022->virtbase);
2189 pm_runtime_enable(dev);
2190 pm_runtime_resume(dev);
2097 2191
2098 pl022->clk = clk_get(&adev->dev, NULL); 2192 pl022->clk = clk_get(&adev->dev, NULL);
2099 if (IS_ERR(pl022->clk)) { 2193 if (IS_ERR(pl022->clk)) {
@@ -2155,6 +2249,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2155 destroy_queue(pl022); 2249 destroy_queue(pl022);
2156 pl022_dma_remove(pl022); 2250 pl022_dma_remove(pl022);
2157 free_irq(adev->irq[0], pl022); 2251 free_irq(adev->irq[0], pl022);
2252 pm_runtime_disable(&adev->dev);
2158 err_no_irq: 2253 err_no_irq:
2159 clk_put(pl022->clk); 2254 clk_put(pl022->clk);
2160 err_no_clk: 2255 err_no_clk:
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 2a298c029194..b267fd901e54 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -502,7 +502,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
502 goto free_gpios; 502 goto free_gpios;
503 } 503 }
504 hw->mapbase = resource.start; 504 hw->mapbase = resource.start;
505 hw->mapsize = resource.end - resource.start + 1; 505 hw->mapsize = resource_size(&resource);
506 506
507 /* Sanity check */ 507 /* Sanity check */
508 if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) { 508 if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 378e504f89eb..378e504f89eb 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/spi-pxa2xx.c
index dc25bee8d33f..dc25bee8d33f 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/spi-pxa2xx.c
diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi-s3c24xx-fiq.S
index 3793cae361db..059f2dc1fda2 100644
--- a/drivers/spi/spi_s3c24xx_fiq.S
+++ b/drivers/spi/spi-s3c24xx-fiq.S
@@ -17,7 +17,7 @@
17#include <mach/regs-irq.h> 17#include <mach/regs-irq.h>
18#include <plat/regs-spi.h> 18#include <plat/regs-spi.h>
19 19
20#include "spi_s3c24xx_fiq.h" 20#include "spi-s3c24xx-fiq.h"
21 21
22 .text 22 .text
23 23
diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi-s3c24xx-fiq.h
index a5950bb25b51..a5950bb25b51 100644
--- a/drivers/spi/spi_s3c24xx_fiq.h
+++ b/drivers/spi/spi-s3c24xx-fiq.h
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 1a5fcabfd565..1996ac57ef91 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -1,5 +1,4 @@
1/* linux/drivers/spi/spi_s3c24xx.c 1/*
2 *
3 * Copyright (c) 2006 Ben Dooks 2 * Copyright (c) 2006 Ben Dooks
4 * Copyright 2006-2009 Simtec Electronics 3 * Copyright 2006-2009 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk> 4 * Ben Dooks <ben@simtec.co.uk>
@@ -32,7 +31,7 @@
32#include <plat/fiq.h> 31#include <plat/fiq.h>
33#include <asm/fiq.h> 32#include <asm/fiq.h>
34 33
35#include "spi_s3c24xx_fiq.h" 34#include "spi-s3c24xx-fiq.h"
36 35
37/** 36/**
38 * s3c24xx_spi_devstate - per device data 37 * s3c24xx_spi_devstate - per device data
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 8945e201e42e..595dacc7645f 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1,5 +1,4 @@
1/* linux/drivers/spi/spi_s3c64xx.c 1/*
2 *
3 * Copyright (C) 2009 Samsung Electronics Ltd. 2 * Copyright (C) 2009 Samsung Electronics Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com> 3 * Jaswinder Singh <jassi.brar@samsung.com>
5 * 4 *
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi-sh-msiof.c
index e00d94b22250..e00d94b22250 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi-sh-sci.c
index 5c6439161199..e7779c09f6ef 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -78,7 +78,7 @@ static inline u32 getmiso(struct spi_device *dev)
78 78
79#define spidelay(x) ndelay(x) 79#define spidelay(x) ndelay(x)
80 80
81#include "spi_bitbang_txrx.h" 81#include "spi-bitbang-txrx.h"
82 82
83static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi, 83static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
84 unsigned nsecs, u32 word, u8 bits) 84 unsigned nsecs, u32 word, u8 bits)
diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi-sh.c
index 9eedd71ad898..9eedd71ad898 100644
--- a/drivers/spi/spi_sh.c
+++ b/drivers/spi/spi-sh.c
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi-stmp.c
index fadff76eb7e0..fadff76eb7e0 100644
--- a/drivers/spi/spi_stmp.c
+++ b/drivers/spi/spi-stmp.c
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi-tegra.c
index 6c3aa6ecaade..a5a6302dc8e0 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi-tegra.c
@@ -498,14 +498,14 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
498 goto err0; 498 goto err0;
499 } 499 }
500 500
501 if (!request_mem_region(r->start, (r->end - r->start) + 1, 501 if (!request_mem_region(r->start, resource_size(r),
502 dev_name(&pdev->dev))) { 502 dev_name(&pdev->dev))) {
503 ret = -EBUSY; 503 ret = -EBUSY;
504 goto err0; 504 goto err0;
505 } 505 }
506 506
507 tspi->phys = r->start; 507 tspi->phys = r->start;
508 tspi->base = ioremap(r->start, r->end - r->start + 1); 508 tspi->base = ioremap(r->start, resource_size(r));
509 if (!tspi->base) { 509 if (!tspi->base) {
510 dev_err(&pdev->dev, "can't ioremap iomem\n"); 510 dev_err(&pdev->dev, "can't ioremap iomem\n");
511 ret = -ENOMEM; 511 ret = -ENOMEM;
@@ -546,6 +546,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
546 tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id]; 546 tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
547 tspi->rx_dma_req.dev = tspi; 547 tspi->rx_dma_req.dev = tspi;
548 548
549 master->dev.of_node = pdev->dev.of_node;
549 ret = spi_register_master(master); 550 ret = spi_register_master(master);
550 551
551 if (ret < 0) 552 if (ret < 0)
@@ -563,7 +564,7 @@ err3:
563err2: 564err2:
564 iounmap(tspi->base); 565 iounmap(tspi->base);
565err1: 566err1:
566 release_mem_region(r->start, (r->end - r->start) + 1); 567 release_mem_region(r->start, resource_size(r));
567err0: 568err0:
568 spi_master_put(master); 569 spi_master_put(master);
569 return ret; 570 return ret;
@@ -588,17 +589,28 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
588 iounmap(tspi->base); 589 iounmap(tspi->base);
589 590
590 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 591 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
591 release_mem_region(r->start, (r->end - r->start) + 1); 592 release_mem_region(r->start, resource_size(r));
592 593
593 return 0; 594 return 0;
594} 595}
595 596
596MODULE_ALIAS("platform:spi_tegra"); 597MODULE_ALIAS("platform:spi_tegra");
597 598
599#ifdef CONFIG_OF
600static struct of_device_id spi_tegra_of_match_table[] __devinitdata = {
601 { .compatible = "nvidia,tegra20-spi", },
602 {}
603};
604MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table);
605#else /* CONFIG_OF */
606#define spi_tegra_of_match_table NULL
607#endif /* CONFIG_OF */
608
598static struct platform_driver spi_tegra_driver = { 609static struct platform_driver spi_tegra_driver = {
599 .driver = { 610 .driver = {
600 .name = "spi_tegra", 611 .name = "spi_tegra",
601 .owner = THIS_MODULE, 612 .owner = THIS_MODULE,
613 .of_match_table = spi_tegra_of_match_table,
602 }, 614 },
603 .remove = __devexit_p(spi_tegra_remove), 615 .remove = __devexit_p(spi_tegra_remove),
604}; 616};
diff --git a/drivers/spi/ti-ssp-spi.c b/drivers/spi/spi-ti-ssp.c
index ee22795c7973..ee22795c7973 100644
--- a/drivers/spi/ti-ssp-spi.c
+++ b/drivers/spi/spi-ti-ssp.c
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/spi-tle62x0.c
index 32a40876532f..940e73d1cf09 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * tle62x0.c -- support Infineon TLE62x0 driver chips 2 * Support Infineon TLE62x0 driver chips
3 * 3 *
4 * Copyright (c) 2007 Simtec Electronics 4 * Copyright (c) 2007 Simtec Electronics
5 * Ben Dooks, <ben@simtec.co.uk> 5 * Ben Dooks, <ben@simtec.co.uk>
diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi-topcliff-pch.c
index 79e48d451137..1d23f3831866 100644
--- a/drivers/spi/spi_topcliff_pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -26,6 +26,10 @@
26#include <linux/spi/spidev.h> 26#include <linux/spi/spidev.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/platform_device.h>
30
31#include <linux/dmaengine.h>
32#include <linux/pch_dma.h>
29 33
30/* Register offsets */ 34/* Register offsets */
31#define PCH_SPCR 0x00 /* SPI control register */ 35#define PCH_SPCR 0x00 /* SPI control register */
@@ -35,6 +39,7 @@
35#define PCH_SPDRR 0x10 /* SPI read data register */ 39#define PCH_SPDRR 0x10 /* SPI read data register */
36#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ 40#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
37#define PCH_SRST 0x1C /* SPI reset register */ 41#define PCH_SRST 0x1C /* SPI reset register */
42#define PCH_ADDRESS_SIZE 0x20
38 43
39#define PCH_SPSR_TFD 0x000007C0 44#define PCH_SPSR_TFD 0x000007C0
40#define PCH_SPSR_RFD 0x0000F800 45#define PCH_SPSR_RFD 0x0000F800
@@ -52,8 +57,6 @@
52#define STATUS_EXITING 2 57#define STATUS_EXITING 2
53#define PCH_SLEEP_TIME 10 58#define PCH_SLEEP_TIME 10
54 59
55#define PCH_ADDRESS_SIZE 0x20
56
57#define SSN_LOW 0x02U 60#define SSN_LOW 0x02U
58#define SSN_NO_CONTROL 0x00U 61#define SSN_NO_CONTROL 0x00U
59#define PCH_MAX_CS 0xFF 62#define PCH_MAX_CS 0xFF
@@ -73,22 +76,57 @@
73#define SPSR_TFI_BIT (1 << 0) 76#define SPSR_TFI_BIT (1 << 0)
74#define SPSR_RFI_BIT (1 << 1) 77#define SPSR_RFI_BIT (1 << 1)
75#define SPSR_FI_BIT (1 << 2) 78#define SPSR_FI_BIT (1 << 2)
79#define SPSR_ORF_BIT (1 << 3)
76#define SPBRR_SIZE_BIT (1 << 10) 80#define SPBRR_SIZE_BIT (1 << 10)
77 81
78#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) 82#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
83 SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
79 84
80#define SPCR_RFIC_FIELD 20 85#define SPCR_RFIC_FIELD 20
81#define SPCR_TFIC_FIELD 16 86#define SPCR_TFIC_FIELD 16
82 87
83#define SPSR_INT_BITS 0x1F 88#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
84#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) 89#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
85#define MASK_RFIC_SPCR_BITS (~(0xf << 20)) 90#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
86#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12))
87 91
88#define PCH_CLOCK_HZ 50000000 92#define PCH_CLOCK_HZ 50000000
89#define PCH_MAX_SPBR 1023 93#define PCH_MAX_SPBR 1023
90 94
95/* Definition for ML7213 by OKI SEMICONDUCTOR */
96#define PCI_VENDOR_ID_ROHM 0x10DB
97#define PCI_DEVICE_ID_ML7213_SPI 0x802c
98#define PCI_DEVICE_ID_ML7223_SPI 0x800F
91 99
100/*
101 * Set the number of SPI instance max
102 * Intel EG20T PCH : 1ch
103 * OKI SEMICONDUCTOR ML7213 IOH : 2ch
104 * OKI SEMICONDUCTOR ML7223 IOH : 1ch
105*/
106#define PCH_SPI_MAX_DEV 2
107
108#define PCH_BUF_SIZE 4096
109#define PCH_DMA_TRANS_SIZE 12
110
111static int use_dma = 1;
112
113struct pch_spi_dma_ctrl {
114 struct dma_async_tx_descriptor *desc_tx;
115 struct dma_async_tx_descriptor *desc_rx;
116 struct pch_dma_slave param_tx;
117 struct pch_dma_slave param_rx;
118 struct dma_chan *chan_tx;
119 struct dma_chan *chan_rx;
120 struct scatterlist *sg_tx_p;
121 struct scatterlist *sg_rx_p;
122 struct scatterlist sg_tx;
123 struct scatterlist sg_rx;
124 int nent;
125 void *tx_buf_virt;
126 void *rx_buf_virt;
127 dma_addr_t tx_buf_dma;
128 dma_addr_t rx_buf_dma;
129};
92/** 130/**
93 * struct pch_spi_data - Holds the SPI channel specific details 131 * struct pch_spi_data - Holds the SPI channel specific details
94 * @io_remap_addr: The remapped PCI base address 132 * @io_remap_addr: The remapped PCI base address
@@ -121,9 +159,13 @@
121 * @cur_trans: The current transfer that this SPI driver is 159 * @cur_trans: The current transfer that this SPI driver is
122 * handling 160 * handling
123 * @board_dat: Reference to the SPI device data structure 161 * @board_dat: Reference to the SPI device data structure
162 * @plat_dev: platform_device structure
163 * @ch: SPI channel number
164 * @irq_reg_sts: Status of IRQ registration
124 */ 165 */
125struct pch_spi_data { 166struct pch_spi_data {
126 void __iomem *io_remap_addr; 167 void __iomem *io_remap_addr;
168 unsigned long io_base_addr;
127 struct spi_master *master; 169 struct spi_master *master;
128 struct work_struct work; 170 struct work_struct work;
129 struct workqueue_struct *wk; 171 struct workqueue_struct *wk;
@@ -144,27 +186,36 @@ struct pch_spi_data {
144 struct spi_message *current_msg; 186 struct spi_message *current_msg;
145 struct spi_transfer *cur_trans; 187 struct spi_transfer *cur_trans;
146 struct pch_spi_board_data *board_dat; 188 struct pch_spi_board_data *board_dat;
189 struct platform_device *plat_dev;
190 int ch;
191 struct pch_spi_dma_ctrl dma;
192 int use_dma;
193 u8 irq_reg_sts;
147}; 194};
148 195
149/** 196/**
150 * struct pch_spi_board_data - Holds the SPI device specific details 197 * struct pch_spi_board_data - Holds the SPI device specific details
151 * @pdev: Pointer to the PCI device 198 * @pdev: Pointer to the PCI device
152 * @irq_reg_sts: Status of IRQ registration
153 * @pci_req_sts: Status of pci_request_regions
154 * @suspend_sts: Status of suspend 199 * @suspend_sts: Status of suspend
155 * @data: Pointer to SPI channel data structure 200 * @num: The number of SPI device instance
156 */ 201 */
157struct pch_spi_board_data { 202struct pch_spi_board_data {
158 struct pci_dev *pdev; 203 struct pci_dev *pdev;
159 u8 irq_reg_sts;
160 u8 pci_req_sts;
161 u8 suspend_sts; 204 u8 suspend_sts;
162 struct pch_spi_data *data; 205 int num;
206};
207
208struct pch_pd_dev_save {
209 int num;
210 struct platform_device *pd_save[PCH_SPI_MAX_DEV];
211 struct pch_spi_board_data *board_dat;
163}; 212};
164 213
165static struct pci_device_id pch_spi_pcidev_id[] = { 214static struct pci_device_id pch_spi_pcidev_id[] = {
166 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)}, 215 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
167 {0,} 216 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
217 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
218 { }
168}; 219};
169 220
170/** 221/**
@@ -251,10 +302,10 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
251 reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ 302 reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
252 303
253 /* reset rx threshold */ 304 /* reset rx threshold */
254 reg_spcr_val &= MASK_RFIC_SPCR_BITS; 305 reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
255 reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); 306 reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
256 iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))), 307
257 (io_remap_addr + PCH_SPCR)); 308 iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
258 } 309 }
259 310
260 /* update counts */ 311 /* update counts */
@@ -265,12 +316,15 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
265 316
266 /* if transfer complete interrupt */ 317 /* if transfer complete interrupt */
267 if (reg_spsr_val & SPSR_FI_BIT) { 318 if (reg_spsr_val & SPSR_FI_BIT) {
268 /* disable FI & RFI interrupts */ 319 if (tx_index < bpw_len)
269 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, 320 dev_err(&data->master->dev,
270 SPCR_FIE_BIT | SPCR_RFIE_BIT); 321 "%s : Transfer is not completed", __func__);
322 /* disable interrupts */
323 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
271 324
272 /* transfer is completed;inform pch_spi_process_messages */ 325 /* transfer is completed;inform pch_spi_process_messages */
273 data->transfer_complete = true; 326 data->transfer_complete = true;
327 data->transfer_active = false;
274 wake_up(&data->wait); 328 wake_up(&data->wait);
275 } 329 }
276} 330}
@@ -283,24 +337,28 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
283static irqreturn_t pch_spi_handler(int irq, void *dev_id) 337static irqreturn_t pch_spi_handler(int irq, void *dev_id)
284{ 338{
285 u32 reg_spsr_val; 339 u32 reg_spsr_val;
286 struct pch_spi_data *data;
287 void __iomem *spsr; 340 void __iomem *spsr;
288 void __iomem *io_remap_addr; 341 void __iomem *io_remap_addr;
289 irqreturn_t ret = IRQ_NONE; 342 irqreturn_t ret = IRQ_NONE;
290 struct pch_spi_board_data *board_dat = dev_id; 343 struct pch_spi_data *data = dev_id;
344 struct pch_spi_board_data *board_dat = data->board_dat;
291 345
292 if (board_dat->suspend_sts) { 346 if (board_dat->suspend_sts) {
293 dev_dbg(&board_dat->pdev->dev, 347 dev_dbg(&board_dat->pdev->dev,
294 "%s returning due to suspend\n", __func__); 348 "%s returning due to suspend\n", __func__);
295 return IRQ_NONE; 349 return IRQ_NONE;
296 } 350 }
351 if (data->use_dma)
352 return IRQ_NONE;
297 353
298 data = board_dat->data;
299 io_remap_addr = data->io_remap_addr; 354 io_remap_addr = data->io_remap_addr;
300 spsr = io_remap_addr + PCH_SPSR; 355 spsr = io_remap_addr + PCH_SPSR;
301 356
302 reg_spsr_val = ioread32(spsr); 357 reg_spsr_val = ioread32(spsr);
303 358
359 if (reg_spsr_val & SPSR_ORF_BIT)
360 dev_err(&board_dat->pdev->dev, "%s Over run error", __func__);
361
304 /* Check if the interrupt is for SPI device */ 362 /* Check if the interrupt is for SPI device */
305 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { 363 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
306 pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); 364 pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
@@ -326,7 +384,7 @@ static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
326 if (n_spbr > PCH_MAX_SPBR) 384 if (n_spbr > PCH_MAX_SPBR)
327 n_spbr = PCH_MAX_SPBR; 385 n_spbr = PCH_MAX_SPBR;
328 386
329 pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); 387 pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
330} 388}
331 389
332/** 390/**
@@ -435,26 +493,27 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
435 dev_dbg(&pspi->dev, "%s Transfer List not empty. " 493 dev_dbg(&pspi->dev, "%s Transfer List not empty. "
436 "Transfer Speed is set.\n", __func__); 494 "Transfer Speed is set.\n", __func__);
437 495
496 spin_lock_irqsave(&data->lock, flags);
438 /* validate Tx/Rx buffers and Transfer length */ 497 /* validate Tx/Rx buffers and Transfer length */
439 list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { 498 list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
440 if (!transfer->tx_buf && !transfer->rx_buf) { 499 if (!transfer->tx_buf && !transfer->rx_buf) {
441 dev_err(&pspi->dev, 500 dev_err(&pspi->dev,
442 "%s Tx and Rx buffer NULL\n", __func__); 501 "%s Tx and Rx buffer NULL\n", __func__);
443 retval = -EINVAL; 502 retval = -EINVAL;
444 goto err_out; 503 goto err_return_spinlock;
445 } 504 }
446 505
447 if (!transfer->len) { 506 if (!transfer->len) {
448 dev_err(&pspi->dev, "%s Transfer length invalid\n", 507 dev_err(&pspi->dev, "%s Transfer length invalid\n",
449 __func__); 508 __func__);
450 retval = -EINVAL; 509 retval = -EINVAL;
451 goto err_out; 510 goto err_return_spinlock;
452 } 511 }
453 512
454 dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length" 513 dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
455 " valid\n", __func__); 514 " valid\n", __func__);
456 515
457 /* if baud rate hs been specified validate the same */ 516 /* if baud rate has been specified validate the same */
458 if (transfer->speed_hz > PCH_MAX_BAUDRATE) 517 if (transfer->speed_hz > PCH_MAX_BAUDRATE)
459 transfer->speed_hz = PCH_MAX_BAUDRATE; 518 transfer->speed_hz = PCH_MAX_BAUDRATE;
460 519
@@ -465,25 +524,24 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
465 retval = -EINVAL; 524 retval = -EINVAL;
466 dev_err(&pspi->dev, 525 dev_err(&pspi->dev,
467 "%s Invalid bits per word\n", __func__); 526 "%s Invalid bits per word\n", __func__);
468 goto err_out; 527 goto err_return_spinlock;
469 } 528 }
470 } 529 }
471 } 530 }
472 531 spin_unlock_irqrestore(&data->lock, flags);
473 spin_lock_irqsave(&data->lock, flags);
474 532
475 /* We won't process any messages if we have been asked to terminate */ 533 /* We won't process any messages if we have been asked to terminate */
476 if (data->status == STATUS_EXITING) { 534 if (data->status == STATUS_EXITING) {
477 dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); 535 dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
478 retval = -ESHUTDOWN; 536 retval = -ESHUTDOWN;
479 goto err_return_spinlock; 537 goto err_out;
480 } 538 }
481 539
482 /* If suspended ,return -EINVAL */ 540 /* If suspended ,return -EINVAL */
483 if (data->board_dat->suspend_sts) { 541 if (data->board_dat->suspend_sts) {
484 dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); 542 dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
485 retval = -EINVAL; 543 retval = -EINVAL;
486 goto err_return_spinlock; 544 goto err_out;
487 } 545 }
488 546
489 /* set status of message */ 547 /* set status of message */
@@ -491,9 +549,11 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
491 dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); 549 dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
492 550
493 pmsg->status = -EINPROGRESS; 551 pmsg->status = -EINPROGRESS;
494 552 spin_lock_irqsave(&data->lock, flags);
495 /* add message to queue */ 553 /* add message to queue */
496 list_add_tail(&pmsg->queue, &data->queue); 554 list_add_tail(&pmsg->queue, &data->queue);
555 spin_unlock_irqrestore(&data->lock, flags);
556
497 dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__); 557 dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
498 558
499 /* schedule work queue to run */ 559 /* schedule work queue to run */
@@ -502,11 +562,13 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
502 562
503 retval = 0; 563 retval = 0;
504 564
505err_return_spinlock:
506 spin_unlock_irqrestore(&data->lock, flags);
507err_out: 565err_out:
508 dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); 566 dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
509 return retval; 567 return retval;
568err_return_spinlock:
569 dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
570 spin_unlock_irqrestore(&data->lock, flags);
571 return retval;
510} 572}
511 573
512static inline void pch_spi_select_chip(struct pch_spi_data *data, 574static inline void pch_spi_select_chip(struct pch_spi_data *data,
@@ -527,8 +589,7 @@ static inline void pch_spi_select_chip(struct pch_spi_data *data,
527 pch_spi_setup_transfer(pspi); 589 pch_spi_setup_transfer(pspi);
528} 590}
529 591
530static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, 592static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
531 struct spi_message **ppmsg)
532{ 593{
533 int size; 594 int size;
534 u32 n_writes; 595 u32 n_writes;
@@ -537,8 +598,6 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
537 const u8 *tx_buf; 598 const u8 *tx_buf;
538 const u16 *tx_sbuf; 599 const u16 *tx_sbuf;
539 600
540 pmsg = *ppmsg;
541
542 /* set baud rate if needed */ 601 /* set baud rate if needed */
543 if (data->cur_trans->speed_hz) { 602 if (data->cur_trans->speed_hz) {
544 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); 603 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
@@ -621,10 +680,9 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
621 data->transfer_active = true; 680 data->transfer_active = true;
622} 681}
623 682
624 683static void pch_spi_nomore_transfer(struct pch_spi_data *data)
625static void pch_spi_nomore_transfer(struct pch_spi_data *data,
626 struct spi_message *pmsg)
627{ 684{
685 struct spi_message *pmsg;
628 dev_dbg(&data->master->dev, "%s called\n", __func__); 686 dev_dbg(&data->master->dev, "%s called\n", __func__);
629 /* Invoke complete callback 687 /* Invoke complete callback
630 * [To the spi core..indicating end of transfer] */ 688 * [To the spi core..indicating end of transfer] */
@@ -675,29 +733,21 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data,
675 733
676static void pch_spi_set_ir(struct pch_spi_data *data) 734static void pch_spi_set_ir(struct pch_spi_data *data)
677{ 735{
678 /* enable interrupts */ 736 /* enable interrupts, set threshold, enable SPI */
679 if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { 737 if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
680 /* set receive threshold to PCH_RX_THOLD */ 738 /* set receive threshold to PCH_RX_THOLD */
681 pch_spi_setclr_reg(data->master, PCH_SPCR, 739 pch_spi_setclr_reg(data->master, PCH_SPCR,
682 PCH_RX_THOLD << SPCR_RFIC_FIELD, 740 PCH_RX_THOLD << SPCR_RFIC_FIELD |
683 ~MASK_RFIC_SPCR_BITS); 741 SPCR_FIE_BIT | SPCR_RFIE_BIT |
684 /* enable FI and RFI interrupts */ 742 SPCR_ORIE_BIT | SPCR_SPE_BIT,
685 pch_spi_setclr_reg(data->master, PCH_SPCR, 743 MASK_RFIC_SPCR_BITS | PCH_ALL);
686 SPCR_RFIE_BIT | SPCR_FIE_BIT, 0); 744 else
687 } else {
688 /* set receive threshold to maximum */ 745 /* set receive threshold to maximum */
689 pch_spi_setclr_reg(data->master, PCH_SPCR, 746 pch_spi_setclr_reg(data->master, PCH_SPCR,
690 PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, 747 PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
691 ~MASK_TFIC_SPCR_BITS); 748 SPCR_FIE_BIT | SPCR_ORIE_BIT |
692 /* enable FI interrupt */ 749 SPCR_SPE_BIT,
693 pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0); 750 MASK_RFIC_SPCR_BITS | PCH_ALL);
694 }
695
696 dev_dbg(&data->master->dev,
697 "%s:invoking pch_spi_set_enable to enable SPI\n", __func__);
698
699 /* SPI set enable */
700 pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0);
701 751
702 /* Wait until the transfer completes; go to sleep after 752 /* Wait until the transfer completes; go to sleep after
703 initiating the transfer. */ 753 initiating the transfer. */
@@ -710,15 +760,13 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
710 dev_dbg(&data->master->dev, 760 dev_dbg(&data->master->dev,
711 "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); 761 "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
712 762
713 data->transfer_active = false;
714 dev_dbg(&data->master->dev,
715 "%s set data->transfer_active = false\n", __func__);
716
717 /* clear all interrupts */ 763 /* clear all interrupts */
718 pch_spi_writereg(data->master, PCH_SPSR, 764 pch_spi_writereg(data->master, PCH_SPSR,
719 pch_spi_readreg(data->master, PCH_SPSR)); 765 pch_spi_readreg(data->master, PCH_SPSR));
720 /* disable interrupts */ 766 /* Disable interrupts and SPI transfer */
721 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); 767 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
768 /* clear FIFO */
769 pch_spi_clear_fifo(data->master);
722} 770}
723 771
724static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) 772static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
@@ -742,6 +790,327 @@ static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
742 } 790 }
743} 791}
744 792
793static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
794{
795 int j;
796 u8 *rx_buf;
797 u16 *rx_sbuf;
798 const u8 *rx_dma_buf;
799 const u16 *rx_dma_sbuf;
800
801 /* copy Rx Data */
802 if (!data->cur_trans->rx_buf)
803 return;
804
805 if (bpw == 8) {
806 rx_buf = data->cur_trans->rx_buf;
807 rx_dma_buf = data->dma.rx_buf_virt;
808 for (j = 0; j < data->bpw_len; j++)
809 *rx_buf++ = *rx_dma_buf++ & 0xFF;
810 } else {
811 rx_sbuf = data->cur_trans->rx_buf;
812 rx_dma_sbuf = data->dma.rx_buf_virt;
813 for (j = 0; j < data->bpw_len; j++)
814 *rx_sbuf++ = *rx_dma_sbuf++;
815 }
816}
817
818static void pch_spi_start_transfer(struct pch_spi_data *data)
819{
820 struct pch_spi_dma_ctrl *dma;
821 unsigned long flags;
822
823 dma = &data->dma;
824
825 spin_lock_irqsave(&data->lock, flags);
826
827 /* disable interrupts, SPI set enable */
828 pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
829
830 spin_unlock_irqrestore(&data->lock, flags);
831
832 /* Wait until the transfer completes; go to sleep after
833 initiating the transfer. */
834 dev_dbg(&data->master->dev,
835 "%s:waiting for transfer to get over\n", __func__);
836 wait_event_interruptible(data->wait, data->transfer_complete);
837
838 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
839 DMA_FROM_DEVICE);
840 async_tx_ack(dma->desc_rx);
841 async_tx_ack(dma->desc_tx);
842 kfree(dma->sg_tx_p);
843 kfree(dma->sg_rx_p);
844
845 spin_lock_irqsave(&data->lock, flags);
846 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
847 dev_dbg(&data->master->dev,
848 "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
849
850 /* clear fifo threshold, disable interrupts, disable SPI transfer */
851 pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
852 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
853 SPCR_SPE_BIT);
854 /* clear all interrupts */
855 pch_spi_writereg(data->master, PCH_SPSR,
856 pch_spi_readreg(data->master, PCH_SPSR));
857 /* clear FIFO */
858 pch_spi_clear_fifo(data->master);
859
860 spin_unlock_irqrestore(&data->lock, flags);
861}
862
863static void pch_dma_rx_complete(void *arg)
864{
865 struct pch_spi_data *data = arg;
866
867 /* transfer is completed;inform pch_spi_process_messages_dma */
868 data->transfer_complete = true;
869 wake_up_interruptible(&data->wait);
870}
871
872static bool pch_spi_filter(struct dma_chan *chan, void *slave)
873{
874 struct pch_dma_slave *param = slave;
875
876 if ((chan->chan_id == param->chan_id) &&
877 (param->dma_dev == chan->device->dev)) {
878 chan->private = param;
879 return true;
880 } else {
881 return false;
882 }
883}
884
885static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
886{
887 dma_cap_mask_t mask;
888 struct dma_chan *chan;
889 struct pci_dev *dma_dev;
890 struct pch_dma_slave *param;
891 struct pch_spi_dma_ctrl *dma;
892 unsigned int width;
893
894 if (bpw == 8)
895 width = PCH_DMA_WIDTH_1_BYTE;
896 else
897 width = PCH_DMA_WIDTH_2_BYTES;
898
899 dma = &data->dma;
900 dma_cap_zero(mask);
901 dma_cap_set(DMA_SLAVE, mask);
902
903 /* Get DMA's dev information */
904 dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0));
905
906 /* Set Tx DMA */
907 param = &dma->param_tx;
908 param->dma_dev = &dma_dev->dev;
909 param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
910 param->tx_reg = data->io_base_addr + PCH_SPDWR;
911 param->width = width;
912 chan = dma_request_channel(mask, pch_spi_filter, param);
913 if (!chan) {
914 dev_err(&data->master->dev,
915 "ERROR: dma_request_channel FAILS(Tx)\n");
916 data->use_dma = 0;
917 return;
918 }
919 dma->chan_tx = chan;
920
921 /* Set Rx DMA */
922 param = &dma->param_rx;
923 param->dma_dev = &dma_dev->dev;
924 param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
925 param->rx_reg = data->io_base_addr + PCH_SPDRR;
926 param->width = width;
927 chan = dma_request_channel(mask, pch_spi_filter, param);
928 if (!chan) {
929 dev_err(&data->master->dev,
930 "ERROR: dma_request_channel FAILS(Rx)\n");
931 dma_release_channel(dma->chan_tx);
932 dma->chan_tx = NULL;
933 data->use_dma = 0;
934 return;
935 }
936 dma->chan_rx = chan;
937}
938
939static void pch_spi_release_dma(struct pch_spi_data *data)
940{
941 struct pch_spi_dma_ctrl *dma;
942
943 dma = &data->dma;
944 if (dma->chan_tx) {
945 dma_release_channel(dma->chan_tx);
946 dma->chan_tx = NULL;
947 }
948 if (dma->chan_rx) {
949 dma_release_channel(dma->chan_rx);
950 dma->chan_rx = NULL;
951 }
952 return;
953}
954
955static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
956{
957 const u8 *tx_buf;
958 const u16 *tx_sbuf;
959 u8 *tx_dma_buf;
960 u16 *tx_dma_sbuf;
961 struct scatterlist *sg;
962 struct dma_async_tx_descriptor *desc_tx;
963 struct dma_async_tx_descriptor *desc_rx;
964 int num;
965 int i;
966 int size;
967 int rem;
968 unsigned long flags;
969 struct pch_spi_dma_ctrl *dma;
970
971 dma = &data->dma;
972
973 /* set baud rate if needed */
974 if (data->cur_trans->speed_hz) {
975 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
976 spin_lock_irqsave(&data->lock, flags);
977 pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
978 spin_unlock_irqrestore(&data->lock, flags);
979 }
980
981 /* set bits per word if needed */
982 if (data->cur_trans->bits_per_word &&
983 (data->current_msg->spi->bits_per_word !=
984 data->cur_trans->bits_per_word)) {
985 dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
986 spin_lock_irqsave(&data->lock, flags);
987 pch_spi_set_bits_per_word(data->master,
988 data->cur_trans->bits_per_word);
989 spin_unlock_irqrestore(&data->lock, flags);
990 *bpw = data->cur_trans->bits_per_word;
991 } else {
992 *bpw = data->current_msg->spi->bits_per_word;
993 }
994 data->bpw_len = data->cur_trans->len / (*bpw / 8);
995
996 /* copy Tx Data */
997 if (data->cur_trans->tx_buf != NULL) {
998 if (*bpw == 8) {
999 tx_buf = data->cur_trans->tx_buf;
1000 tx_dma_buf = dma->tx_buf_virt;
1001 for (i = 0; i < data->bpw_len; i++)
1002 *tx_dma_buf++ = *tx_buf++;
1003 } else {
1004 tx_sbuf = data->cur_trans->tx_buf;
1005 tx_dma_sbuf = dma->tx_buf_virt;
1006 for (i = 0; i < data->bpw_len; i++)
1007 *tx_dma_sbuf++ = *tx_sbuf++;
1008 }
1009 }
1010 if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
1011 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
1012 size = PCH_DMA_TRANS_SIZE;
1013 rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
1014 } else {
1015 num = 1;
1016 size = data->bpw_len;
1017 rem = data->bpw_len;
1018 }
1019 dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
1020 __func__, num, size, rem);
1021 spin_lock_irqsave(&data->lock, flags);
1022
1023 /* set receive fifo threshold and transmit fifo threshold */
1024 pch_spi_setclr_reg(data->master, PCH_SPCR,
1025 ((size - 1) << SPCR_RFIC_FIELD) |
1026 ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
1027 SPCR_TFIC_FIELD),
1028 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
1029
1030 spin_unlock_irqrestore(&data->lock, flags);
1031
1032 /* RX */
1033 dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
1034 sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
1035 /* offset, length setting */
1036 sg = dma->sg_rx_p;
1037 for (i = 0; i < num; i++, sg++) {
1038 if (i == 0) {
1039 sg->offset = 0;
1040 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
1041 sg->offset);
1042 sg_dma_len(sg) = rem;
1043 } else {
1044 sg->offset = rem + size * (i - 1);
1045 sg->offset = sg->offset * (*bpw / 8);
1046 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1047 sg->offset);
1048 sg_dma_len(sg) = size;
1049 }
1050 sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
1051 }
1052 sg = dma->sg_rx_p;
1053 desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
1054 num, DMA_FROM_DEVICE,
1055 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1056 if (!desc_rx) {
1057 dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
1058 __func__);
1059 return;
1060 }
1061 dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
1062 desc_rx->callback = pch_dma_rx_complete;
1063 desc_rx->callback_param = data;
1064 dma->nent = num;
1065 dma->desc_rx = desc_rx;
1066
1067 /* TX */
1068 dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
1069 sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
1070 /* offset, length setting */
1071 sg = dma->sg_tx_p;
1072 for (i = 0; i < num; i++, sg++) {
1073 if (i == 0) {
1074 sg->offset = 0;
1075 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
1076 sg->offset);
1077 sg_dma_len(sg) = rem;
1078 } else {
1079 sg->offset = rem + size * (i - 1);
1080 sg->offset = sg->offset * (*bpw / 8);
1081 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
1082 sg->offset);
1083 sg_dma_len(sg) = size;
1084 }
1085 sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
1086 }
1087 sg = dma->sg_tx_p;
1088 desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
1089 sg, num, DMA_TO_DEVICE,
1090 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1091 if (!desc_tx) {
1092 dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
1093 __func__);
1094 return;
1095 }
1096 dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
1097 desc_tx->callback = NULL;
1098 desc_tx->callback_param = data;
1099 dma->nent = num;
1100 dma->desc_tx = desc_tx;
1101
1102 dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
1103 "0x2 to SSNXCR\n", __func__);
1104
1105 spin_lock_irqsave(&data->lock, flags);
1106 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
1107 desc_rx->tx_submit(desc_rx);
1108 desc_tx->tx_submit(desc_tx);
1109 spin_unlock_irqrestore(&data->lock, flags);
1110
1111 /* reset transfer complete flag */
1112 data->transfer_complete = false;
1113}
745 1114
746static void pch_spi_process_messages(struct work_struct *pwork) 1115static void pch_spi_process_messages(struct work_struct *pwork)
747{ 1116{
@@ -753,13 +1122,10 @@ static void pch_spi_process_messages(struct work_struct *pwork)
753 dev_dbg(&data->master->dev, "%s data initialized\n", __func__); 1122 dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
754 1123
755 spin_lock(&data->lock); 1124 spin_lock(&data->lock);
756
757 /* check if suspend has been initiated;if yes flush queue */ 1125 /* check if suspend has been initiated;if yes flush queue */
758 if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { 1126 if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
759 dev_dbg(&data->master->dev, 1127 dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
760 "%s suspend/remove initiated,flushing queue\n", 1128 "flushing queue\n", __func__);
761 __func__);
762
763 list_for_each_entry(pmsg, data->queue.next, queue) { 1129 list_for_each_entry(pmsg, data->queue.next, queue) {
764 pmsg->status = -EIO; 1130 pmsg->status = -EIO;
765 1131
@@ -793,53 +1159,42 @@ static void pch_spi_process_messages(struct work_struct *pwork)
793 1159
794 spin_unlock(&data->lock); 1160 spin_unlock(&data->lock);
795 1161
1162 if (data->use_dma)
1163 pch_spi_request_dma(data,
1164 data->current_msg->spi->bits_per_word);
796 do { 1165 do {
797 /* If we are already processing a message get the next 1166 /* If we are already processing a message get the next
798 transfer structure from the message otherwise retrieve 1167 transfer structure from the message otherwise retrieve
799 the 1st transfer request from the message. */ 1168 the 1st transfer request from the message. */
800 spin_lock(&data->lock); 1169 spin_lock(&data->lock);
801
802 if (data->cur_trans == NULL) { 1170 if (data->cur_trans == NULL) {
803 data->cur_trans = 1171 data->cur_trans =
804 list_entry(data->current_msg->transfers. 1172 list_entry(data->current_msg->transfers.next,
805 next, struct spi_transfer, 1173 struct spi_transfer, transfer_list);
806 transfer_list); 1174 dev_dbg(&data->master->dev, "%s "
807 dev_dbg(&data->master->dev, 1175 ":Getting 1st transfer message\n", __func__);
808 "%s :Getting 1st transfer message\n", __func__);
809 } else { 1176 } else {
810 data->cur_trans = 1177 data->cur_trans =
811 list_entry(data->cur_trans->transfer_list.next, 1178 list_entry(data->cur_trans->transfer_list.next,
812 struct spi_transfer, 1179 struct spi_transfer, transfer_list);
813 transfer_list); 1180 dev_dbg(&data->master->dev, "%s "
814 dev_dbg(&data->master->dev, 1181 ":Getting next transfer message\n", __func__);
815 "%s :Getting next transfer message\n",
816 __func__);
817 } 1182 }
818
819 spin_unlock(&data->lock); 1183 spin_unlock(&data->lock);
820 1184
821 pch_spi_set_tx(data, &bpw, &pmsg); 1185 if (data->use_dma) {
822 1186 pch_spi_handle_dma(data, &bpw);
823 /* Control interrupt*/ 1187 pch_spi_start_transfer(data);
824 pch_spi_set_ir(data); 1188 pch_spi_copy_rx_data_for_dma(data, bpw);
825 1189 } else {
826 /* Disable SPI transfer */ 1190 pch_spi_set_tx(data, &bpw);
827 pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, 1191 pch_spi_set_ir(data);
828 SPCR_SPE_BIT); 1192 pch_spi_copy_rx_data(data, bpw);
829 1193 kfree(data->pkt_rx_buff);
830 /* clear FIFO */ 1194 data->pkt_rx_buff = NULL;
831 pch_spi_clear_fifo(data->master); 1195 kfree(data->pkt_tx_buff);
832 1196 data->pkt_tx_buff = NULL;
833 /* copy Rx Data */ 1197 }
834 pch_spi_copy_rx_data(data, bpw);
835
836 /* free memory */
837 kfree(data->pkt_rx_buff);
838 data->pkt_rx_buff = NULL;
839
840 kfree(data->pkt_tx_buff);
841 data->pkt_tx_buff = NULL;
842
843 /* increment message count */ 1198 /* increment message count */
844 data->current_msg->actual_length += data->cur_trans->len; 1199 data->current_msg->actual_length += data->cur_trans->len;
845 1200
@@ -860,125 +1215,60 @@ static void pch_spi_process_messages(struct work_struct *pwork)
860 /* No more transfer in this message. */ 1215 /* No more transfer in this message. */
861 if ((data->cur_trans->transfer_list.next) == 1216 if ((data->cur_trans->transfer_list.next) ==
862 &(data->current_msg->transfers)) { 1217 &(data->current_msg->transfers)) {
863 pch_spi_nomore_transfer(data, pmsg); 1218 pch_spi_nomore_transfer(data);
864 } 1219 }
865 1220
866 spin_unlock(&data->lock); 1221 spin_unlock(&data->lock);
867 1222
868 } while (data->cur_trans != NULL); 1223 } while (data->cur_trans != NULL);
1224
1225 if (data->use_dma)
1226 pch_spi_release_dma(data);
869} 1227}
870 1228
871static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) 1229static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
1230 struct pch_spi_data *data)
872{ 1231{
873 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); 1232 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
874 1233
875 /* free workqueue */ 1234 /* free workqueue */
876 if (board_dat->data->wk != NULL) { 1235 if (data->wk != NULL) {
877 destroy_workqueue(board_dat->data->wk); 1236 destroy_workqueue(data->wk);
878 board_dat->data->wk = NULL; 1237 data->wk = NULL;
879 dev_dbg(&board_dat->pdev->dev, 1238 dev_dbg(&board_dat->pdev->dev,
880 "%s destroy_workqueue invoked successfully\n", 1239 "%s destroy_workqueue invoked successfully\n",
881 __func__); 1240 __func__);
882 } 1241 }
883
884 /* disable interrupts & free IRQ */
885 if (board_dat->irq_reg_sts) {
886 /* disable interrupts */
887 pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
888 PCH_ALL);
889
890 /* free IRQ */
891 free_irq(board_dat->pdev->irq, board_dat);
892
893 dev_dbg(&board_dat->pdev->dev,
894 "%s free_irq invoked successfully\n", __func__);
895
896 board_dat->irq_reg_sts = false;
897 }
898
899 /* unmap PCI base address */
900 if (board_dat->data->io_remap_addr != 0) {
901 pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr);
902
903 board_dat->data->io_remap_addr = 0;
904
905 dev_dbg(&board_dat->pdev->dev,
906 "%s pci_iounmap invoked successfully\n", __func__);
907 }
908
909 /* release PCI region */
910 if (board_dat->pci_req_sts) {
911 pci_release_regions(board_dat->pdev);
912 dev_dbg(&board_dat->pdev->dev,
913 "%s pci_release_regions invoked successfully\n",
914 __func__);
915 board_dat->pci_req_sts = false;
916 }
917} 1242}
918 1243
919static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) 1244static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
1245 struct pch_spi_data *data)
920{ 1246{
921 void __iomem *io_remap_addr; 1247 int retval = 0;
922 int retval; 1248
923 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); 1249 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
924 1250
925 /* create workqueue */ 1251 /* create workqueue */
926 board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); 1252 data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
927 if (!board_dat->data->wk) { 1253 if (!data->wk) {
928 dev_err(&board_dat->pdev->dev, 1254 dev_err(&board_dat->pdev->dev,
929 "%s create_singlet hread_workqueue failed\n", __func__); 1255 "%s create_singlet hread_workqueue failed\n", __func__);
930 retval = -EBUSY; 1256 retval = -EBUSY;
931 goto err_return; 1257 goto err_return;
932 } 1258 }
933 1259
934 dev_dbg(&board_dat->pdev->dev,
935 "%s create_singlethread_workqueue success\n", __func__);
936
937 retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME);
938 if (retval != 0) {
939 dev_err(&board_dat->pdev->dev,
940 "%s request_region failed\n", __func__);
941 goto err_return;
942 }
943
944 board_dat->pci_req_sts = true;
945
946 io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
947 if (io_remap_addr == 0) {
948 dev_err(&board_dat->pdev->dev,
949 "%s pci_iomap failed\n", __func__);
950 retval = -ENOMEM;
951 goto err_return;
952 }
953
954 /* calculate base address for all channels */
955 board_dat->data->io_remap_addr = io_remap_addr;
956
957 /* reset PCH SPI h/w */ 1260 /* reset PCH SPI h/w */
958 pch_spi_reset(board_dat->data->master); 1261 pch_spi_reset(data->master);
959 dev_dbg(&board_dat->pdev->dev, 1262 dev_dbg(&board_dat->pdev->dev,
960 "%s pch_spi_reset invoked successfully\n", __func__); 1263 "%s pch_spi_reset invoked successfully\n", __func__);
961 1264
962 /* register IRQ */
963 retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
964 IRQF_SHARED, KBUILD_MODNAME, board_dat);
965 if (retval != 0) {
966 dev_err(&board_dat->pdev->dev,
967 "%s request_irq failed\n", __func__);
968 goto err_return;
969 }
970
971 dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n",
972 __func__, retval);
973
974 board_dat->irq_reg_sts = true;
975 dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); 1265 dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
976 1266
977err_return: 1267err_return:
978 if (retval != 0) { 1268 if (retval != 0) {
979 dev_err(&board_dat->pdev->dev, 1269 dev_err(&board_dat->pdev->dev,
980 "%s FAIL:invoking pch_spi_free_resources\n", __func__); 1270 "%s FAIL:invoking pch_spi_free_resources\n", __func__);
981 pch_spi_free_resources(board_dat); 1271 pch_spi_free_resources(board_dat, data);
982 } 1272 }
983 1273
984 dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); 1274 dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
@@ -986,255 +1276,387 @@ err_return:
986 return retval; 1276 return retval;
987} 1277}
988 1278
989static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1279static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
1280 struct pch_spi_data *data)
990{ 1281{
1282 struct pch_spi_dma_ctrl *dma;
1283
1284 dma = &data->dma;
1285 if (dma->tx_buf_dma)
1286 dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
1287 dma->tx_buf_virt, dma->tx_buf_dma);
1288 if (dma->rx_buf_dma)
1289 dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
1290 dma->rx_buf_virt, dma->rx_buf_dma);
1291 return;
1292}
991 1293
992 struct spi_master *master; 1294static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
993 1295 struct pch_spi_data *data)
994 struct pch_spi_board_data *board_dat; 1296{
995 int retval; 1297 struct pch_spi_dma_ctrl *dma;
996 1298
997 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); 1299 dma = &data->dma;
998 1300 /* Get Consistent memory for Tx DMA */
999 /* allocate memory for private data */ 1301 dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
1000 board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); 1302 PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
1001 if (board_dat == NULL) { 1303 /* Get Consistent memory for Rx DMA */
1002 dev_err(&pdev->dev, 1304 dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
1003 " %s memory allocation for private data failed\n", 1305 PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
1004 __func__); 1306}
1005 retval = -ENOMEM;
1006 goto err_kmalloc;
1007 }
1008 1307
1009 dev_dbg(&pdev->dev, 1308static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
1010 "%s memory allocation for private data success\n", __func__); 1309{
1310 int ret;
1311 struct spi_master *master;
1312 struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
1313 struct pch_spi_data *data;
1011 1314
1012 /* enable PCI device */ 1315 dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
1013 retval = pci_enable_device(pdev);
1014 if (retval != 0) {
1015 dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__);
1016 1316
1017 goto err_pci_en_device; 1317 master = spi_alloc_master(&board_dat->pdev->dev,
1318 sizeof(struct pch_spi_data));
1319 if (!master) {
1320 dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
1321 plat_dev->id);
1322 return -ENOMEM;
1018 } 1323 }
1019 1324
1020 dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n", 1325 data = spi_master_get_devdata(master);
1021 __func__, retval); 1326 data->master = master;
1022 1327
1023 board_dat->pdev = pdev; 1328 platform_set_drvdata(plat_dev, data);
1024 1329
1025 /* alllocate memory for SPI master */ 1330 /* baseaddress + address offset) */
1026 master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data)); 1331 data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
1027 if (master == NULL) { 1332 PCH_ADDRESS_SIZE * plat_dev->id;
1028 retval = -ENOMEM; 1333 data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) +
1029 dev_err(&pdev->dev, "%s Fail.\n", __func__); 1334 PCH_ADDRESS_SIZE * plat_dev->id;
1030 goto err_spi_alloc_master; 1335 if (!data->io_remap_addr) {
1336 dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
1337 ret = -ENOMEM;
1338 goto err_pci_iomap;
1031 } 1339 }
1032 1340
1033 dev_dbg(&pdev->dev, 1341 dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
1034 "%s spi_alloc_master returned non NULL\n", __func__); 1342 plat_dev->id, data->io_remap_addr);
1035 1343
1036 /* initialize members of SPI master */ 1344 /* initialize members of SPI master */
1037 master->bus_num = -1; 1345 master->bus_num = -1;
1038 master->num_chipselect = PCH_MAX_CS; 1346 master->num_chipselect = PCH_MAX_CS;
1039 master->setup = pch_spi_setup; 1347 master->setup = pch_spi_setup;
1040 master->transfer = pch_spi_transfer; 1348 master->transfer = pch_spi_transfer;
1041 dev_dbg(&pdev->dev,
1042 "%s transfer member of SPI master initialized\n", __func__);
1043
1044 board_dat->data = spi_master_get_devdata(master);
1045
1046 board_dat->data->master = master;
1047 board_dat->data->n_curnt_chip = 255;
1048 board_dat->data->board_dat = board_dat;
1049 board_dat->data->status = STATUS_RUNNING;
1050
1051 INIT_LIST_HEAD(&board_dat->data->queue);
1052 spin_lock_init(&board_dat->data->lock);
1053 INIT_WORK(&board_dat->data->work, pch_spi_process_messages);
1054 init_waitqueue_head(&board_dat->data->wait);
1055 1349
1056 /* allocate resources for PCH SPI */ 1350 data->board_dat = board_dat;
1057 retval = pch_spi_get_resources(board_dat); 1351 data->plat_dev = plat_dev;
1058 if (retval) { 1352 data->n_curnt_chip = 255;
1059 dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval); 1353 data->status = STATUS_RUNNING;
1354 data->ch = plat_dev->id;
1355 data->use_dma = use_dma;
1356
1357 INIT_LIST_HEAD(&data->queue);
1358 spin_lock_init(&data->lock);
1359 INIT_WORK(&data->work, pch_spi_process_messages);
1360 init_waitqueue_head(&data->wait);
1361
1362 ret = pch_spi_get_resources(board_dat, data);
1363 if (ret) {
1364 dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
1060 goto err_spi_get_resources; 1365 goto err_spi_get_resources;
1061 } 1366 }
1062 1367
1063 dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n", 1368 ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
1064 __func__, retval); 1369 IRQF_SHARED, KBUILD_MODNAME, data);
1065 1370 if (ret) {
1066 /* save private data in dev */ 1371 dev_err(&plat_dev->dev,
1067 pci_set_drvdata(pdev, board_dat); 1372 "%s request_irq failed\n", __func__);
1068 dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__); 1373 goto err_request_irq;
1374 }
1375 data->irq_reg_sts = true;
1069 1376
1070 /* set master mode */
1071 pch_spi_set_master_mode(master); 1377 pch_spi_set_master_mode(master);
1072 dev_dbg(&pdev->dev,
1073 "%s invoked pch_spi_set_master_mode\n", __func__);
1074 1378
1075 /* Register the controller with the SPI core. */ 1379 ret = spi_register_master(master);
1076 retval = spi_register_master(master); 1380 if (ret != 0) {
1077 if (retval != 0) { 1381 dev_err(&plat_dev->dev,
1078 dev_err(&pdev->dev,
1079 "%s spi_register_master FAILED\n", __func__); 1382 "%s spi_register_master FAILED\n", __func__);
1080 goto err_spi_reg_master; 1383 goto err_spi_register_master;
1081 } 1384 }
1082 1385
1083 dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n", 1386 if (use_dma) {
1084 __func__, retval); 1387 dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
1085 1388 pch_alloc_dma_buf(board_dat, data);
1389 }
1086 1390
1087 return 0; 1391 return 0;
1088 1392
1089err_spi_reg_master: 1393err_spi_register_master:
1090 spi_unregister_master(master); 1394 free_irq(board_dat->pdev->irq, board_dat);
1395err_request_irq:
1396 pch_spi_free_resources(board_dat, data);
1091err_spi_get_resources: 1397err_spi_get_resources:
1092err_spi_alloc_master: 1398 pci_iounmap(board_dat->pdev, data->io_remap_addr);
1399err_pci_iomap:
1093 spi_master_put(master); 1400 spi_master_put(master);
1094 pci_disable_device(pdev); 1401
1095err_pci_en_device: 1402 return ret;
1096 kfree(board_dat);
1097err_kmalloc:
1098 return retval;
1099} 1403}
1100 1404
1101static void pch_spi_remove(struct pci_dev *pdev) 1405static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
1102{ 1406{
1103 struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); 1407 struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
1408 struct pch_spi_data *data = platform_get_drvdata(plat_dev);
1104 int count; 1409 int count;
1410 unsigned long flags;
1105 1411
1106 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); 1412 dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
1413 __func__, plat_dev->id, board_dat->pdev->irq);
1107 1414
1108 if (!board_dat) { 1415 if (use_dma)
1109 dev_err(&pdev->dev, 1416 pch_free_dma_buf(board_dat, data);
1110 "%s pci_get_drvdata returned NULL\n", __func__);
1111 return;
1112 }
1113 1417
1114 /* check for any pending messages; no action is taken if the queue 1418 /* check for any pending messages; no action is taken if the queue
1115 * is still full; but at least we tried. Unload anyway */ 1419 * is still full; but at least we tried. Unload anyway */
1116 count = 500; 1420 count = 500;
1117 spin_lock(&board_dat->data->lock); 1421 spin_lock_irqsave(&data->lock, flags);
1118 board_dat->data->status = STATUS_EXITING; 1422 data->status = STATUS_EXITING;
1119 while ((list_empty(&board_dat->data->queue) == 0) && --count) { 1423 while ((list_empty(&data->queue) == 0) && --count) {
1120 dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", 1424 dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
1121 __func__); 1425 __func__);
1122 spin_unlock(&board_dat->data->lock); 1426 spin_unlock_irqrestore(&data->lock, flags);
1123 msleep(PCH_SLEEP_TIME); 1427 msleep(PCH_SLEEP_TIME);
1124 spin_lock(&board_dat->data->lock); 1428 spin_lock_irqsave(&data->lock, flags);
1125 } 1429 }
1126 spin_unlock(&board_dat->data->lock); 1430 spin_unlock_irqrestore(&data->lock, flags);
1127
1128 /* Free resources allocated for PCH SPI */
1129 pch_spi_free_resources(board_dat);
1130
1131 spi_unregister_master(board_dat->data->master);
1132
1133 /* free memory for private data */
1134 kfree(board_dat);
1135 1431
1136 pci_set_drvdata(pdev, NULL); 1432 pch_spi_free_resources(board_dat, data);
1433 /* disable interrupts & free IRQ */
1434 if (data->irq_reg_sts) {
1435 /* disable interrupts */
1436 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
1437 data->irq_reg_sts = false;
1438 free_irq(board_dat->pdev->irq, data);
1439 }
1137 1440
1138 /* disable PCI device */ 1441 pci_iounmap(board_dat->pdev, data->io_remap_addr);
1139 pci_disable_device(pdev); 1442 spi_unregister_master(data->master);
1443 spi_master_put(data->master);
1444 platform_set_drvdata(plat_dev, NULL);
1140 1445
1141 dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__); 1446 return 0;
1142} 1447}
1143
1144#ifdef CONFIG_PM 1448#ifdef CONFIG_PM
1145static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) 1449static int pch_spi_pd_suspend(struct platform_device *pd_dev,
1450 pm_message_t state)
1146{ 1451{
1147 u8 count; 1452 u8 count;
1148 int retval; 1453 struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
1149 1454 struct pch_spi_data *data = platform_get_drvdata(pd_dev);
1150 struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
1151 1455
1152 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); 1456 dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
1153 1457
1154 if (!board_dat) { 1458 if (!board_dat) {
1155 dev_err(&pdev->dev, 1459 dev_err(&pd_dev->dev,
1156 "%s pci_get_drvdata returned NULL\n", __func__); 1460 "%s pci_get_drvdata returned NULL\n", __func__);
1157 return -EFAULT; 1461 return -EFAULT;
1158 } 1462 }
1159 1463
1160 retval = 0;
1161 board_dat->suspend_sts = true;
1162
1163 /* check if the current message is processed: 1464 /* check if the current message is processed:
1164 Only after thats done the transfer will be suspended */ 1465 Only after thats done the transfer will be suspended */
1165 count = 255; 1466 count = 255;
1166 while ((--count) > 0) { 1467 while ((--count) > 0) {
1167 if (!(board_dat->data->bcurrent_msg_processing)) { 1468 if (!(data->bcurrent_msg_processing))
1168 dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_"
1169 "msg_processing = false\n", __func__);
1170 break; 1469 break;
1171 } else {
1172 dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_"
1173 "processing = true\n", __func__);
1174 }
1175 msleep(PCH_SLEEP_TIME); 1470 msleep(PCH_SLEEP_TIME);
1176 } 1471 }
1177 1472
1178 /* Free IRQ */ 1473 /* Free IRQ */
1179 if (board_dat->irq_reg_sts) { 1474 if (data->irq_reg_sts) {
1180 /* disable all interrupts */ 1475 /* disable all interrupts */
1181 pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, 1476 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
1182 PCH_ALL); 1477 pch_spi_reset(data->master);
1183 pch_spi_reset(board_dat->data->master); 1478 free_irq(board_dat->pdev->irq, data);
1184
1185 free_irq(board_dat->pdev->irq, board_dat);
1186 1479
1187 board_dat->irq_reg_sts = false; 1480 data->irq_reg_sts = false;
1188 dev_dbg(&pdev->dev, 1481 dev_dbg(&pd_dev->dev,
1189 "%s free_irq invoked successfully.\n", __func__); 1482 "%s free_irq invoked successfully.\n", __func__);
1190 } 1483 }
1191 1484
1485 return 0;
1486}
1487
1488static int pch_spi_pd_resume(struct platform_device *pd_dev)
1489{
1490 struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
1491 struct pch_spi_data *data = platform_get_drvdata(pd_dev);
1492 int retval;
1493
1494 if (!board_dat) {
1495 dev_err(&pd_dev->dev,
1496 "%s pci_get_drvdata returned NULL\n", __func__);
1497 return -EFAULT;
1498 }
1499
1500 if (!data->irq_reg_sts) {
1501 /* register IRQ */
1502 retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
1503 IRQF_SHARED, KBUILD_MODNAME, data);
1504 if (retval < 0) {
1505 dev_err(&pd_dev->dev,
1506 "%s request_irq failed\n", __func__);
1507 return retval;
1508 }
1509
1510 /* reset PCH SPI h/w */
1511 pch_spi_reset(data->master);
1512 pch_spi_set_master_mode(data->master);
1513 data->irq_reg_sts = true;
1514 }
1515 return 0;
1516}
1517#else
1518#define pch_spi_pd_suspend NULL
1519#define pch_spi_pd_resume NULL
1520#endif
1521
1522static struct platform_driver pch_spi_pd_driver = {
1523 .driver = {
1524 .name = "pch-spi",
1525 .owner = THIS_MODULE,
1526 },
1527 .probe = pch_spi_pd_probe,
1528 .remove = __devexit_p(pch_spi_pd_remove),
1529 .suspend = pch_spi_pd_suspend,
1530 .resume = pch_spi_pd_resume
1531};
1532
1533static int __devinit pch_spi_probe(struct pci_dev *pdev,
1534 const struct pci_device_id *id)
1535{
1536 struct pch_spi_board_data *board_dat;
1537 struct platform_device *pd_dev = NULL;
1538 int retval;
1539 int i;
1540 struct pch_pd_dev_save *pd_dev_save;
1541
1542 pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
1543 if (!pd_dev_save) {
1544 dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__);
1545 return -ENOMEM;
1546 }
1547
1548 board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
1549 if (!board_dat) {
1550 dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
1551 retval = -ENOMEM;
1552 goto err_no_mem;
1553 }
1554
1555 retval = pci_request_regions(pdev, KBUILD_MODNAME);
1556 if (retval) {
1557 dev_err(&pdev->dev, "%s request_region failed\n", __func__);
1558 goto pci_request_regions;
1559 }
1560
1561 board_dat->pdev = pdev;
1562 board_dat->num = id->driver_data;
1563 pd_dev_save->num = id->driver_data;
1564 pd_dev_save->board_dat = board_dat;
1565
1566 retval = pci_enable_device(pdev);
1567 if (retval) {
1568 dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
1569 goto pci_enable_device;
1570 }
1571
1572 for (i = 0; i < board_dat->num; i++) {
1573 pd_dev = platform_device_alloc("pch-spi", i);
1574 if (!pd_dev) {
1575 dev_err(&pdev->dev, "platform_device_alloc failed\n");
1576 goto err_platform_device;
1577 }
1578 pd_dev_save->pd_save[i] = pd_dev;
1579 pd_dev->dev.parent = &pdev->dev;
1580
1581 retval = platform_device_add_data(pd_dev, board_dat,
1582 sizeof(*board_dat));
1583 if (retval) {
1584 dev_err(&pdev->dev,
1585 "platform_device_add_data failed\n");
1586 platform_device_put(pd_dev);
1587 goto err_platform_device;
1588 }
1589
1590 retval = platform_device_add(pd_dev);
1591 if (retval) {
1592 dev_err(&pdev->dev, "platform_device_add failed\n");
1593 platform_device_put(pd_dev);
1594 goto err_platform_device;
1595 }
1596 }
1597
1598 pci_set_drvdata(pdev, pd_dev_save);
1599
1600 return 0;
1601
1602err_platform_device:
1603 pci_disable_device(pdev);
1604pci_enable_device:
1605 pci_release_regions(pdev);
1606pci_request_regions:
1607 kfree(board_dat);
1608err_no_mem:
1609 kfree(pd_dev_save);
1610
1611 return retval;
1612}
1613
1614static void __devexit pch_spi_remove(struct pci_dev *pdev)
1615{
1616 int i;
1617 struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1618
1619 dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
1620
1621 for (i = 0; i < pd_dev_save->num; i++)
1622 platform_device_unregister(pd_dev_save->pd_save[i]);
1623
1624 pci_disable_device(pdev);
1625 pci_release_regions(pdev);
1626 kfree(pd_dev_save->board_dat);
1627 kfree(pd_dev_save);
1628}
1629
1630#ifdef CONFIG_PM
1631static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
1632{
1633 int retval;
1634 struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1635
1636 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1637
1638 pd_dev_save->board_dat->suspend_sts = true;
1639
1192 /* save config space */ 1640 /* save config space */
1193 retval = pci_save_state(pdev); 1641 retval = pci_save_state(pdev);
1194
1195 if (retval == 0) { 1642 if (retval == 0) {
1196 dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n",
1197 __func__, retval);
1198 /* disable PM notifications */
1199 pci_enable_wake(pdev, PCI_D3hot, 0); 1643 pci_enable_wake(pdev, PCI_D3hot, 0);
1200 dev_dbg(&pdev->dev,
1201 "%s pci_enable_wake invoked successfully\n", __func__);
1202 /* disable PCI device */
1203 pci_disable_device(pdev); 1644 pci_disable_device(pdev);
1204 dev_dbg(&pdev->dev,
1205 "%s pci_disable_device invoked successfully\n",
1206 __func__);
1207 /* move device to D3hot state */
1208 pci_set_power_state(pdev, PCI_D3hot); 1645 pci_set_power_state(pdev, PCI_D3hot);
1209 dev_dbg(&pdev->dev,
1210 "%s pci_set_power_state invoked successfully\n",
1211 __func__);
1212 } else { 1646 } else {
1213 dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__); 1647 dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
1214 } 1648 }
1215 1649
1216 dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval);
1217
1218 return retval; 1650 return retval;
1219} 1651}
1220 1652
1221static int pch_spi_resume(struct pci_dev *pdev) 1653static int pch_spi_resume(struct pci_dev *pdev)
1222{ 1654{
1223 int retval; 1655 int retval;
1224 1656 struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1225 struct pch_spi_board_data *board = pci_get_drvdata(pdev);
1226 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); 1657 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1227 1658
1228 if (!board) {
1229 dev_err(&pdev->dev,
1230 "%s pci_get_drvdata returned NULL\n", __func__);
1231 return -EFAULT;
1232 }
1233
1234 /* move device to DO power state */
1235 pci_set_power_state(pdev, PCI_D0); 1659 pci_set_power_state(pdev, PCI_D0);
1236
1237 /* restore state */
1238 pci_restore_state(pdev); 1660 pci_restore_state(pdev);
1239 1661
1240 retval = pci_enable_device(pdev); 1662 retval = pci_enable_device(pdev);
@@ -1242,34 +1664,12 @@ static int pch_spi_resume(struct pci_dev *pdev)
1242 dev_err(&pdev->dev, 1664 dev_err(&pdev->dev,
1243 "%s pci_enable_device failed\n", __func__); 1665 "%s pci_enable_device failed\n", __func__);
1244 } else { 1666 } else {
1245 /* disable PM notifications */
1246 pci_enable_wake(pdev, PCI_D3hot, 0); 1667 pci_enable_wake(pdev, PCI_D3hot, 0);
1247 1668
1248 /* register IRQ handler */ 1669 /* set suspend status to false */
1249 if (!board->irq_reg_sts) { 1670 pd_dev_save->board_dat->suspend_sts = false;
1250 /* register IRQ */
1251 retval = request_irq(board->pdev->irq, pch_spi_handler,
1252 IRQF_SHARED, KBUILD_MODNAME,
1253 board);
1254 if (retval < 0) {
1255 dev_err(&pdev->dev,
1256 "%s request_irq failed\n", __func__);
1257 return retval;
1258 }
1259 board->irq_reg_sts = true;
1260
1261 /* reset PCH SPI h/w */
1262 pch_spi_reset(board->data->master);
1263 pch_spi_set_master_mode(board->data->master);
1264
1265 /* set suspend status to false */
1266 board->suspend_sts = false;
1267
1268 }
1269 } 1671 }
1270 1672
1271 dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval);
1272
1273 return retval; 1673 return retval;
1274} 1674}
1275#else 1675#else
@@ -1289,15 +1689,29 @@ static struct pci_driver pch_spi_pcidev = {
1289 1689
1290static int __init pch_spi_init(void) 1690static int __init pch_spi_init(void)
1291{ 1691{
1292 return pci_register_driver(&pch_spi_pcidev); 1692 int ret;
1693 ret = platform_driver_register(&pch_spi_pd_driver);
1694 if (ret)
1695 return ret;
1696
1697 ret = pci_register_driver(&pch_spi_pcidev);
1698 if (ret)
1699 return ret;
1700
1701 return 0;
1293} 1702}
1294module_init(pch_spi_init); 1703module_init(pch_spi_init);
1295 1704
1296static void __exit pch_spi_exit(void) 1705static void __exit pch_spi_exit(void)
1297{ 1706{
1298 pci_unregister_driver(&pch_spi_pcidev); 1707 pci_unregister_driver(&pch_spi_pcidev);
1708 platform_driver_unregister(&pch_spi_pd_driver);
1299} 1709}
1300module_exit(pch_spi_exit); 1710module_exit(pch_spi_exit);
1301 1711
1712module_param(use_dma, int, 0644);
1713MODULE_PARM_DESC(use_dma,
1714 "to use DMA for data transfers pass 1 else 0; default 1");
1715
1302MODULE_LICENSE("GPL"); 1716MODULE_LICENSE("GPL");
1303MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver"); 1717MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi-txx9.c
index dfa024b633e1..f0a2ab0428a3 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * spi_txx9.c - TXx9 SPI controller driver. 2 * TXx9 SPI controller driver.
3 * 3 *
4 * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c 4 * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
5 * Copyright (C) 2000-2001 Toshiba Corporation 5 * Copyright (C) 2000-2001 Toshiba Corporation
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/spi-xilinx.c
index 4d2c75df886c..4d2c75df886c 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/spi-xilinx.c
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2e13a14bba3f..4d1b9f517ce8 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * spi.c - SPI init/core code 2 * SPI init/core code
3 * 3 *
4 * Copyright (C) 2005 David Brownell 4 * Copyright (C) 2005 David Brownell
5 * 5 *
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
deleted file mode 100644
index be991359bf92..000000000000
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ /dev/null
@@ -1,201 +0,0 @@
1/* linux/drivers/spi/spi_s3c24xx_gpio.c
2 *
3 * Copyright (c) 2006 Ben Dooks
4 * Copyright (c) 2006 Simtec Electronics
5 *
6 * S3C24XX GPIO based SPI driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12*/
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/delay.h>
17#include <linux/spinlock.h>
18#include <linux/workqueue.h>
19#include <linux/platform_device.h>
20#include <linux/gpio.h>
21
22#include <linux/spi/spi.h>
23#include <linux/spi/spi_bitbang.h>
24
25#include <mach/regs-gpio.h>
26#include <mach/spi-gpio.h>
27#include <mach/hardware.h>
28
29struct s3c2410_spigpio {
30 struct spi_bitbang bitbang;
31
32 struct s3c2410_spigpio_info *info;
33 struct platform_device *dev;
34};
35
36static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi)
37{
38 return spi_master_get_devdata(spi->master);
39}
40
41static inline void setsck(struct spi_device *dev, int on)
42{
43 struct s3c2410_spigpio *sg = spidev_to_sg(dev);
44 s3c2410_gpio_setpin(sg->info->pin_clk, on ? 1 : 0);
45}
46
47static inline void setmosi(struct spi_device *dev, int on)
48{
49 struct s3c2410_spigpio *sg = spidev_to_sg(dev);
50 s3c2410_gpio_setpin(sg->info->pin_mosi, on ? 1 : 0);
51}
52
53static inline u32 getmiso(struct spi_device *dev)
54{
55 struct s3c2410_spigpio *sg = spidev_to_sg(dev);
56 return s3c2410_gpio_getpin(sg->info->pin_miso) ? 1 : 0;
57}
58
59#define spidelay(x) ndelay(x)
60
61#include "spi_bitbang_txrx.h"
62
63
64static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi,
65 unsigned nsecs, u32 word, u8 bits)
66{
67 return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
68}
69
70static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi,
71 unsigned nsecs, u32 word, u8 bits)
72{
73 return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
74}
75
76static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi,
77 unsigned nsecs, u32 word, u8 bits)
78{
79 return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
80}
81
82static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi,
83 unsigned nsecs, u32 word, u8 bits)
84{
85 return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
86}
87
88
89static void s3c2410_spigpio_chipselect(struct spi_device *dev, int value)
90{
91 struct s3c2410_spigpio *sg = spidev_to_sg(dev);
92
93 if (sg->info && sg->info->chip_select)
94 (sg->info->chip_select)(sg->info, value);
95}
96
97static int s3c2410_spigpio_probe(struct platform_device *dev)
98{
99 struct s3c2410_spigpio_info *info;
100 struct spi_master *master;
101 struct s3c2410_spigpio *sp;
102 int ret;
103
104 master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio));
105 if (master == NULL) {
106 dev_err(&dev->dev, "failed to allocate spi master\n");
107 ret = -ENOMEM;
108 goto err;
109 }
110
111 sp = spi_master_get_devdata(master);
112
113 platform_set_drvdata(dev, sp);
114
115 /* copy in the plkatform data */
116 info = sp->info = dev->dev.platform_data;
117
118 /* setup spi bitbang adaptor */
119 sp->bitbang.master = spi_master_get(master);
120 sp->bitbang.master->bus_num = info->bus_num;
121 sp->bitbang.master->num_chipselect = info->num_chipselect;
122 sp->bitbang.chipselect = s3c2410_spigpio_chipselect;
123
124 sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0;
125 sp->bitbang.txrx_word[SPI_MODE_1] = s3c2410_spigpio_txrx_mode1;
126 sp->bitbang.txrx_word[SPI_MODE_2] = s3c2410_spigpio_txrx_mode2;
127 sp->bitbang.txrx_word[SPI_MODE_3] = s3c2410_spigpio_txrx_mode3;
128
129 /* set state of spi pins, always assume that the clock is
130 * available, but do check the MOSI and MISO. */
131 s3c2410_gpio_setpin(info->pin_clk, 0);
132 s3c2410_gpio_cfgpin(info->pin_clk, S3C2410_GPIO_OUTPUT);
133
134 if (info->pin_mosi < S3C2410_GPH10) {
135 s3c2410_gpio_setpin(info->pin_mosi, 0);
136 s3c2410_gpio_cfgpin(info->pin_mosi, S3C2410_GPIO_OUTPUT);
137 }
138
139 if (info->pin_miso != S3C2410_GPA0 && info->pin_miso < S3C2410_GPH10)
140 s3c2410_gpio_cfgpin(info->pin_miso, S3C2410_GPIO_INPUT);
141
142 ret = spi_bitbang_start(&sp->bitbang);
143 if (ret)
144 goto err_no_bitbang;
145
146 return 0;
147
148 err_no_bitbang:
149 spi_master_put(sp->bitbang.master);
150 err:
151 return ret;
152
153}
154
155static int s3c2410_spigpio_remove(struct platform_device *dev)
156{
157 struct s3c2410_spigpio *sp = platform_get_drvdata(dev);
158
159 spi_bitbang_stop(&sp->bitbang);
160 spi_master_put(sp->bitbang.master);
161
162 return 0;
163}
164
165/* all gpio should be held over suspend/resume, so we should
166 * not need to deal with this
167*/
168
169#define s3c2410_spigpio_suspend NULL
170#define s3c2410_spigpio_resume NULL
171
172/* work with hotplug and coldplug */
173MODULE_ALIAS("platform:spi_s3c24xx_gpio");
174
175static struct platform_driver s3c2410_spigpio_drv = {
176 .probe = s3c2410_spigpio_probe,
177 .remove = s3c2410_spigpio_remove,
178 .suspend = s3c2410_spigpio_suspend,
179 .resume = s3c2410_spigpio_resume,
180 .driver = {
181 .name = "spi_s3c24xx_gpio",
182 .owner = THIS_MODULE,
183 },
184};
185
186static int __init s3c2410_spigpio_init(void)
187{
188 return platform_driver_register(&s3c2410_spigpio_drv);
189}
190
191static void __exit s3c2410_spigpio_exit(void)
192{
193 platform_driver_unregister(&s3c2410_spigpio_drv);
194}
195
196module_init(s3c2410_spigpio_init);
197module_exit(s3c2410_spigpio_exit);
198
199MODULE_DESCRIPTION("S3C24XX SPI Driver");
200MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
201MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d9fd86211365..830adbed1d7a 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * spidev.c -- simple synchronous userspace interface to SPI devices 2 * Simple synchronous userspace interface to SPI devices
3 * 3 *
4 * Copyright (C) 2006 SWAPP 4 * Copyright (C) 2006 SWAPP
5 * Andrea Paterniani <a.paterniani@swapp-eng.it> 5 * Andrea Paterniani <a.paterniani@swapp-eng.it>
diff --git a/sound/soc/ep93xx/ep93xx-ac97.c b/sound/soc/ep93xx/ep93xx-ac97.c
index 104e95cda0ad..c7417c76552b 100644
--- a/sound/soc/ep93xx/ep93xx-ac97.c
+++ b/sound/soc/ep93xx/ep93xx-ac97.c
@@ -106,12 +106,12 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info;
106 106
107static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = { 107static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = {
108 .name = "ac97-pcm-out", 108 .name = "ac97-pcm-out",
109 .dma_port = EP93XX_DMA_M2P_PORT_AAC1, 109 .dma_port = EP93XX_DMA_AAC1,
110}; 110};
111 111
112static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = { 112static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = {
113 .name = "ac97-pcm-in", 113 .name = "ac97-pcm-in",
114 .dma_port = EP93XX_DMA_M2P_PORT_AAC1, 114 .dma_port = EP93XX_DMA_AAC1,
115}; 115};
116 116
117static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info, 117static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info,
diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c
index 042f4e93746f..30df42568dbb 100644
--- a/sound/soc/ep93xx/ep93xx-i2s.c
+++ b/sound/soc/ep93xx/ep93xx-i2s.c
@@ -70,11 +70,11 @@ struct ep93xx_i2s_info {
70struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = { 70struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = {
71 [SNDRV_PCM_STREAM_PLAYBACK] = { 71 [SNDRV_PCM_STREAM_PLAYBACK] = {
72 .name = "i2s-pcm-out", 72 .name = "i2s-pcm-out",
73 .dma_port = EP93XX_DMA_M2P_PORT_I2S1, 73 .dma_port = EP93XX_DMA_I2S1,
74 }, 74 },
75 [SNDRV_PCM_STREAM_CAPTURE] = { 75 [SNDRV_PCM_STREAM_CAPTURE] = {
76 .name = "i2s-pcm-in", 76 .name = "i2s-pcm-in",
77 .dma_port = EP93XX_DMA_M2P_PORT_I2S1, 77 .dma_port = EP93XX_DMA_I2S1,
78 }, 78 },
79}; 79};
80 80
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c
index a456e491155f..a07f99c9c375 100644
--- a/sound/soc/ep93xx/ep93xx-pcm.c
+++ b/sound/soc/ep93xx/ep93xx-pcm.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/dmaengine.h>
19#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
20 21
21#include <sound/core.h> 22#include <sound/core.h>
@@ -53,43 +54,34 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
53 54
54struct ep93xx_runtime_data 55struct ep93xx_runtime_data
55{ 56{
56 struct ep93xx_dma_m2p_client cl;
57 struct ep93xx_pcm_dma_params *params;
58 int pointer_bytes; 57 int pointer_bytes;
59 struct tasklet_struct period_tasklet;
60 int periods; 58 int periods;
61 struct ep93xx_dma_buffer buf[32]; 59 int period_bytes;
60 struct dma_chan *dma_chan;
61 struct ep93xx_dma_data dma_data;
62}; 62};
63 63
64static void ep93xx_pcm_period_elapsed(unsigned long data) 64static void ep93xx_pcm_dma_callback(void *data)
65{ 65{
66 struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data; 66 struct snd_pcm_substream *substream = data;
67 snd_pcm_period_elapsed(substream); 67 struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
68}
69 68
70static void ep93xx_pcm_buffer_started(void *cookie, 69 rtd->pointer_bytes += rtd->period_bytes;
71 struct ep93xx_dma_buffer *buf) 70 rtd->pointer_bytes %= rtd->period_bytes * rtd->periods;
72{ 71
72 snd_pcm_period_elapsed(substream);
73} 73}
74 74
75static void ep93xx_pcm_buffer_finished(void *cookie, 75static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
76 struct ep93xx_dma_buffer *buf,
77 int bytes, int error)
78{ 76{
79 struct snd_pcm_substream *substream = cookie; 77 struct ep93xx_dma_data *data = filter_param;
80 struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
81
82 if (buf == rtd->buf + rtd->periods - 1)
83 rtd->pointer_bytes = 0;
84 else
85 rtd->pointer_bytes += buf->size;
86 78
87 if (!error) { 79 if (data->direction == ep93xx_dma_chan_direction(chan)) {
88 ep93xx_dma_m2p_submit_recursive(&rtd->cl, buf); 80 chan->private = data;
89 tasklet_schedule(&rtd->period_tasklet); 81 return true;
90 } else {
91 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
92 } 82 }
83
84 return false;
93} 85}
94 86
95static int ep93xx_pcm_open(struct snd_pcm_substream *substream) 87static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
@@ -98,30 +90,38 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
98 struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai; 90 struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai;
99 struct ep93xx_pcm_dma_params *dma_params; 91 struct ep93xx_pcm_dma_params *dma_params;
100 struct ep93xx_runtime_data *rtd; 92 struct ep93xx_runtime_data *rtd;
93 dma_cap_mask_t mask;
101 int ret; 94 int ret;
102 95
103 dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream); 96 ret = snd_pcm_hw_constraint_integer(substream->runtime,
97 SNDRV_PCM_HW_PARAM_PERIODS);
98 if (ret < 0)
99 return ret;
100
104 snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware); 101 snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware);
105 102
106 rtd = kmalloc(sizeof(*rtd), GFP_KERNEL); 103 rtd = kmalloc(sizeof(*rtd), GFP_KERNEL);
107 if (!rtd) 104 if (!rtd)
108 return -ENOMEM; 105 return -ENOMEM;
109 106
110 memset(&rtd->period_tasklet, 0, sizeof(rtd->period_tasklet)); 107 dma_cap_zero(mask);
111 rtd->period_tasklet.func = ep93xx_pcm_period_elapsed; 108 dma_cap_set(DMA_SLAVE, mask);
112 rtd->period_tasklet.data = (unsigned long)substream; 109 dma_cap_set(DMA_CYCLIC, mask);
113 110
114 rtd->cl.name = dma_params->name; 111 dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream);
115 rtd->cl.flags = dma_params->dma_port | EP93XX_DMA_M2P_IGNORE_ERROR | 112 rtd->dma_data.port = dma_params->dma_port;
116 ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 113 rtd->dma_data.name = dma_params->name;
117 EP93XX_DMA_M2P_TX : EP93XX_DMA_M2P_RX); 114
118 rtd->cl.cookie = substream; 115 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
119 rtd->cl.buffer_started = ep93xx_pcm_buffer_started; 116 rtd->dma_data.direction = DMA_TO_DEVICE;
120 rtd->cl.buffer_finished = ep93xx_pcm_buffer_finished; 117 else
121 ret = ep93xx_dma_m2p_client_register(&rtd->cl); 118 rtd->dma_data.direction = DMA_FROM_DEVICE;
122 if (ret < 0) { 119
120 rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter,
121 &rtd->dma_data);
122 if (!rtd->dma_chan) {
123 kfree(rtd); 123 kfree(rtd);
124 return ret; 124 return -EINVAL;
125 } 125 }
126 126
127 substream->runtime->private_data = rtd; 127 substream->runtime->private_data = rtd;
@@ -132,31 +132,52 @@ static int ep93xx_pcm_close(struct snd_pcm_substream *substream)
132{ 132{
133 struct ep93xx_runtime_data *rtd = substream->runtime->private_data; 133 struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
134 134
135 ep93xx_dma_m2p_client_unregister(&rtd->cl); 135 dma_release_channel(rtd->dma_chan);
136 kfree(rtd); 136 kfree(rtd);
137 return 0; 137 return 0;
138} 138}
139 139
140static int ep93xx_pcm_dma_submit(struct snd_pcm_substream *substream)
141{
142 struct snd_pcm_runtime *runtime = substream->runtime;
143 struct ep93xx_runtime_data *rtd = runtime->private_data;
144 struct dma_chan *chan = rtd->dma_chan;
145 struct dma_device *dma_dev = chan->device;
146 struct dma_async_tx_descriptor *desc;
147
148 rtd->pointer_bytes = 0;
149 desc = dma_dev->device_prep_dma_cyclic(chan, runtime->dma_addr,
150 rtd->period_bytes * rtd->periods,
151 rtd->period_bytes,
152 rtd->dma_data.direction);
153 if (!desc)
154 return -EINVAL;
155
156 desc->callback = ep93xx_pcm_dma_callback;
157 desc->callback_param = substream;
158
159 dmaengine_submit(desc);
160 return 0;
161}
162
163static void ep93xx_pcm_dma_flush(struct snd_pcm_substream *substream)
164{
165 struct snd_pcm_runtime *runtime = substream->runtime;
166 struct ep93xx_runtime_data *rtd = runtime->private_data;
167
168 dmaengine_terminate_all(rtd->dma_chan);
169}
170
140static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream, 171static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream,
141 struct snd_pcm_hw_params *params) 172 struct snd_pcm_hw_params *params)
142{ 173{
143 struct snd_pcm_runtime *runtime = substream->runtime; 174 struct snd_pcm_runtime *runtime = substream->runtime;
144 struct ep93xx_runtime_data *rtd = runtime->private_data; 175 struct ep93xx_runtime_data *rtd = runtime->private_data;
145 size_t totsize = params_buffer_bytes(params);
146 size_t period = params_period_bytes(params);
147 int i;
148 176
149 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); 177 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
150 runtime->dma_bytes = totsize;
151
152 rtd->periods = (totsize + period - 1) / period;
153 for (i = 0; i < rtd->periods; i++) {
154 rtd->buf[i].bus_addr = runtime->dma_addr + (i * period);
155 rtd->buf[i].size = period;
156 if ((i + 1) * period > totsize)
157 rtd->buf[i].size = totsize - (i * period);
158 }
159 178
179 rtd->periods = params_periods(params);
180 rtd->period_bytes = params_period_bytes(params);
160 return 0; 181 return 0;
161} 182}
162 183
@@ -168,24 +189,20 @@ static int ep93xx_pcm_hw_free(struct snd_pcm_substream *substream)
168 189
169static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) 190static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
170{ 191{
171 struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
172 int ret; 192 int ret;
173 int i;
174 193
175 ret = 0; 194 ret = 0;
176 switch (cmd) { 195 switch (cmd) {
177 case SNDRV_PCM_TRIGGER_START: 196 case SNDRV_PCM_TRIGGER_START:
178 case SNDRV_PCM_TRIGGER_RESUME: 197 case SNDRV_PCM_TRIGGER_RESUME:
179 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 198 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
180 rtd->pointer_bytes = 0; 199 ret = ep93xx_pcm_dma_submit(substream);
181 for (i = 0; i < rtd->periods; i++)
182 ep93xx_dma_m2p_submit(&rtd->cl, rtd->buf + i);
183 break; 200 break;
184 201
185 case SNDRV_PCM_TRIGGER_STOP: 202 case SNDRV_PCM_TRIGGER_STOP:
186 case SNDRV_PCM_TRIGGER_SUSPEND: 203 case SNDRV_PCM_TRIGGER_SUSPEND:
187 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 204 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
188 ep93xx_dma_m2p_flush(&rtd->cl); 205 ep93xx_pcm_dma_flush(substream);
189 break; 206 break;
190 207
191 default: 208 default: