author    Linus Torvalds <torvalds@linux-foundation.org>  2013-07-02 17:33:21 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-07-02 17:33:21 -0400
commit    0bf6a210a43f7118d858806200127e421649fc4e (patch)
tree      9a17d88ebd1b9bc693fba7f39c12123dec96e930 /drivers
parent    ee1a8d402e7e204d57fb108aa40003b6d1633036 (diff)
parent    5c913a9a9772f4b434aaea7328836419287b5d1c (diff)
Merge tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver specific changes from Arnd Bergmann:
 "These changes are all driver specific and cross over between arm-soc
  contents and some other subsystem, in these cases cpufreq, crypto,
  dma, pinctrl, mailbox and usb, and the subsystem owners agreed to
  have these changes merged through arm-soc.

  As we proceed to untangle the dependencies between platform code and
  driver code, the amount of changes in this category is fortunately
  shrinking, for 3.11 we have 16 branches here and 101 non-merge
  changesets, the majority of which are for the stedma40 dma engine
  driver used in the ux500 platform.  Cleaning up that code touches
  multiple subsystems, but gets rid of the dependency in the end.

  The mailbox code moved out from mach-omap2 to drivers/mailbox is an
  intermediate step and is still omap specific at the moment.  Patches
  exist to generalize the subsystem and add other drivers with the same
  API, but those did not make it for 3.11."

* tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (101 commits)
  crypto: ux500: use dmaengine_submit API
  crypto: ux500: use dmaengine_prep_slave_sg API
  crypto: ux500: use dmaengine_device_control API
  crypto: ux500/crypt: add missing __iomem qualifiers
  crypto: ux500/hash: add missing static qualifiers
  crypto: ux500/hash: use readl on iomem addresses
  dmaengine: ste_dma40: Declare memcpy config as static
  ARM: ux500: Remove mop500_snowball_ethernet_clock_enable()
  ARM: ux500: Correct the EN_3v3 regulator's on/off GPIO
  ARM: ux500: Provide a AB8500 GPIO Device Tree node
  gpio: rcar: fix gpio_rcar_of_table
  gpio-rcar: Remove #ifdef CONFIG_OF around OF-specific sections
  gpio-rcar: Reference core gpio documentation in the DT bindings
  clk: exynos5250: Add enum entries for divider clock of i2s1 and i2s2
  ARM: dts: Update Samsung I2S documentation
  ARM: dts: add clock provider information for i2s controllers in Exynos5250
  ARM: dts: add Exynos audio subsystem clock controller node
  clk: samsung: register audio subsystem clocks using common clock framework
  ARM: dts: use #include for all device trees for Samsung
  pinctrl: s3c24xx: use correct header for chained_irq functions
  ...
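
Editor's note: the ux500 crypto patches listed above convert open-coded calls through the dma_device ops to the generic dmaengine wrapper helpers named in the subjects (dmaengine_prep_slave_sg, dmaengine_submit, dmaengine_device_control). Below is a minimal sketch of that target API, not taken from the patches themselves; the channel, scatterlist and helper name are hypothetical.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical example: queue a scatterlist on a slave channel using the
 * dmaengine_* wrappers instead of reaching into chan->device directly. */
static int example_queue_sg(struct dma_chan *chan, struct scatterlist *sg,
			    unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	cookie = dmaengine_submit(desc);	/* wraps desc->tx_submit(desc) */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

	/* Teardown would go through dmaengine_device_control(chan,
	 * DMA_TERMINATE_ALL, 0) rather than calling device_control() directly. */
	return 0;
}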
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clk/samsung/Makefile                              |    1
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c                    |  133
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c                      |    5
-rw-r--r--  drivers/clocksource/clksrc-dbx500-prcmu.c                 |   11
-rw-r--r--  drivers/cpufreq/Kconfig.arm                               |   58
-rw-r--r--  drivers/cpufreq/Makefile                                  |    5
-rw-r--r--  drivers/cpufreq/s3c2410-cpufreq.c                         |  160
-rw-r--r--  drivers/cpufreq/s3c2412-cpufreq.c                         |  257
-rw-r--r--  drivers/cpufreq/s3c2440-cpufreq.c                         |  312
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq-debugfs.c                 |  198
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c                         |  711
-rw-r--r--  drivers/crypto/ux500/cryp/cryp.c                          |    4
-rw-r--r--  drivers/crypto/ux500/cryp/cryp.h                          |    7
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c                     |   57
-rw-r--r--  drivers/crypto/ux500/hash/hash_alg.h                      |    5
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c                     |   57
-rw-r--r--  drivers/dma/ste_dma40.c                                   |  533
-rw-r--r--  drivers/dma/ste_dma40_ll.c                                |  189
-rw-r--r--  drivers/dma/ste_dma40_ll.h                                |    3
-rw-r--r--  drivers/gpio/gpio-rcar.c                                  |   63
-rw-r--r--  drivers/gpio/gpio-samsung.c                               |   67
-rw-r--r--  drivers/mailbox/Kconfig                                   |   34
-rw-r--r--  drivers/mailbox/Makefile                                  |    6
-rw-r--r--  drivers/mailbox/mailbox-omap1.c                           |  203
-rw-r--r--  drivers/mailbox/mailbox-omap2.c                           |  358
-rw-r--r--  drivers/mailbox/omap-mailbox.c                            |  469
-rw-r--r--  drivers/mailbox/omap-mbox.h                               |   67
-rw-r--r--  drivers/pinctrl/Kconfig                                   |    5
-rw-r--r--  drivers/pinctrl/Makefile                                  |    1
-rw-r--r--  drivers/pinctrl/pinctrl-s3c24xx.c                         |  651
-rw-r--r--  drivers/pinctrl/pinctrl-samsung.c                         |   10
-rw-r--r--  drivers/pinctrl/pinctrl-samsung.h                         |    4
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a73a4.c                      |  198
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7778.c                      |    4
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7790.c                      |  227
-rw-r--r--  drivers/remoteproc/Kconfig                                |    3
-rw-r--r--  drivers/remoteproc/omap_remoteproc.c                      |    2
-rw-r--r--  drivers/staging/tidspbridge/Kconfig                       |    3
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/host_os.h  |    2
-rw-r--r--  drivers/usb/gadget/Kconfig                                |    2
-rw-r--r--  drivers/usb/gadget/atmel_usba_udc.c                       |  264
-rw-r--r--  drivers/usb/gadget/atmel_usba_udc.h                       |    2
-rw-r--r--  drivers/usb/musb/ux500.c                                  |   61
-rw-r--r--  drivers/usb/musb/ux500_dma.c                              |   59
44 files changed, 4899 insertions, 572 deletions
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index b7c232e67425..187681013bdb 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o
 obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
 obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
 obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
+obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
new file mode 100644
index 000000000000..9b1bbd52fd1f
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -0,0 +1,133 @@
1/*
2 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
3 * Author: Padmavathi Venna <padma.v@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Common Clock Framework support for Audio Subsystem Clock Controller.
10*/
11
12#include <linux/clkdev.h>
13#include <linux/io.h>
14#include <linux/clk-provider.h>
15#include <linux/of_address.h>
16#include <linux/syscore_ops.h>
17
18#include <dt-bindings/clk/exynos-audss-clk.h>
19
20static DEFINE_SPINLOCK(lock);
21static struct clk **clk_table;
22static void __iomem *reg_base;
23static struct clk_onecell_data clk_data;
24
25#define ASS_CLK_SRC 0x0
26#define ASS_CLK_DIV 0x4
27#define ASS_CLK_GATE 0x8
28
29static unsigned long reg_save[][2] = {
30 {ASS_CLK_SRC, 0},
31 {ASS_CLK_DIV, 0},
32 {ASS_CLK_GATE, 0},
33};
34
35/* list of all parent clock list */
36static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
37static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
38
39#ifdef CONFIG_PM_SLEEP
40static int exynos_audss_clk_suspend(void)
41{
42 int i;
43
44 for (i = 0; i < ARRAY_SIZE(reg_save); i++)
45 reg_save[i][1] = readl(reg_base + reg_save[i][0]);
46
47 return 0;
48}
49
50static void exynos_audss_clk_resume(void)
51{
52 int i;
53
54 for (i = 0; i < ARRAY_SIZE(reg_save); i++)
55 writel(reg_save[i][1], reg_base + reg_save[i][0]);
56}
57
58static struct syscore_ops exynos_audss_clk_syscore_ops = {
59 .suspend = exynos_audss_clk_suspend,
60 .resume = exynos_audss_clk_resume,
61};
62#endif /* CONFIG_PM_SLEEP */
63
64/* register exynos_audss clocks */
65void __init exynos_audss_clk_init(struct device_node *np)
66{
67 reg_base = of_iomap(np, 0);
68 if (!reg_base) {
69 pr_err("%s: failed to map audss registers\n", __func__);
70 return;
71 }
72
73 clk_table = kzalloc(sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
74 GFP_KERNEL);
75 if (!clk_table) {
76 pr_err("%s: could not allocate clk lookup table\n", __func__);
77 return;
78 }
79
80 clk_data.clks = clk_table;
81 clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
82 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
83
84 clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
85 mout_audss_p, ARRAY_SIZE(mout_audss_p), 0,
86 reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
87
88 clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
89 mout_i2s_p, ARRAY_SIZE(mout_i2s_p), 0,
90 reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
91
92 clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
93 "mout_audss", 0, reg_base + ASS_CLK_DIV, 0, 4,
94 0, &lock);
95
96 clk_table[EXYNOS_DOUT_AUD_BUS] = clk_register_divider(NULL,
97 "dout_aud_bus", "dout_srp", 0,
98 reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
99
100 clk_table[EXYNOS_DOUT_I2S] = clk_register_divider(NULL, "dout_i2s",
101 "mout_i2s", 0, reg_base + ASS_CLK_DIV, 8, 4, 0,
102 &lock);
103
104 clk_table[EXYNOS_SRP_CLK] = clk_register_gate(NULL, "srp_clk",
105 "dout_srp", CLK_SET_RATE_PARENT,
106 reg_base + ASS_CLK_GATE, 0, 0, &lock);
107
108 clk_table[EXYNOS_I2S_BUS] = clk_register_gate(NULL, "i2s_bus",
109 "dout_aud_bus", CLK_SET_RATE_PARENT,
110 reg_base + ASS_CLK_GATE, 2, 0, &lock);
111
112 clk_table[EXYNOS_SCLK_I2S] = clk_register_gate(NULL, "sclk_i2s",
113 "dout_i2s", CLK_SET_RATE_PARENT,
114 reg_base + ASS_CLK_GATE, 3, 0, &lock);
115
116 clk_table[EXYNOS_PCM_BUS] = clk_register_gate(NULL, "pcm_bus",
117 "sclk_pcm", CLK_SET_RATE_PARENT,
118 reg_base + ASS_CLK_GATE, 4, 0, &lock);
119
120 clk_table[EXYNOS_SCLK_PCM] = clk_register_gate(NULL, "sclk_pcm",
121 "div_pcm0", CLK_SET_RATE_PARENT,
122 reg_base + ASS_CLK_GATE, 5, 0, &lock);
123
124#ifdef CONFIG_PM_SLEEP
125 register_syscore_ops(&exynos_audss_clk_syscore_ops);
126#endif
127
128 pr_info("Exynos: Audss: clock setup completed\n");
129}
130CLK_OF_DECLARE(exynos4210_audss_clk, "samsung,exynos4210-audss-clock",
131 exynos_audss_clk_init);
132CLK_OF_DECLARE(exynos5250_audss_clk, "samsung,exynos5250-audss-clock",
133 exynos_audss_clk_init);
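
Editor's note: once the provider above is registered with of_clk_add_provider(), a consumer driver reaches these clocks through the ordinary clk API. A minimal sketch follows; the connection name is an assumption for illustration, not something defined by this file.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: its DT node's "clocks" property points at the
 * audss controller with an index from dt-bindings/clk/exynos-audss-clk.h
 * (for example EXYNOS_SCLK_I2S). */
static int example_enable_i2s_clock(struct device *dev)
{
	struct clk *sclk;

	sclk = devm_clk_get(dev, "sclk_i2s");	/* connection name assumed */
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	return clk_prepare_enable(sclk);
}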
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 22d7699e7ced..6f767c515ec7 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -87,6 +87,7 @@ enum exynos5250_clks {
 	sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_sata, sclk_usb3,
 	sclk_jpeg, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_pwm,
 	sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
+	div_i2s1, div_i2s2,
 
 	/* gate clocks */
 	gscl0 = 256, gscl1, gscl2, gscl3, gscl_wa, gscl_wb, smmu_gscl0,
@@ -291,8 +292,8 @@ struct samsung_div_clock exynos5250_div_clks[] __initdata = {
 	DIV(none, "div_pcm1", "sclk_audio1", DIV_PERIC4, 4, 8),
 	DIV(none, "div_audio2", "mout_audio2", DIV_PERIC4, 16, 4),
 	DIV(none, "div_pcm2", "sclk_audio2", DIV_PERIC4, 20, 8),
-	DIV(none, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
-	DIV(none, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
+	DIV(div_i2s1, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
+	DIV(div_i2s2, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
 	DIV(sclk_pixel, "div_hdmi_pixel", "sclk_vpll", DIV_DISP1_0, 28, 4),
 	DIV_A(none, "armclk", "div_arm", DIV_CPU0, 28, 3, "armclk"),
 	DIV_F(none, "div_mipi1_pre", "div_mipi1",
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 54f3d119d99c..77398f8c19a0 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -10,7 +10,7 @@
  * DBx500-PRCMU Timer
  * The PRCMU has 5 timers which are available in a always-on
  * power domain. We use the Timer 4 for our always-on clock
- * source on DB8500 and Timer 3 on DB5500.
+ * source on DB8500.
  */
 #include <linux/clockchips.h>
 #include <linux/clksrc-dbx500-prcmu.h>
@@ -30,15 +30,14 @@
 
 static void __iomem *clksrc_dbx500_timer_base;
 
-static cycle_t clksrc_dbx500_prcmu_read(struct clocksource *cs)
+static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
 {
+	void __iomem *base = clksrc_dbx500_timer_base;
 	u32 count, count2;
 
 	do {
-		count = readl(clksrc_dbx500_timer_base +
-			      PRCMU_TIMER_DOWNCOUNT);
-		count2 = readl(clksrc_dbx500_timer_base +
-			       PRCMU_TIMER_DOWNCOUNT);
+		count = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT);
+		count2 = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT);
 	} while (count2 != count);
 
 	/* Negate because the timer is a decrementing counter */
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 6e57543fe0b9..a92440896868 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -96,6 +96,56 @@ config ARM_OMAP2PLUS_CPUFREQ
 	default ARCH_OMAP2PLUS
 	select CPU_FREQ_TABLE
 
+config ARM_S3C_CPUFREQ
+	bool
+	help
+	  Internal configuration node for common cpufreq on Samsung SoC
+
+config ARM_S3C24XX_CPUFREQ
+	bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
+	depends on ARCH_S3C24XX
+	select ARM_S3C_CPUFREQ
+	help
+	  This enables the CPUfreq driver for the Samsung S3C24XX family
+	  of CPUs.
+
+	  For details, take a look at <file:Documentation/cpu-freq>.
+
+	  If in doubt, say N.
+
+config ARM_S3C24XX_CPUFREQ_DEBUG
+	bool "Debug CPUfreq Samsung driver core"
+	depends on ARM_S3C24XX_CPUFREQ
+	help
+	  Enable s3c_freq_dbg for the Samsung S3C CPUfreq core
+
+config ARM_S3C24XX_CPUFREQ_IODEBUG
+	bool "Debug CPUfreq Samsung driver IO timing"
+	depends on ARM_S3C24XX_CPUFREQ
+	help
+	  Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core
+
+config ARM_S3C24XX_CPUFREQ_DEBUGFS
+	bool "Export debugfs for CPUFreq"
+	depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS
+	help
+	  Export status information via debugfs.
+
+config ARM_S3C2410_CPUFREQ
+	bool
+	depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410
+	select S3C2410_CPUFREQ_UTILS
+	help
+	  CPU Frequency scaling support for S3C2410
+
+config ARM_S3C2412_CPUFREQ
+	bool
+	depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412
+	default y
+	select S3C2412_IOTIMING
+	help
+	  CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs.
+
 config ARM_S3C2416_CPUFREQ
 	bool "S3C2416 CPU Frequency scaling support"
 	depends on CPU_S3C2416
@@ -118,6 +168,14 @@ config ARM_S3C2416_CPUFREQ_VCORESCALE
 
 	  If in doubt, say N.
 
+config ARM_S3C2440_CPUFREQ
+	bool "S3C2440/S3C2442 CPU Frequency scaling support"
+	depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442)
+	select S3C2410_CPUFREQ_UTILS
+	default y
+	help
+	  CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs.
+
 config ARM_S3C64XX_CPUFREQ
 	bool "Samsung S3C64XX"
 	depends on CPU_S3C6410
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 315b9231feb1..6ad0b913ca17 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -65,7 +65,12 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
 obj-$(CONFIG_PXA25x)			+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA27x)			+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ)	+= s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
+obj-$(CONFIG_ARM_S3C2410_CPUFREQ)	+= s3c2410-cpufreq.o
+obj-$(CONFIG_ARM_S3C2412_CPUFREQ)	+= s3c2412-cpufreq.o
 obj-$(CONFIG_ARM_S3C2416_CPUFREQ)	+= s3c2416-cpufreq.o
+obj-$(CONFIG_ARM_S3C2440_CPUFREQ)	+= s3c2440-cpufreq.o
 obj-$(CONFIG_ARM_S3C64XX_CPUFREQ)	+= s3c64xx-cpufreq.o
 obj-$(CONFIG_ARM_S5PV210_CPUFREQ)	+= s5pv210-cpufreq.o
 obj-$(CONFIG_ARM_SA1100_CPUFREQ)	+= sa1100-cpufreq.o
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
new file mode 100644
index 000000000000..cfa0dd8723ec
--- /dev/null
+++ b/drivers/cpufreq/s3c2410-cpufreq.c
@@ -0,0 +1,160 @@
1/*
2 * Copyright (c) 2006-2008 Simtec Electronics
3 * http://armlinux.simtec.co.uk/
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C2410 CPU Frequency scaling
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/interrupt.h>
16#include <linux/ioport.h>
17#include <linux/cpufreq.h>
18#include <linux/device.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/io.h>
22
23#include <asm/mach/arch.h>
24#include <asm/mach/map.h>
25
26#include <mach/regs-clock.h>
27
28#include <plat/cpu.h>
29#include <plat/clock.h>
30#include <plat/cpu-freq-core.h>
31
32/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
33
34static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
35{
36 u32 clkdiv = 0;
37
38 if (cfg->divs.h_divisor == 2)
39 clkdiv |= S3C2410_CLKDIVN_HDIVN;
40
41 if (cfg->divs.p_divisor != cfg->divs.h_divisor)
42 clkdiv |= S3C2410_CLKDIVN_PDIVN;
43
44 __raw_writel(clkdiv, S3C2410_CLKDIVN);
45}
46
47static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
48{
49 unsigned long hclk, fclk, pclk;
50 unsigned int hdiv, pdiv;
51 unsigned long hclk_max;
52
53 fclk = cfg->freq.fclk;
54 hclk_max = cfg->max.hclk;
55
56 cfg->freq.armclk = fclk;
57
58 s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n",
59 __func__, fclk, hclk_max);
60
61 hdiv = (fclk > cfg->max.hclk) ? 2 : 1;
62 hclk = fclk / hdiv;
63
64 if (hclk > cfg->max.hclk) {
65 s3c_freq_dbg("%s: hclk too big\n", __func__);
66 return -EINVAL;
67 }
68
69 pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
70 pclk = hclk / pdiv;
71
72 if (pclk > cfg->max.pclk) {
73 s3c_freq_dbg("%s: pclk too big\n", __func__);
74 return -EINVAL;
75 }
76
77 pdiv *= hdiv;
78
79 /* record the result */
80 cfg->divs.p_divisor = pdiv;
81 cfg->divs.h_divisor = hdiv;
82
83 return 0;
84}
85
86static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
87 .max = {
88 .fclk = 200000000,
89 .hclk = 100000000,
90 .pclk = 50000000,
91 },
92
93 /* transition latency is about 5ms worst-case, so
94 * set 10ms to be sure */
95 .latency = 10000000,
96
97 .locktime_m = 150,
98 .locktime_u = 150,
99 .locktime_bits = 12,
100
101 .need_pll = 1,
102
103 .name = "s3c2410",
104 .calc_iotiming = s3c2410_iotiming_calc,
105 .set_iotiming = s3c2410_iotiming_set,
106 .get_iotiming = s3c2410_iotiming_get,
107 .resume_clocks = s3c2410_setup_clocks,
108
109 .set_fvco = s3c2410_set_fvco,
110 .set_refresh = s3c2410_cpufreq_setrefresh,
111 .set_divs = s3c2410_cpufreq_setdivs,
112 .calc_divs = s3c2410_cpufreq_calcdivs,
113
114 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
115};
116
117static int s3c2410_cpufreq_add(struct device *dev,
118 struct subsys_interface *sif)
119{
120 return s3c_cpufreq_register(&s3c2410_cpufreq_info);
121}
122
123static struct subsys_interface s3c2410_cpufreq_interface = {
124 .name = "s3c2410_cpufreq",
125 .subsys = &s3c2410_subsys,
126 .add_dev = s3c2410_cpufreq_add,
127};
128
129static int __init s3c2410_cpufreq_init(void)
130{
131 return subsys_interface_register(&s3c2410_cpufreq_interface);
132}
133arch_initcall(s3c2410_cpufreq_init);
134
135static int s3c2410a_cpufreq_add(struct device *dev,
136 struct subsys_interface *sif)
137{
138 /* alter the maximum freq settings for S3C2410A. If a board knows
139 * it only has a maximum of 200, then it should register its own
140 * limits. */
141
142 s3c2410_cpufreq_info.max.fclk = 266000000;
143 s3c2410_cpufreq_info.max.hclk = 133000000;
144 s3c2410_cpufreq_info.max.pclk = 66500000;
145 s3c2410_cpufreq_info.name = "s3c2410a";
146
147 return s3c2410_cpufreq_add(dev, sif);
148}
149
150static struct subsys_interface s3c2410a_cpufreq_interface = {
151 .name = "s3c2410a_cpufreq",
152 .subsys = &s3c2410a_subsys,
153 .add_dev = s3c2410a_cpufreq_add,
154};
155
156static int __init s3c2410a_cpufreq_init(void)
157{
158 return subsys_interface_register(&s3c2410a_cpufreq_interface);
159}
160arch_initcall(s3c2410a_cpufreq_init);
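
Editor's note, a worked example of s3c2410_cpufreq_calcdivs() above with assumed numbers rather than values from the patch: for fclk = 200 MHz against the default limits (hclk 100 MHz, pclk 50 MHz), hdiv becomes 2 (hclk = 100 MHz) and pdiv becomes 2 (pclk = 50 MHz); after the final pdiv *= hdiv step the recorded divisors are h_divisor = 2 and p_divisor = 4, which s3c2410_cpufreq_setdivs() turns into HDIVN set and, because p_divisor differs from h_divisor, PDIVN set as well.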
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
new file mode 100644
index 000000000000..4645b4898996
--- /dev/null
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright 2008 Simtec Electronics
3 * http://armlinux.simtec.co.uk/
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C2412 CPU Frequency scaling
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/interrupt.h>
16#include <linux/ioport.h>
17#include <linux/cpufreq.h>
18#include <linux/device.h>
19#include <linux/delay.h>
20#include <linux/clk.h>
21#include <linux/err.h>
22#include <linux/io.h>
23
24#include <asm/mach/arch.h>
25#include <asm/mach/map.h>
26
27#include <mach/regs-clock.h>
28#include <mach/s3c2412.h>
29
30#include <plat/cpu.h>
31#include <plat/clock.h>
32#include <plat/cpu-freq-core.h>
33
34/* our clock resources. */
35static struct clk *xtal;
36static struct clk *fclk;
37static struct clk *hclk;
38static struct clk *armclk;
39
40/* HDIV: 1, 2, 3, 4, 6, 8 */
41
42static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
43{
44 unsigned int hdiv, pdiv, armdiv, dvs;
45 unsigned long hclk, fclk, armclk, armdiv_clk;
46 unsigned long hclk_max;
47
48 fclk = cfg->freq.fclk;
49 armclk = cfg->freq.armclk;
50 hclk_max = cfg->max.hclk;
51
52 /* We can't run hclk above armclk as at the best we have to
53 * have armclk and hclk in dvs mode. */
54
55 if (hclk_max > armclk)
56 hclk_max = armclk;
57
58 s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n",
59 __func__, fclk, armclk, hclk_max);
60 s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n",
61 __func__, cfg->freq.fclk, cfg->freq.armclk,
62 cfg->freq.hclk, cfg->freq.pclk);
63
64 armdiv = fclk / armclk;
65
66 if (armdiv < 1)
67 armdiv = 1;
68 if (armdiv > 2)
69 armdiv = 2;
70
71 cfg->divs.arm_divisor = armdiv;
72 armdiv_clk = fclk / armdiv;
73
74 hdiv = armdiv_clk / hclk_max;
75 if (hdiv < 1)
76 hdiv = 1;
77
78 cfg->freq.hclk = hclk = armdiv_clk / hdiv;
79
80 /* set dvs depending on whether we reached armclk or not. */
81 cfg->divs.dvs = dvs = armclk < armdiv_clk;
82
83 /* update the actual armclk we achieved. */
84 cfg->freq.armclk = dvs ? hclk : armdiv_clk;
85
86 s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n",
87 __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs);
88
89 if (hdiv > 4)
90 goto invalid;
91
92 pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
93
94 if ((hclk / pdiv) > cfg->max.pclk)
95 pdiv++;
96
97 cfg->freq.pclk = hclk / pdiv;
98
99 s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
100
101 if (pdiv > 2)
102 goto invalid;
103
104 pdiv *= hdiv;
105
106 /* store the result, and then return */
107
108 cfg->divs.h_divisor = hdiv * armdiv;
109 cfg->divs.p_divisor = pdiv * armdiv;
110
111 return 0;
112
113invalid:
114 return -EINVAL;
115}
116
117static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
118{
119 unsigned long clkdiv;
120 unsigned long olddiv;
121
122 olddiv = clkdiv = __raw_readl(S3C2410_CLKDIVN);
123
124 /* clear off current clock info */
125
126 clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN;
127 clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK;
128 clkdiv &= ~S3C2412_CLKDIVN_PDIVN;
129
130 if (cfg->divs.arm_divisor == 2)
131 clkdiv |= S3C2412_CLKDIVN_ARMDIVN;
132
133 clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1);
134
135 if (cfg->divs.p_divisor != cfg->divs.h_divisor)
136 clkdiv |= S3C2412_CLKDIVN_PDIVN;
137
138 s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
139 __raw_writel(clkdiv, S3C2410_CLKDIVN);
140
141 clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
142}
143
144static void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
145{
146 struct s3c_cpufreq_board *board = cfg->board;
147 unsigned long refresh;
148
149 s3c_freq_dbg("%s: refresh %u ns, hclk %lu\n", __func__,
150 board->refresh, cfg->freq.hclk);
151
152 /* Reduce both the refresh time (in ns) and the frequency (in MHz)
153 * by 10 each to ensure that we do not overflow 32 bit numbers. This
154 * should work for HCLK up to 133MHz and refresh period up to 30usec.
155 */
156
157 refresh = (board->refresh / 10);
158 refresh *= (cfg->freq.hclk / 100);
159 refresh /= (1 * 1000 * 1000); /* 10^6 */
160
161 s3c_freq_dbg("%s: setting refresh 0x%08lx\n", __func__, refresh);
162 __raw_writel(refresh, S3C2412_REFRESH);
163}
164
165/* set the default cpu frequency information, based on an 200MHz part
166 * as we have no other way of detecting the speed rating in software.
167 */
168
169static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
170 .max = {
171 .fclk = 200000000,
172 .hclk = 100000000,
173 .pclk = 50000000,
174 },
175
176 .latency = 5000000, /* 5ms */
177
178 .locktime_m = 150,
179 .locktime_u = 150,
180 .locktime_bits = 16,
181
182 .name = "s3c2412",
183 .set_refresh = s3c2412_cpufreq_setrefresh,
184 .set_divs = s3c2412_cpufreq_setdivs,
185 .calc_divs = s3c2412_cpufreq_calcdivs,
186
187 .calc_iotiming = s3c2412_iotiming_calc,
188 .set_iotiming = s3c2412_iotiming_set,
189 .get_iotiming = s3c2412_iotiming_get,
190
191 .resume_clocks = s3c2412_setup_clocks,
192
193 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
194};
195
196static int s3c2412_cpufreq_add(struct device *dev,
197 struct subsys_interface *sif)
198{
199 unsigned long fclk_rate;
200
201 hclk = clk_get(NULL, "hclk");
202 if (IS_ERR(hclk)) {
203 printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
204 return -ENOENT;
205 }
206
207 fclk = clk_get(NULL, "fclk");
208 if (IS_ERR(fclk)) {
209 printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
210 goto err_fclk;
211 }
212
213 fclk_rate = clk_get_rate(fclk);
214 if (fclk_rate > 200000000) {
215 printk(KERN_INFO
216 "%s: fclk %ld MHz, assuming 266MHz capable part\n",
217 __func__, fclk_rate / 1000000);
218 s3c2412_cpufreq_info.max.fclk = 266000000;
219 s3c2412_cpufreq_info.max.hclk = 133000000;
220 s3c2412_cpufreq_info.max.pclk = 66000000;
221 }
222
223 armclk = clk_get(NULL, "armclk");
224 if (IS_ERR(armclk)) {
225 printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
226 goto err_armclk;
227 }
228
229 xtal = clk_get(NULL, "xtal");
230 if (IS_ERR(xtal)) {
231 printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
232 goto err_xtal;
233 }
234
235 return s3c_cpufreq_register(&s3c2412_cpufreq_info);
236
237err_xtal:
238 clk_put(armclk);
239err_armclk:
240 clk_put(fclk);
241err_fclk:
242 clk_put(hclk);
243
244 return -ENOENT;
245}
246
247static struct subsys_interface s3c2412_cpufreq_interface = {
248 .name = "s3c2412_cpufreq",
249 .subsys = &s3c2412_subsys,
250 .add_dev = s3c2412_cpufreq_add,
251};
252
253static int s3c2412_cpufreq_init(void)
254{
255 return subsys_interface_register(&s3c2412_cpufreq_interface);
256}
257arch_initcall(s3c2412_cpufreq_init);
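
Editor's note, to sanity-check the overflow-avoiding arithmetic in s3c2412_cpufreq_setrefresh() above (example numbers assumed): with a 7800 ns SDRAM refresh period and hclk = 133 MHz, refresh = (7800 / 10) * (133000000 / 100) / 1000000 = 780 * 1330000 / 1000000 = 1037 hclk cycles. Computing 7800 * 133000000 directly would be about 1.04e12 and overflow a 32-bit value, while the pre-scaled intermediate 780 * 1330000 is only about 1.04e9.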
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
new file mode 100644
index 000000000000..72b2cc8a5a85
--- /dev/null
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -0,0 +1,312 @@
1/*
2 * Copyright (c) 2006-2009 Simtec Electronics
3 * http://armlinux.simtec.co.uk/
4 * Ben Dooks <ben@simtec.co.uk>
5 * Vincent Sanders <vince@simtec.co.uk>
6 *
7 * S3C2440/S3C2442 CPU Frequency scaling
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/ioport.h>
18#include <linux/cpufreq.h>
19#include <linux/device.h>
20#include <linux/delay.h>
21#include <linux/clk.h>
22#include <linux/err.h>
23#include <linux/io.h>
24
25#include <mach/hardware.h>
26
27#include <asm/mach/arch.h>
28#include <asm/mach/map.h>
29
30#include <mach/regs-clock.h>
31
32#include <plat/cpu.h>
33#include <plat/cpu-freq-core.h>
34#include <plat/clock.h>
35
36static struct clk *xtal;
37static struct clk *fclk;
38static struct clk *hclk;
39static struct clk *armclk;
40
41/* HDIV: 1, 2, 3, 4, 6, 8 */
42
43static inline int within_khz(unsigned long a, unsigned long b)
44{
45 long diff = a - b;
46
47 return (diff >= -1000 && diff <= 1000);
48}
49
50/**
51 * s3c2440_cpufreq_calcdivs - calculate divider settings
52 * @cfg: The cpu frequency settings.
53 *
54 * Calculate the divider values for the given frequency settings
55 * specified in @cfg. The values are stored in @cfg for later use
56 * by the relevant set routine if the request settings can be reached.
57 */
58int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
59{
60 unsigned int hdiv, pdiv;
61 unsigned long hclk, fclk, armclk;
62 unsigned long hclk_max;
63
64 fclk = cfg->freq.fclk;
65 armclk = cfg->freq.armclk;
66 hclk_max = cfg->max.hclk;
67
68 s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n",
69 __func__, fclk, armclk, hclk_max);
70
71 if (armclk > fclk) {
72 printk(KERN_WARNING "%s: armclk > fclk\n", __func__);
73 armclk = fclk;
74 }
75
76 /* if we are in DVS, we need HCLK to be <= ARMCLK */
77 if (armclk < fclk && armclk < hclk_max)
78 hclk_max = armclk;
79
80 for (hdiv = 1; hdiv < 9; hdiv++) {
81 if (hdiv == 5 || hdiv == 7)
82 hdiv++;
83
84 hclk = (fclk / hdiv);
85 if (hclk <= hclk_max || within_khz(hclk, hclk_max))
86 break;
87 }
88
89 s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv);
90
91 if (hdiv > 8)
92 goto invalid;
93
94 pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
95
96 if ((hclk / pdiv) > cfg->max.pclk)
97 pdiv++;
98
99 s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
100
101 if (pdiv > 2)
102 goto invalid;
103
104 pdiv *= hdiv;
105
106 /* calculate a valid armclk */
107
108 if (armclk < hclk)
109 armclk = hclk;
110
111 /* if we're running armclk lower than fclk, this really means
112 * that the system should go into dvs mode, which means that
113 * armclk is connected to hclk. */
114 if (armclk < fclk) {
115 cfg->divs.dvs = 1;
116 armclk = hclk;
117 } else
118 cfg->divs.dvs = 0;
119
120 cfg->freq.armclk = armclk;
121
122 /* store the result, and then return */
123
124 cfg->divs.h_divisor = hdiv;
125 cfg->divs.p_divisor = pdiv;
126
127 return 0;
128
129 invalid:
130 return -EINVAL;
131}
132
133#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \
134 S3C2440_CAMDIVN_HCLK4_HALF)
135
136/**
137 * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings
138 * @cfg: The cpu frequency settings.
139 *
140 * Set the divisors from the settings in @cfg, which were generated
141 * during the calculation phase by s3c2440_cpufreq_calcdivs().
142 */
143static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
144{
145 unsigned long clkdiv, camdiv;
146
147 s3c_freq_dbg("%s: divsiors: h=%d, p=%d\n", __func__,
148 cfg->divs.h_divisor, cfg->divs.p_divisor);
149
150 clkdiv = __raw_readl(S3C2410_CLKDIVN);
151 camdiv = __raw_readl(S3C2440_CAMDIVN);
152
153 clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN);
154 camdiv &= ~CAMDIVN_HCLK_HALF;
155
156 switch (cfg->divs.h_divisor) {
157 case 1:
158 clkdiv |= S3C2440_CLKDIVN_HDIVN_1;
159 break;
160
161 case 2:
162 clkdiv |= S3C2440_CLKDIVN_HDIVN_2;
163 break;
164
165 case 6:
166 camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
167 case 3:
168 clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
169 break;
170
171 case 8:
172 camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
173 case 4:
174 clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
175 break;
176
177 default:
178 BUG(); /* we don't expect to get here. */
179 }
180
181 if (cfg->divs.p_divisor != cfg->divs.h_divisor)
182 clkdiv |= S3C2440_CLKDIVN_PDIVN;
183
184 /* todo - set pclk. */
185
186 /* Write the divisors first with hclk intentionally halved so that
187 * when we write clkdiv we will under-frequency instead of over. We
188 * then make a short delay and remove the hclk halving if necessary.
189 */
190
191 __raw_writel(camdiv | CAMDIVN_HCLK_HALF, S3C2440_CAMDIVN);
192 __raw_writel(clkdiv, S3C2410_CLKDIVN);
193
194 ndelay(20);
195 __raw_writel(camdiv, S3C2440_CAMDIVN);
196
197 clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
198}
199
200static int run_freq_for(unsigned long max_hclk, unsigned long fclk,
201 int *divs,
202 struct cpufreq_frequency_table *table,
203 size_t table_size)
204{
205 unsigned long freq;
206 int index = 0;
207 int div;
208
209 for (div = *divs; div > 0; div = *divs++) {
210 freq = fclk / div;
211
212 if (freq > max_hclk && div != 1)
213 continue;
214
215 freq /= 1000; /* table is in kHz */
216 index = s3c_cpufreq_addfreq(table, index, table_size, freq);
217 if (index < 0)
218 break;
219 }
220
221 return index;
222}
223
224static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 };
225
226static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
227 struct cpufreq_frequency_table *table,
228 size_t table_size)
229{
230 int ret;
231
232 WARN_ON(cfg->info == NULL);
233 WARN_ON(cfg->board == NULL);
234
235 ret = run_freq_for(cfg->info->max.hclk,
236 cfg->info->max.fclk,
237 hclk_divs,
238 table, table_size);
239
240 s3c_freq_dbg("%s: returning %d\n", __func__, ret);
241
242 return ret;
243}
244
245struct s3c_cpufreq_info s3c2440_cpufreq_info = {
246 .max = {
247 .fclk = 400000000,
248 .hclk = 133333333,
249 .pclk = 66666666,
250 },
251
252 .locktime_m = 300,
253 .locktime_u = 300,
254 .locktime_bits = 16,
255
256 .name = "s3c244x",
257 .calc_iotiming = s3c2410_iotiming_calc,
258 .set_iotiming = s3c2410_iotiming_set,
259 .get_iotiming = s3c2410_iotiming_get,
260 .set_fvco = s3c2410_set_fvco,
261
262 .set_refresh = s3c2410_cpufreq_setrefresh,
263 .set_divs = s3c2440_cpufreq_setdivs,
264 .calc_divs = s3c2440_cpufreq_calcdivs,
265 .calc_freqtable = s3c2440_cpufreq_calctable,
266
267 .resume_clocks = s3c244x_setup_clocks,
268
269 .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
270};
271
272static int s3c2440_cpufreq_add(struct device *dev,
273 struct subsys_interface *sif)
274{
275 xtal = s3c_cpufreq_clk_get(NULL, "xtal");
276 hclk = s3c_cpufreq_clk_get(NULL, "hclk");
277 fclk = s3c_cpufreq_clk_get(NULL, "fclk");
278 armclk = s3c_cpufreq_clk_get(NULL, "armclk");
279
280 if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
281 printk(KERN_ERR "%s: failed to get clocks\n", __func__);
282 return -ENOENT;
283 }
284
285 return s3c_cpufreq_register(&s3c2440_cpufreq_info);
286}
287
288static struct subsys_interface s3c2440_cpufreq_interface = {
289 .name = "s3c2440_cpufreq",
290 .subsys = &s3c2440_subsys,
291 .add_dev = s3c2440_cpufreq_add,
292};
293
294static int s3c2440_cpufreq_init(void)
295{
296 return subsys_interface_register(&s3c2440_cpufreq_interface);
297}
298
299/* arch_initcall adds the clocks we need, so use subsys_initcall. */
300subsys_initcall(s3c2440_cpufreq_init);
301
302static struct subsys_interface s3c2442_cpufreq_interface = {
303 .name = "s3c2442_cpufreq",
304 .subsys = &s3c2442_subsys,
305 .add_dev = s3c2440_cpufreq_add,
306};
307
308static int s3c2442_cpufreq_init(void)
309{
310 return subsys_interface_register(&s3c2442_cpufreq_interface);
311}
312subsys_initcall(s3c2442_cpufreq_init);
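
Editor's note, an illustration of run_freq_for() above (values assumed, not from the patch): with fclk = 400 MHz, a 133.33 MHz hclk limit and hclk_divs = {1, 2, 3, 4, 6, 8}, the divide-by-1 entry is always kept, divide-by-2 (200 MHz) is dropped for exceeding the limit, and the resulting table in kHz is roughly 400000, 133333, 100000, 66666, 50000, which the s3c24xx core code later terminates with CPUFREQ_TABLE_END.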
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
new file mode 100644
index 000000000000..9b7b4289d66c
--- /dev/null
+++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright (c) 2009 Simtec Electronics
3 * http://armlinux.simtec.co.uk/
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX CPU Frequency scaling - debugfs status support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/init.h>
14#include <linux/export.h>
15#include <linux/interrupt.h>
16#include <linux/ioport.h>
17#include <linux/cpufreq.h>
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20#include <linux/err.h>
21
22#include <plat/cpu-freq-core.h>
23
24static struct dentry *dbgfs_root;
25static struct dentry *dbgfs_file_io;
26static struct dentry *dbgfs_file_info;
27static struct dentry *dbgfs_file_board;
28
29#define print_ns(x) ((x) / 10), ((x) % 10)
30
31static void show_max(struct seq_file *seq, struct s3c_freq *f)
32{
33 seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n",
34 f->fclk, f->hclk, f->pclk, f->armclk);
35}
36
37static int board_show(struct seq_file *seq, void *p)
38{
39 struct s3c_cpufreq_config *cfg;
40 struct s3c_cpufreq_board *brd;
41
42 cfg = s3c_cpufreq_getconfig();
43 if (!cfg) {
44 seq_printf(seq, "no configuration registered\n");
45 return 0;
46 }
47
48 brd = cfg->board;
49 if (!brd) {
50 seq_printf(seq, "no board definition set?\n");
51 return 0;
52 }
53
54 seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh);
55 seq_printf(seq, "auto_io=%u\n", brd->auto_io);
56 seq_printf(seq, "need_io=%u\n", brd->need_io);
57
58 show_max(seq, &brd->max);
59
60
61 return 0;
62}
63
64static int fops_board_open(struct inode *inode, struct file *file)
65{
66 return single_open(file, board_show, NULL);
67}
68
69static const struct file_operations fops_board = {
70 .open = fops_board_open,
71 .read = seq_read,
72 .llseek = seq_lseek,
73 .release = single_release,
74 .owner = THIS_MODULE,
75};
76
77static int info_show(struct seq_file *seq, void *p)
78{
79 struct s3c_cpufreq_config *cfg;
80
81 cfg = s3c_cpufreq_getconfig();
82 if (!cfg) {
83 seq_printf(seq, "no configuration registered\n");
84 return 0;
85 }
86
87 seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk);
88 seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n",
89 cfg->freq.hclk, print_ns(cfg->freq.hclk_tns));
90 seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.hclk);
91 seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk);
92 seq_printf(seq, "\n");
93
94 show_max(seq, &cfg->max);
95
96 seq_printf(seq, "Divisors: P=%d, H=%d, A=%d, dvs=%s\n",
97 cfg->divs.h_divisor, cfg->divs.p_divisor,
98 cfg->divs.arm_divisor, cfg->divs.dvs ? "on" : "off");
99 seq_printf(seq, "\n");
100
101 seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll);
102
103 return 0;
104}
105
106static int fops_info_open(struct inode *inode, struct file *file)
107{
108 return single_open(file, info_show, NULL);
109}
110
111static const struct file_operations fops_info = {
112 .open = fops_info_open,
113 .read = seq_read,
114 .llseek = seq_lseek,
115 .release = single_release,
116 .owner = THIS_MODULE,
117};
118
119static int io_show(struct seq_file *seq, void *p)
120{
121 void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *);
122 struct s3c_cpufreq_config *cfg;
123 struct s3c_iotimings *iot;
124 union s3c_iobank *iob;
125 int bank;
126
127 cfg = s3c_cpufreq_getconfig();
128 if (!cfg) {
129 seq_printf(seq, "no configuration registered\n");
130 return 0;
131 }
132
133 show_bank = cfg->info->debug_io_show;
134 if (!show_bank) {
135 seq_printf(seq, "no code to show bank timing\n");
136 return 0;
137 }
138
139 iot = s3c_cpufreq_getiotimings();
140 if (!iot) {
141 seq_printf(seq, "no io timings registered\n");
142 return 0;
143 }
144
145 seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns));
146
147 for (bank = 0; bank < MAX_BANKS; bank++) {
148 iob = &iot->bank[bank];
149
150 seq_printf(seq, "bank %d: ", bank);
151
152 if (!iob->io_2410) {
153 seq_printf(seq, "nothing set\n");
154 continue;
155 }
156
157 show_bank(seq, cfg, iob);
158 }
159
160 return 0;
161}
162
163static int fops_io_open(struct inode *inode, struct file *file)
164{
165 return single_open(file, io_show, NULL);
166}
167
168static const struct file_operations fops_io = {
169 .open = fops_io_open,
170 .read = seq_read,
171 .llseek = seq_lseek,
172 .release = single_release,
173 .owner = THIS_MODULE,
174};
175
176
177static int __init s3c_freq_debugfs_init(void)
178{
179 dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
180 if (IS_ERR(dbgfs_root)) {
181 printk(KERN_ERR "%s: error creating debugfs root\n", __func__);
182 return PTR_ERR(dbgfs_root);
183 }
184
185 dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root,
186 NULL, &fops_io);
187
188 dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root,
189 NULL, &fops_info);
190
191 dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root,
192 NULL, &fops_board);
193
194 return 0;
195}
196
197late_initcall(s3c_freq_debugfs_init);
198
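Editor's usage note: with ARM_S3C24XX_CPUFREQ_DEBUGFS enabled, the files created above appear as board, info and io-timing under the s3c-cpufreq directory of a mounted debugfs (typically /sys/kernel/debug/s3c-cpufreq/), and each can simply be read to dump the current state.
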
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
new file mode 100644
index 000000000000..3c0e78ede0da
--- /dev/null
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -0,0 +1,711 @@
1/*
2 * Copyright (c) 2006-2008 Simtec Electronics
3 * http://armlinux.simtec.co.uk/
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX CPU Frequency scaling
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/interrupt.h>
16#include <linux/ioport.h>
17#include <linux/cpufreq.h>
18#include <linux/cpu.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/io.h>
22#include <linux/device.h>
23#include <linux/sysfs.h>
24#include <linux/slab.h>
25
26#include <asm/mach/arch.h>
27#include <asm/mach/map.h>
28
29#include <plat/cpu.h>
30#include <plat/clock.h>
31#include <plat/cpu-freq-core.h>
32
33#include <mach/regs-clock.h>
34
35/* note, cpufreq support deals in kHz, no Hz */
36
37static struct cpufreq_driver s3c24xx_driver;
38static struct s3c_cpufreq_config cpu_cur;
39static struct s3c_iotimings s3c24xx_iotiming;
40static struct cpufreq_frequency_table *pll_reg;
41static unsigned int last_target = ~0;
42static unsigned int ftab_size;
43static struct cpufreq_frequency_table *ftab;
44
45static struct clk *_clk_mpll;
46static struct clk *_clk_xtal;
47static struct clk *clk_fclk;
48static struct clk *clk_hclk;
49static struct clk *clk_pclk;
50static struct clk *clk_arm;
51
52#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
53struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
54{
55 return &cpu_cur;
56}
57
58struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
59{
60 return &s3c24xx_iotiming;
61}
62#endif /* CONFIG_CPU_FREQ_S3C24XX_DEBUGFS */
63
64static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
65{
66 unsigned long fclk, pclk, hclk, armclk;
67
68 cfg->freq.fclk = fclk = clk_get_rate(clk_fclk);
69 cfg->freq.hclk = hclk = clk_get_rate(clk_hclk);
70 cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
71 cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
72
73 cfg->pll.index = __raw_readl(S3C2410_MPLLCON);
74 cfg->pll.frequency = fclk;
75
76 cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
77
78 cfg->divs.h_divisor = fclk / hclk;
79 cfg->divs.p_divisor = fclk / pclk;
80}
81
82static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg)
83{
84 unsigned long pll = cfg->pll.frequency;
85
86 cfg->freq.fclk = pll;
87 cfg->freq.hclk = pll / cfg->divs.h_divisor;
88 cfg->freq.pclk = pll / cfg->divs.p_divisor;
89
90 /* convert hclk into 10ths of nanoseconds for io calcs */
91 cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
92}
93
94static inline int closer(unsigned int target, unsigned int n, unsigned int c)
95{
96 int diff_cur = abs(target - c);
97 int diff_new = abs(target - n);
98
99 return (diff_new < diff_cur);
100}
101
102static void s3c_cpufreq_show(const char *pfx,
103 struct s3c_cpufreq_config *cfg)
104{
105 s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n",
106 pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk,
107 cfg->freq.hclk, cfg->divs.h_divisor,
108 cfg->freq.pclk, cfg->divs.p_divisor);
109}
110
111/* functions to wrapper the driver info calls to do the cpu specific work */
112
113static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg)
114{
115 if (cfg->info->set_iotiming)
116 (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming);
117}
118
119static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg)
120{
121 if (cfg->info->calc_iotiming)
122 return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming);
123
124 return 0;
125}
126
127static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
128{
129 (cfg->info->set_refresh)(cfg);
130}
131
132static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
133{
134 (cfg->info->set_divs)(cfg);
135}
136
137static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
138{
139 return (cfg->info->calc_divs)(cfg);
140}
141
142static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
143{
144 (cfg->info->set_fvco)(cfg);
145}
146
147static inline void s3c_cpufreq_resume_clocks(void)
148{
149 cpu_cur.info->resume_clocks();
150}
151
152static inline void s3c_cpufreq_updateclk(struct clk *clk,
153 unsigned int freq)
154{
155 clk_set_rate(clk, freq);
156}
157
158static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
159 unsigned int target_freq,
160 struct cpufreq_frequency_table *pll)
161{
162 struct s3c_cpufreq_freqs freqs;
163 struct s3c_cpufreq_config cpu_new;
164 unsigned long flags;
165
166 cpu_new = cpu_cur; /* copy new from current */
167
168 s3c_cpufreq_show("cur", &cpu_cur);
169
170 /* TODO - check for DMA currently outstanding */
171
172 cpu_new.pll = pll ? *pll : cpu_cur.pll;
173
174 if (pll)
175 freqs.pll_changing = 1;
176
177 /* update our frequencies */
178
179 cpu_new.freq.armclk = target_freq;
180 cpu_new.freq.fclk = cpu_new.pll.frequency;
181
182 if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
183 printk(KERN_ERR "no divisors for %d\n", target_freq);
184 goto err_notpossible;
185 }
186
187 s3c_freq_dbg("%s: got divs\n", __func__);
188
189 s3c_cpufreq_calc(&cpu_new);
190
191 s3c_freq_dbg("%s: calculated frequencies for new\n", __func__);
192
193 if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
194 if (s3c_cpufreq_calcio(&cpu_new) < 0) {
195 printk(KERN_ERR "%s: no IO timings\n", __func__);
196 goto err_notpossible;
197 }
198 }
199
200 s3c_cpufreq_show("new", &cpu_new);
201
202 /* setup our cpufreq parameters */
203
204 freqs.old = cpu_cur.freq;
205 freqs.new = cpu_new.freq;
206
207 freqs.freqs.old = cpu_cur.freq.armclk / 1000;
208 freqs.freqs.new = cpu_new.freq.armclk / 1000;
209
210 /* update f/h/p clock settings before we issue the change
211 * notification, so that drivers do not need to do anything
212 * special if they want to recalculate on CPUFREQ_PRECHANGE. */
213
214 s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency);
215 s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk);
216 s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk);
217 s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
218
219 /* start the frequency change */
220 cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE);
221
222 /* If hclk is staying the same, then we do not need to
223 * re-write the IO or the refresh timings whilst we are changing
224 * speed. */
225
226 local_irq_save(flags);
227
228 /* is our memory clock slowing down? */
229 if (cpu_new.freq.hclk < cpu_cur.freq.hclk) {
230 s3c_cpufreq_setrefresh(&cpu_new);
231 s3c_cpufreq_setio(&cpu_new);
232 }
233
234 if (cpu_new.freq.fclk == cpu_cur.freq.fclk) {
235 /* not changing PLL, just set the divisors */
236
237 s3c_cpufreq_setdivs(&cpu_new);
238 } else {
239 if (cpu_new.freq.fclk < cpu_cur.freq.fclk) {
240 /* slow the cpu down, then set divisors */
241
242 s3c_cpufreq_setfvco(&cpu_new);
243 s3c_cpufreq_setdivs(&cpu_new);
244 } else {
245 /* set the divisors, then speed up */
246
247 s3c_cpufreq_setdivs(&cpu_new);
248 s3c_cpufreq_setfvco(&cpu_new);
249 }
250 }
251
252 /* did our memory clock speed up */
253 if (cpu_new.freq.hclk > cpu_cur.freq.hclk) {
254 s3c_cpufreq_setrefresh(&cpu_new);
255 s3c_cpufreq_setio(&cpu_new);
256 }
257
258 /* update our current settings */
259 cpu_cur = cpu_new;
260
261 local_irq_restore(flags);
262
263 /* notify everyone we've done this */
264 cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE);
265
266 s3c_freq_dbg("%s: finished\n", __func__);
267 return 0;
268
269 err_notpossible:
270 printk(KERN_ERR "no compatible settings for %d\n", target_freq);
271 return -EINVAL;
272}
273
274/* s3c_cpufreq_target
275 *
276 * called by the cpufreq core to adjust the frequency that the CPU
277 * is currently running at.
278 */
279
280static int s3c_cpufreq_target(struct cpufreq_policy *policy,
281 unsigned int target_freq,
282 unsigned int relation)
283{
284 struct cpufreq_frequency_table *pll;
285 unsigned int index;
286
287 /* avoid repeated calls which cause a needless amount of duplicated
288 * logging output (and CPU time as the calculation process is
289 * done) */
290 if (target_freq == last_target)
291 return 0;
292
293 last_target = target_freq;
294
295 s3c_freq_dbg("%s: policy %p, target %u, relation %u\n",
296 __func__, policy, target_freq, relation);
297
298 if (ftab) {
299 if (cpufreq_frequency_table_target(policy, ftab,
300 target_freq, relation,
301 &index)) {
302 s3c_freq_dbg("%s: table failed\n", __func__);
303 return -EINVAL;
304 }
305
306 s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
307 target_freq, index, ftab[index].frequency);
308 target_freq = ftab[index].frequency;
309 }
310
311 target_freq *= 1000; /* convert target to Hz */
312
313 /* find the settings for our new frequency */
314
315 if (!pll_reg || cpu_cur.lock_pll) {
316 /* either we've not got any PLL values, or we've locked
317 * to the current one. */
318 pll = NULL;
319 } else {
320 struct cpufreq_policy tmp_policy;
321 int ret;
322
323 /* we keep the cpu pll table in Hz, to ensure we get an
324 * accurate value for the PLL output. */
325
326 tmp_policy.min = policy->min * 1000;
327 tmp_policy.max = policy->max * 1000;
328 tmp_policy.cpu = policy->cpu;
329
330 /* cpufreq_frequency_table_target uses a pointer to 'index'
331 * which is the number of the table entry, not the value of
332 * the table entry's index field. */
333
334 ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg,
335 target_freq, relation,
336 &index);
337
338 if (ret < 0) {
339 printk(KERN_ERR "%s: no PLL available\n", __func__);
340 goto err_notpossible;
341 }
342
343 pll = pll_reg + index;
344
345 s3c_freq_dbg("%s: target %u => %u\n",
346 __func__, target_freq, pll->frequency);
347
348 target_freq = pll->frequency;
349 }
350
351 return s3c_cpufreq_settarget(policy, target_freq, pll);
352
353 err_notpossible:
354 printk(KERN_ERR "no compatible settings for %d\n", target_freq);
355 return -EINVAL;
356}
357
358static unsigned int s3c_cpufreq_get(unsigned int cpu)
359{
360 return clk_get_rate(clk_arm) / 1000;
361}
362
363struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
364{
365 struct clk *clk;
366
367 clk = clk_get(dev, name);
368 if (IS_ERR(clk))
369 printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
370
371 return clk;
372}
373
374static int s3c_cpufreq_init(struct cpufreq_policy *policy)
375{
376 printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
377
378 if (policy->cpu != 0)
379 return -EINVAL;
380
381 policy->cur = s3c_cpufreq_get(0);
382 policy->min = policy->cpuinfo.min_freq = 0;
383 policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
384 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
385
386 /* feed the latency information from the cpu driver */
387 policy->cpuinfo.transition_latency = cpu_cur.info->latency;
388
389 if (ftab)
390 cpufreq_frequency_table_cpuinfo(policy, ftab);
391
392 return 0;
393}
394
395static __init int s3c_cpufreq_initclks(void)
396{
397 _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
398 _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
399 clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk");
400 clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk");
401 clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk");
402 clk_arm = s3c_cpufreq_clk_get(NULL, "armclk");
403
404 if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
405 IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
406 printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
407 return -ENOENT;
408 }
409
410 printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
411 clk_get_rate(clk_fclk) / 1000,
412 clk_get_rate(clk_hclk) / 1000,
413 clk_get_rate(clk_pclk) / 1000,
414 clk_get_rate(clk_arm) / 1000);
415
416 return 0;
417}
418
419static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
420{
421 if (policy->cpu != 0)
422 return -EINVAL;
423
424 return 0;
425}
426
427#ifdef CONFIG_PM
428static struct cpufreq_frequency_table suspend_pll;
429static unsigned int suspend_freq;
430
431static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
432{
433 suspend_pll.frequency = clk_get_rate(_clk_mpll);
434 suspend_pll.index = __raw_readl(S3C2410_MPLLCON);
435 suspend_freq = s3c_cpufreq_get(0) * 1000;
436
437 return 0;
438}
439
440static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
441{
442 int ret;
443
444 s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy);
445
446 last_target = ~0; /* invalidate last_target setting */
447
448 /* first, find out what speed we resumed at. */
449 s3c_cpufreq_resume_clocks();
450
451 /* whilst we will be called later on, we try and re-set the
452 * cpu frequencies as soon as possible so that we do not end
453 * up resuming devices and then immediately having to re-set
454 * a number of settings once these devices have restarted.
455 *
456 * as a note, it is expected devices are not used until they
457 * have been un-suspended and at that time they should have
458 * used the updated clock settings.
459 */
460
461 ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
462 if (ret) {
463 printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
464 return ret;
465 }
466
467 return 0;
468}
469#else
470#define s3c_cpufreq_resume NULL
471#define s3c_cpufreq_suspend NULL
472#endif
473
474static struct cpufreq_driver s3c24xx_driver = {
475 .flags = CPUFREQ_STICKY,
476 .verify = s3c_cpufreq_verify,
477 .target = s3c_cpufreq_target,
478 .get = s3c_cpufreq_get,
479 .init = s3c_cpufreq_init,
480 .suspend = s3c_cpufreq_suspend,
481 .resume = s3c_cpufreq_resume,
482 .name = "s3c24xx",
483};
484
485
486int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
487{
488 if (!info || !info->name) {
489 printk(KERN_ERR "%s: failed to pass valid information\n",
490 __func__);
491 return -EINVAL;
492 }
493
494 printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
495 info->name);
496
497 /* check our driver info has valid data */
498
499 BUG_ON(info->set_refresh == NULL);
500 BUG_ON(info->set_divs == NULL);
501 BUG_ON(info->calc_divs == NULL);
502
503 /* info->set_fvco is optional, depending on whether there
504 * is a need to set the clock code. */
505
506 cpu_cur.info = info;
507
508 /* Note, driver registering should probably update locktime */
509
510 return 0;
511}
512
513int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
514{
515 struct s3c_cpufreq_board *ours;
516
517 if (!board) {
518 printk(KERN_INFO "%s: no board data\n", __func__);
519 return -EINVAL;
520 }
521
522 /* Copy the board information so that each board can make this
523 * initdata. */
524
525 ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL);
526 if (ours == NULL) {
527 printk(KERN_ERR "%s: no memory\n", __func__);
528 return -ENOMEM;
529 }
530
531 *ours = *board;
532 cpu_cur.board = ours;
533
534 return 0;
535}
536
537int __init s3c_cpufreq_auto_io(void)
538{
539 int ret;
540
541 if (!cpu_cur.info->get_iotiming) {
542 printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
543 return -ENOENT;
544 }
545
546 printk(KERN_INFO "%s: working out IO settings\n", __func__);
547
548 ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
549 if (ret)
550 printk(KERN_ERR "%s: failed to get timings\n", __func__);
551
552 return ret;
553}
554
555 /* if one or the other is zero, return the non-zero one, otherwise return the min */
556#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b))
557
558/**
559 * s3c_cpufreq_freq_min - find the minimum settings for the given freq.
560 * @dst: The destination structure
561 * @a: One argument.
562 * @b: The other argument.
563 *
564 * Fill @dst with the minimum of each frequency entry in the two
565 * 'struct s3c_freq' arguments; if either entry is zero it is ignored
566 * and the non-zero value is used instead.
567 */
568static void s3c_cpufreq_freq_min(struct s3c_freq *dst,
569 struct s3c_freq *a, struct s3c_freq *b)
570{
571 dst->fclk = do_min(a->fclk, b->fclk);
572 dst->hclk = do_min(a->hclk, b->hclk);
573 dst->pclk = do_min(a->pclk, b->pclk);
574 dst->armclk = do_min(a->armclk, b->armclk);
575}
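/*
 * Editorial sketch, not part of the commit: with invented limits of
 * board->max = { .fclk = 400 MHz, .hclk = 0, .pclk = 66 MHz, .armclk = 400 MHz }
 * and info->max = { .fclk = 533 MHz, .hclk = 133 MHz, .pclk = 0, .armclk = 533 MHz },
 * s3c_cpufreq_freq_min() fills dst with { 400 MHz, 133 MHz, 66 MHz, 400 MHz }:
 * do_min() picks the smaller value, or the non-zero one whenever either
 * argument is zero.
 */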
576
577static inline u32 calc_locktime(u32 freq, u32 time_us)
578{
579 u32 result;
580
581 result = freq * time_us;
582 result = DIV_ROUND_UP(result, 1000 * 1000);
583
584 return result;
585}
586
587static void s3c_cpufreq_update_loctkime(void)
588{
589 unsigned int bits = cpu_cur.info->locktime_bits;
590 u32 rate = (u32)clk_get_rate(_clk_xtal);
591 u32 val;
592
593 if (bits == 0) {
594 WARN_ON(1);
595 return;
596 }
597
598 val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
599 val |= calc_locktime(rate, cpu_cur.info->locktime_m);
600
601 printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
602 __raw_writel(val, S3C2410_LOCKTIME);
603}
604
605static int s3c_cpufreq_build_freq(void)
606{
607 int size, ret;
608
609 if (!cpu_cur.info->calc_freqtable)
610 return -EINVAL;
611
612 kfree(ftab);
613 ftab = NULL;
614
615 size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
616 size++;
617
618 ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL);
619 if (!ftab) {
620 printk(KERN_ERR "%s: no memory for tables\n", __func__);
621 return -ENOMEM;
622 }
623
624 ftab_size = size;
625
626 ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size);
627 s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END);
628
629 return 0;
630}
631
632static int __init s3c_cpufreq_initcall(void)
633{
634 int ret = 0;
635
636 if (cpu_cur.info && cpu_cur.board) {
637 ret = s3c_cpufreq_initclks();
638 if (ret)
639 goto out;
640
641 /* get current settings */
642 s3c_cpufreq_getcur(&cpu_cur);
643 s3c_cpufreq_show("cur", &cpu_cur);
644
645 if (cpu_cur.board->auto_io) {
646 ret = s3c_cpufreq_auto_io();
647 if (ret) {
648 printk(KERN_ERR "%s: failed to get io timing\n",
649 __func__);
650 goto out;
651 }
652 }
653
654 if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
655 printk(KERN_ERR "%s: no IO support registered\n",
656 __func__);
657 ret = -EINVAL;
658 goto out;
659 }
660
661 if (!cpu_cur.info->need_pll)
662 cpu_cur.lock_pll = 1;
663
664 s3c_cpufreq_update_loctkime();
665
666 s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max,
667 &cpu_cur.info->max);
668
669 if (cpu_cur.info->calc_freqtable)
670 s3c_cpufreq_build_freq();
671
672 ret = cpufreq_register_driver(&s3c24xx_driver);
673 }
674
675 out:
676 return ret;
677}
678
679late_initcall(s3c_cpufreq_initcall);
680
681/**
682 * s3c_plltab_register - register CPU PLL table.
683 * @plls: The list of PLL entries.
684 * @plls_no: The number of entries in @plls.
685 *
686 * Register the given set of PLLs with the system.
687 */
688int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
689 unsigned int plls_no)
690{
691 struct cpufreq_frequency_table *vals;
692 unsigned int size;
693
694 size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1);
695
696 vals = kmalloc(size, GFP_KERNEL);
697 if (vals) {
698 memcpy(vals, plls, size);
699 pll_reg = vals;
700
701 /* write a terminating entry, we don't store it in the
702 * table that is stored in the kernel */
703 vals += plls_no;
704 vals->frequency = CPUFREQ_TABLE_END;
705
706 printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
707 } else
708 printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
709
710 return vals ? 0 : -ENOMEM;
711}
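Editor's sketch, not part of the commit: the LOCKTIME value programmed by s3c_cpufreq_update_loctkime() above is just two cycle counts packed into one register, each produced by calc_locktime(). The stand-alone program below reproduces that arithmetic; the 12 MHz crystal, the 150/300 us lock times and the 16-bit field split are invented example numbers, not values taken from this driver.

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Number of xtal cycles needed to cover time_us microseconds. */
static unsigned int calc_locktime(unsigned int freq, unsigned int time_us)
{
        unsigned long long cycles = (unsigned long long)freq * time_us;

        return (unsigned int)DIV_ROUND_UP(cycles, 1000000ULL);
}

int main(void)
{
        unsigned int xtal = 12000000;   /* assumed 12 MHz crystal */
        unsigned int bits = 16;         /* assumed locktime_bits */
        unsigned int val;

        /* UPLL lock count in the high half, MPLL count in the low half;
         * 12 MHz * 150 us = 1800 cycles, 12 MHz * 300 us = 3600 cycles. */
        val = calc_locktime(xtal, 150) << bits;
        val |= calc_locktime(xtal, 300);

        printf("LOCKTIME = 0x%08x\n", val);
        return 0;
}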
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 3eafa903ebcd..43a0c8a26ab0 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -291,7 +291,7 @@ void cryp_save_device_context(struct cryp_device_data *device_data,
291 int cryp_mode) 291 int cryp_mode)
292{ 292{
293 enum cryp_algo_mode algomode; 293 enum cryp_algo_mode algomode;
294 struct cryp_register *src_reg = device_data->base; 294 struct cryp_register __iomem *src_reg = device_data->base;
295 struct cryp_config *config = 295 struct cryp_config *config =
296 (struct cryp_config *)device_data->current_ctx; 296 (struct cryp_config *)device_data->current_ctx;
297 297
@@ -349,7 +349,7 @@ void cryp_save_device_context(struct cryp_device_data *device_data,
349void cryp_restore_device_context(struct cryp_device_data *device_data, 349void cryp_restore_device_context(struct cryp_device_data *device_data,
350 struct cryp_device_context *ctx) 350 struct cryp_device_context *ctx)
351{ 351{
352 struct cryp_register *reg = device_data->base; 352 struct cryp_register __iomem *reg = device_data->base;
353 struct cryp_config *config = 353 struct cryp_config *config =
354 (struct cryp_config *)device_data->current_ctx; 354 (struct cryp_config *)device_data->current_ctx;
355 355
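Editor's note: the two hunks above only add the __iomem qualifier to local register pointers so that sparse can flag direct dereferences of memory-mapped registers. A minimal sketch of the pattern, using a made-up register layout rather than the real cryp block:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register block, not the cryp hardware layout. */
struct reg_block {
        u32 cr;
        u32 sr;
};

static u32 read_status(void __iomem *base)
{
        struct reg_block __iomem *regs = base;

        /* MMIO reads must go through the accessors; "return regs->sr;"
         * would compile but bypass readl() and trip sparse. */
        return readl(&regs->sr);
}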
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
index 14cfd05b777a..d1d6606fe56c 100644
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -114,6 +114,9 @@ enum cryp_status_id {
114}; 114};
115 115
116/* Cryp DMA interface */ 116/* Cryp DMA interface */
117#define CRYP_DMA_TX_FIFO 0x08
118#define CRYP_DMA_RX_FIFO 0x10
119
117enum cryp_dma_req_type { 120enum cryp_dma_req_type {
118 CRYP_DMA_DISABLE_BOTH, 121 CRYP_DMA_DISABLE_BOTH,
119 CRYP_DMA_ENABLE_IN_DATA, 122 CRYP_DMA_ENABLE_IN_DATA,
@@ -217,7 +220,8 @@ struct cryp_dma {
217 220
218/** 221/**
219 * struct cryp_device_data - structure for a cryp device. 222 * struct cryp_device_data - structure for a cryp device.
220 * @base: Pointer to the hardware base address. 223 * @base: Pointer to virtual base address of the cryp device.
224 * @phybase: Pointer to physical memory location of the cryp device.
221 * @dev: Pointer to the devices dev structure. 225 * @dev: Pointer to the devices dev structure.
222 * @clk: Pointer to the device's clock control. 226 * @clk: Pointer to the device's clock control.
223 * @pwr_regulator: Pointer to the device's power control. 227 * @pwr_regulator: Pointer to the device's power control.
@@ -232,6 +236,7 @@ struct cryp_dma {
232 */ 236 */
233struct cryp_device_data { 237struct cryp_device_data {
234 struct cryp_register __iomem *base; 238 struct cryp_register __iomem *base;
239 phys_addr_t phybase;
235 struct device *dev; 240 struct device *dev;
236 struct clk *clk; 241 struct clk *clk;
237 struct regulator *pwr_regulator; 242 struct regulator *pwr_regulator;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 8c2777cf02f6..83d79b964d12 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -475,6 +475,19 @@ static int cryp_get_device_data(struct cryp_ctx *ctx,
475static void cryp_dma_setup_channel(struct cryp_device_data *device_data, 475static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
476 struct device *dev) 476 struct device *dev)
477{ 477{
478 struct dma_slave_config mem2cryp = {
479 .direction = DMA_MEM_TO_DEV,
480 .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
481 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
482 .dst_maxburst = 4,
483 };
484 struct dma_slave_config cryp2mem = {
485 .direction = DMA_DEV_TO_MEM,
486 .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
487 .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
488 .src_maxburst = 4,
489 };
490
478 dma_cap_zero(device_data->dma.mask); 491 dma_cap_zero(device_data->dma.mask);
479 dma_cap_set(DMA_SLAVE, device_data->dma.mask); 492 dma_cap_set(DMA_SLAVE, device_data->dma.mask);
480 493
@@ -490,6 +503,9 @@ static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
490 stedma40_filter, 503 stedma40_filter,
491 device_data->dma.cfg_cryp2mem); 504 device_data->dma.cfg_cryp2mem);
492 505
506 dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp);
507 dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem);
508
493 init_completion(&device_data->dma.cryp_dma_complete); 509 init_completion(&device_data->dma.cryp_dma_complete);
494} 510}
495 511
@@ -537,10 +553,10 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
537 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " 553 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
538 "(TO_DEVICE)", __func__); 554 "(TO_DEVICE)", __func__);
539 555
540 desc = channel->device->device_prep_slave_sg(channel, 556 desc = dmaengine_prep_slave_sg(channel,
541 ctx->device->dma.sg_src, 557 ctx->device->dma.sg_src,
542 ctx->device->dma.sg_src_len, 558 ctx->device->dma.sg_src_len,
543 direction, DMA_CTRL_ACK, NULL); 559 direction, DMA_CTRL_ACK);
544 break; 560 break;
545 561
546 case DMA_FROM_DEVICE: 562 case DMA_FROM_DEVICE:
@@ -561,12 +577,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
561 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " 577 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
562 "(FROM_DEVICE)", __func__); 578 "(FROM_DEVICE)", __func__);
563 579
564 desc = channel->device->device_prep_slave_sg(channel, 580 desc = dmaengine_prep_slave_sg(channel,
565 ctx->device->dma.sg_dst, 581 ctx->device->dma.sg_dst,
566 ctx->device->dma.sg_dst_len, 582 ctx->device->dma.sg_dst_len,
567 direction, 583 direction,
568 DMA_CTRL_ACK | 584 DMA_CTRL_ACK |
569 DMA_PREP_INTERRUPT, NULL); 585 DMA_PREP_INTERRUPT);
570 586
571 desc->callback = cryp_dma_out_callback; 587 desc->callback = cryp_dma_out_callback;
572 desc->callback_param = ctx; 588 desc->callback_param = ctx;
@@ -578,7 +594,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
578 return -EFAULT; 594 return -EFAULT;
579 } 595 }
580 596
581 cookie = desc->tx_submit(desc); 597 cookie = dmaengine_submit(desc);
582 dma_async_issue_pending(channel); 598 dma_async_issue_pending(channel);
583 599
584 return 0; 600 return 0;
@@ -591,12 +607,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
591 dev_dbg(ctx->device->dev, "[%s]: ", __func__); 607 dev_dbg(ctx->device->dev, "[%s]: ", __func__);
592 608
593 chan = ctx->device->dma.chan_mem2cryp; 609 chan = ctx->device->dma.chan_mem2cryp;
594 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 610 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
595 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, 611 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
596 ctx->device->dma.sg_src_len, DMA_TO_DEVICE); 612 ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
597 613
598 chan = ctx->device->dma.chan_cryp2mem; 614 chan = ctx->device->dma.chan_cryp2mem;
599 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 615 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
600 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, 616 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
601 ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); 617 ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
602} 618}
@@ -1431,6 +1447,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1431 goto out_kfree; 1447 goto out_kfree;
1432 } 1448 }
1433 1449
1450 device_data->phybase = res->start;
1434 device_data->base = ioremap(res->start, resource_size(res)); 1451 device_data->base = ioremap(res->start, resource_size(res));
1435 if (!device_data->base) { 1452 if (!device_data->base) {
1436 dev_err(dev, "[%s]: ioremap failed!", __func__); 1453 dev_err(dev, "[%s]: ioremap failed!", __func__);
@@ -1458,11 +1475,17 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1458 goto out_regulator; 1475 goto out_regulator;
1459 } 1476 }
1460 1477
1478 ret = clk_prepare(device_data->clk);
1479 if (ret) {
1480 dev_err(dev, "[%s]: clk_prepare() failed!", __func__);
1481 goto out_clk;
1482 }
1483
1461 /* Enable device power (and clock) */ 1484 /* Enable device power (and clock) */
1462 ret = cryp_enable_power(device_data->dev, device_data, false); 1485 ret = cryp_enable_power(device_data->dev, device_data, false);
1463 if (ret) { 1486 if (ret) {
1464 dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); 1487 dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1465 goto out_clk; 1488 goto out_clk_unprepare;
1466 } 1489 }
1467 1490
1468 cryp_error = cryp_check(device_data); 1491 cryp_error = cryp_check(device_data);
@@ -1518,11 +1541,16 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1518 goto out_power; 1541 goto out_power;
1519 } 1542 }
1520 1543
1544 dev_info(dev, "successfully registered\n");
1545
1521 return 0; 1546 return 0;
1522 1547
1523out_power: 1548out_power:
1524 cryp_disable_power(device_data->dev, device_data, false); 1549 cryp_disable_power(device_data->dev, device_data, false);
1525 1550
1551out_clk_unprepare:
1552 clk_unprepare(device_data->clk);
1553
1526out_clk: 1554out_clk:
1527 clk_put(device_data->clk); 1555 clk_put(device_data->clk);
1528 1556
@@ -1593,6 +1621,7 @@ static int ux500_cryp_remove(struct platform_device *pdev)
1593 dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", 1621 dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1594 __func__); 1622 __func__);
1595 1623
1624 clk_unprepare(device_data->clk);
1596 clk_put(device_data->clk); 1625 clk_put(device_data->clk);
1597 regulator_put(device_data->pwr_regulator); 1626 regulator_put(device_data->pwr_regulator);
1598 1627
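Editor's sketch: the cryp_core.c hunks replace open-coded calls through chan->device with the dmaengine wrapper API. The resulting slave-DMA sequence for one memory-to-device transfer looks roughly like the following; start_tx(), the fifo parameter and the width/burst values are placeholders, not the driver's own names or numbers.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int start_tx(struct dma_chan *chan, struct scatterlist *sg,
                    unsigned int sg_len, dma_addr_t fifo)
{
        struct dma_slave_config conf = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo,                         /* device FIFO address */
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,   /* assumed width */
                .dst_maxburst   = 4,                            /* assumed burst */
        };
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = dmaengine_slave_config(chan, &conf);
        if (ret)
                return ret;

        desc = dmaengine_prep_slave_sg(chan, sg, sg_len,
                                       DMA_MEM_TO_DEV, DMA_CTRL_ACK);
        if (!desc)
                return -EFAULT;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        return 0;
}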
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
index cd9351cb24df..be6eb54da40f 100644
--- a/drivers/crypto/ux500/hash/hash_alg.h
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -11,6 +11,7 @@
11#include <linux/bitops.h> 11#include <linux/bitops.h>
12 12
13#define HASH_BLOCK_SIZE 64 13#define HASH_BLOCK_SIZE 64
14#define HASH_DMA_FIFO 4
14#define HASH_DMA_ALIGN_SIZE 4 15#define HASH_DMA_ALIGN_SIZE 4
15#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024 16#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024
16#define HASH_BYTES_PER_WORD 4 17#define HASH_BYTES_PER_WORD 4
@@ -347,7 +348,8 @@ struct hash_req_ctx {
347 348
348/** 349/**
349 * struct hash_device_data - structure for a hash device. 350 * struct hash_device_data - structure for a hash device.
350 * @base: Pointer to the hardware base address. 351 * @base: Pointer to virtual base address of the hash device.
352 * @phybase: Pointer to physical memory location of the hash device.
351 * @list_node: For inclusion in klist. 353 * @list_node: For inclusion in klist.
352 * @dev: Pointer to the device dev structure. 354 * @dev: Pointer to the device dev structure.
353 * @ctx_lock: Spinlock for current_ctx. 355 * @ctx_lock: Spinlock for current_ctx.
@@ -361,6 +363,7 @@ struct hash_req_ctx {
361 */ 363 */
362struct hash_device_data { 364struct hash_device_data {
363 struct hash_register __iomem *base; 365 struct hash_register __iomem *base;
366 phys_addr_t phybase;
364 struct klist_node list_node; 367 struct klist_node list_node;
365 struct device *dev; 368 struct device *dev;
366 struct spinlock ctx_lock; 369 struct spinlock ctx_lock;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 3b8f661d0edf..496ae6aae316 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -122,6 +122,13 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
122 struct device *dev) 122 struct device *dev)
123{ 123{
124 struct hash_platform_data *platform_data = dev->platform_data; 124 struct hash_platform_data *platform_data = dev->platform_data;
125 struct dma_slave_config conf = {
126 .direction = DMA_MEM_TO_DEV,
127 .dst_addr = device_data->phybase + HASH_DMA_FIFO,
128 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
129 .dst_maxburst = 16,
130 };
131
125 dma_cap_zero(device_data->dma.mask); 132 dma_cap_zero(device_data->dma.mask);
126 dma_cap_set(DMA_SLAVE, device_data->dma.mask); 133 dma_cap_set(DMA_SLAVE, device_data->dma.mask);
127 134
@@ -131,6 +138,8 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
131 platform_data->dma_filter, 138 platform_data->dma_filter,
132 device_data->dma.cfg_mem2hash); 139 device_data->dma.cfg_mem2hash);
133 140
141 dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
142
134 init_completion(&device_data->dma.complete); 143 init_completion(&device_data->dma.complete);
135} 144}
136 145
@@ -171,9 +180,9 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
171 180
172 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " 181 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
173 "(TO_DEVICE)", __func__); 182 "(TO_DEVICE)", __func__);
174 desc = channel->device->device_prep_slave_sg(channel, 183 desc = dmaengine_prep_slave_sg(channel,
175 ctx->device->dma.sg, ctx->device->dma.sg_len, 184 ctx->device->dma.sg, ctx->device->dma.sg_len,
176 direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL); 185 direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
177 if (!desc) { 186 if (!desc) {
178 dev_err(ctx->device->dev, 187 dev_err(ctx->device->dev,
179 "[%s]: device_prep_slave_sg() failed!", __func__); 188 "[%s]: device_prep_slave_sg() failed!", __func__);
@@ -183,7 +192,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
183 desc->callback = hash_dma_callback; 192 desc->callback = hash_dma_callback;
184 desc->callback_param = ctx; 193 desc->callback_param = ctx;
185 194
186 cookie = desc->tx_submit(desc); 195 cookie = dmaengine_submit(desc);
187 dma_async_issue_pending(channel); 196 dma_async_issue_pending(channel);
188 197
189 return 0; 198 return 0;
@@ -194,7 +203,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
194 struct dma_chan *chan; 203 struct dma_chan *chan;
195 204
196 chan = ctx->device->dma.chan_mem2hash; 205 chan = ctx->device->dma.chan_mem2hash;
197 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 206 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
198 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, 207 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
199 ctx->device->dma.sg_len, DMA_TO_DEVICE); 208 ctx->device->dma.sg_len, DMA_TO_DEVICE);
200 209
@@ -464,12 +473,12 @@ static void hash_hw_write_key(struct hash_device_data *device_data,
464 HASH_SET_DIN(&word, nwords); 473 HASH_SET_DIN(&word, nwords);
465 } 474 }
466 475
467 while (device_data->base->str & HASH_STR_DCAL_MASK) 476 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
468 cpu_relax(); 477 cpu_relax();
469 478
470 HASH_SET_DCAL; 479 HASH_SET_DCAL;
471 480
472 while (device_data->base->str & HASH_STR_DCAL_MASK) 481 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
473 cpu_relax(); 482 cpu_relax();
474} 483}
475 484
@@ -652,7 +661,7 @@ static void hash_messagepad(struct hash_device_data *device_data,
652 if (index_bytes) 661 if (index_bytes)
653 HASH_SET_DIN(message, nwords); 662 HASH_SET_DIN(message, nwords);
654 663
655 while (device_data->base->str & HASH_STR_DCAL_MASK) 664 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
656 cpu_relax(); 665 cpu_relax();
657 666
658 /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ 667 /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
@@ -667,7 +676,7 @@ static void hash_messagepad(struct hash_device_data *device_data,
667 (int)(readl_relaxed(&device_data->base->str) & 676 (int)(readl_relaxed(&device_data->base->str) &
668 HASH_STR_NBLW_MASK)); 677 HASH_STR_NBLW_MASK));
669 678
670 while (device_data->base->str & HASH_STR_DCAL_MASK) 679 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
671 cpu_relax(); 680 cpu_relax();
672} 681}
673 682
@@ -767,7 +776,7 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
767 /* HW and SW initializations */ 776 /* HW and SW initializations */
768 /* Note: there is no need to initialize buffer and digest members */ 777 /* Note: there is no need to initialize buffer and digest members */
769 778
770 while (device_data->base->str & HASH_STR_DCAL_MASK) 779 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
771 cpu_relax(); 780 cpu_relax();
772 781
773 /* 782 /*
@@ -783,8 +792,7 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
783 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); 792 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
784} 793}
785 794
786int hash_process_data( 795static int hash_process_data(struct hash_device_data *device_data,
787 struct hash_device_data *device_data,
788 struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, 796 struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
789 int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) 797 int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
790{ 798{
@@ -953,7 +961,7 @@ static int hash_dma_final(struct ahash_request *req)
953 wait_for_completion(&ctx->device->dma.complete); 961 wait_for_completion(&ctx->device->dma.complete);
954 hash_dma_done(ctx); 962 hash_dma_done(ctx);
955 963
956 while (device_data->base->str & HASH_STR_DCAL_MASK) 964 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
957 cpu_relax(); 965 cpu_relax();
958 966
959 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { 967 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
@@ -983,7 +991,7 @@ out:
983 * hash_hw_final - The final hash calculation function 991 * hash_hw_final - The final hash calculation function
984 * @req: The hash request for the job. 992 * @req: The hash request for the job.
985 */ 993 */
986int hash_hw_final(struct ahash_request *req) 994static int hash_hw_final(struct ahash_request *req)
987{ 995{
988 int ret = 0; 996 int ret = 0;
989 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 997 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -1051,7 +1059,7 @@ int hash_hw_final(struct ahash_request *req)
1051 req_ctx->state.index); 1059 req_ctx->state.index);
1052 } else { 1060 } else {
1053 HASH_SET_DCAL; 1061 HASH_SET_DCAL;
1054 while (device_data->base->str & HASH_STR_DCAL_MASK) 1062 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1055 cpu_relax(); 1063 cpu_relax();
1056 } 1064 }
1057 1065
@@ -1180,7 +1188,7 @@ int hash_resume_state(struct hash_device_data *device_data,
1180 temp_cr = device_state->temp_cr; 1188 temp_cr = device_state->temp_cr;
1181 writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr); 1189 writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
1182 1190
1183 if (device_data->base->cr & HASH_CR_MODE_MASK) 1191 if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1184 hash_mode = HASH_OPER_MODE_HMAC; 1192 hash_mode = HASH_OPER_MODE_HMAC;
1185 else 1193 else
1186 hash_mode = HASH_OPER_MODE_HASH; 1194 hash_mode = HASH_OPER_MODE_HASH;
@@ -1224,7 +1232,7 @@ int hash_save_state(struct hash_device_data *device_data,
1224 * actually makes sure that there isn't any ongoing calculation in the 1232 * actually makes sure that there isn't any ongoing calculation in the
1225 * hardware. 1233 * hardware.
1226 */ 1234 */
1227 while (device_data->base->str & HASH_STR_DCAL_MASK) 1235 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1228 cpu_relax(); 1236 cpu_relax();
1229 1237
1230 temp_cr = readl_relaxed(&device_data->base->cr); 1238 temp_cr = readl_relaxed(&device_data->base->cr);
@@ -1233,7 +1241,7 @@ int hash_save_state(struct hash_device_data *device_data,
1233 1241
1234 device_state->din_reg = readl_relaxed(&device_data->base->din); 1242 device_state->din_reg = readl_relaxed(&device_data->base->din);
1235 1243
1236 if (device_data->base->cr & HASH_CR_MODE_MASK) 1244 if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1237 hash_mode = HASH_OPER_MODE_HMAC; 1245 hash_mode = HASH_OPER_MODE_HMAC;
1238 else 1246 else
1239 hash_mode = HASH_OPER_MODE_HASH; 1247 hash_mode = HASH_OPER_MODE_HASH;
@@ -1699,6 +1707,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
1699 goto out_kfree; 1707 goto out_kfree;
1700 } 1708 }
1701 1709
1710 device_data->phybase = res->start;
1702 device_data->base = ioremap(res->start, resource_size(res)); 1711 device_data->base = ioremap(res->start, resource_size(res));
1703 if (!device_data->base) { 1712 if (!device_data->base) {
1704 dev_err(dev, "[%s] ioremap() failed!", 1713 dev_err(dev, "[%s] ioremap() failed!",
@@ -1726,11 +1735,17 @@ static int ux500_hash_probe(struct platform_device *pdev)
1726 goto out_regulator; 1735 goto out_regulator;
1727 } 1736 }
1728 1737
1738 ret = clk_prepare(device_data->clk);
1739 if (ret) {
1740 dev_err(dev, "[%s] clk_prepare() failed!", __func__);
1741 goto out_clk;
1742 }
1743
1729 /* Enable device power (and clock) */ 1744 /* Enable device power (and clock) */
1730 ret = hash_enable_power(device_data, false); 1745 ret = hash_enable_power(device_data, false);
1731 if (ret) { 1746 if (ret) {
1732 dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); 1747 dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
1733 goto out_clk; 1748 goto out_clk_unprepare;
1734 } 1749 }
1735 1750
1736 ret = hash_check_hw(device_data); 1751 ret = hash_check_hw(device_data);
@@ -1756,12 +1771,15 @@ static int ux500_hash_probe(struct platform_device *pdev)
1756 goto out_power; 1771 goto out_power;
1757 } 1772 }
1758 1773
1759 dev_info(dev, "[%s] successfully probed\n", __func__); 1774 dev_info(dev, "successfully registered\n");
1760 return 0; 1775 return 0;
1761 1776
1762out_power: 1777out_power:
1763 hash_disable_power(device_data, false); 1778 hash_disable_power(device_data, false);
1764 1779
1780out_clk_unprepare:
1781 clk_unprepare(device_data->clk);
1782
1765out_clk: 1783out_clk:
1766 clk_put(device_data->clk); 1784 clk_put(device_data->clk);
1767 1785
@@ -1826,6 +1844,7 @@ static int ux500_hash_remove(struct platform_device *pdev)
1826 dev_err(dev, "[%s]: hash_disable_power() failed", 1844 dev_err(dev, "[%s]: hash_disable_power() failed",
1827 __func__); 1845 __func__);
1828 1846
1847 clk_unprepare(device_data->clk);
1829 clk_put(device_data->clk); 1848 clk_put(device_data->clk);
1830 regulator_put(device_data->regulator); 1849 regulator_put(device_data->regulator);
1831 1850
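Editor's sketch: both probe functions now call clk_prepare() before the power/clock enable path and clk_unprepare() on the error and remove paths, as the common clock framework requires. A compressed illustration of that pairing; my_probe() and the NULL clock id are placeholders, not the driver's code.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* clk_prepare() may sleep, so it is done once up front. */
        ret = clk_prepare(clk);
        if (ret)
                goto out_put;

        /* clk_enable() must not sleep and may be called later, e.g. from
         * the driver's power-enable helper. */
        ret = clk_enable(clk);
        if (ret)
                goto out_unprepare;

        return 0;

out_unprepare:
        clk_unprepare(clk);     /* mirrors the new out_clk_unprepare label */
out_put:
        clk_put(clk);
        return ret;
}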
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 71bf4ec300ea..5ab5880d5c90 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -17,6 +17,8 @@
17#include <linux/pm.h> 17#include <linux/pm.h>
18#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/of.h>
21#include <linux/of_dma.h>
20#include <linux/amba/bus.h> 22#include <linux/amba/bus.h>
21#include <linux/regulator/consumer.h> 23#include <linux/regulator/consumer.h>
22#include <linux/platform_data/dma-ste-dma40.h> 24#include <linux/platform_data/dma-ste-dma40.h>
@@ -45,15 +47,63 @@
45#define D40_LCLA_LINK_PER_EVENT_GRP 128 47#define D40_LCLA_LINK_PER_EVENT_GRP 128
46#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP 48#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
47 49
50/* Max number of logical channels per physical channel */
51#define D40_MAX_LOG_CHAN_PER_PHY 32
52
48/* Attempts before giving up to trying to get pages that are aligned */ 53/* Attempts before giving up to trying to get pages that are aligned */
49#define MAX_LCLA_ALLOC_ATTEMPTS 256 54#define MAX_LCLA_ALLOC_ATTEMPTS 256
50 55
51/* Bit markings for allocation map */ 56/* Bit markings for allocation map */
52#define D40_ALLOC_FREE (1 << 31) 57#define D40_ALLOC_FREE BIT(31)
53#define D40_ALLOC_PHY (1 << 30) 58#define D40_ALLOC_PHY BIT(30)
54#define D40_ALLOC_LOG_FREE 0 59#define D40_ALLOC_LOG_FREE 0
55 60
56#define MAX(a, b) (((a) < (b)) ? (b) : (a)) 61#define D40_MEMCPY_MAX_CHANS 8
62
63/* Reserved event lines for memcpy only. */
64#define DB8500_DMA_MEMCPY_EV_0 51
65#define DB8500_DMA_MEMCPY_EV_1 56
66#define DB8500_DMA_MEMCPY_EV_2 57
67#define DB8500_DMA_MEMCPY_EV_3 58
68#define DB8500_DMA_MEMCPY_EV_4 59
69#define DB8500_DMA_MEMCPY_EV_5 60
70
71static int dma40_memcpy_channels[] = {
72 DB8500_DMA_MEMCPY_EV_0,
73 DB8500_DMA_MEMCPY_EV_1,
74 DB8500_DMA_MEMCPY_EV_2,
75 DB8500_DMA_MEMCPY_EV_3,
76 DB8500_DMA_MEMCPY_EV_4,
77 DB8500_DMA_MEMCPY_EV_5,
78};
79
80/* Default configuration for physical memcpy */
81static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
82 .mode = STEDMA40_MODE_PHYSICAL,
83 .dir = DMA_MEM_TO_MEM,
84
85 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
86 .src_info.psize = STEDMA40_PSIZE_PHY_1,
87 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
88
89 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
90 .dst_info.psize = STEDMA40_PSIZE_PHY_1,
91 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
92};
93
94/* Default configuration for logical memcpy */
95static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
96 .mode = STEDMA40_MODE_LOGICAL,
97 .dir = DMA_MEM_TO_MEM,
98
99 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
100 .src_info.psize = STEDMA40_PSIZE_LOG_1,
101 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
102
103 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
104 .dst_info.psize = STEDMA40_PSIZE_LOG_1,
105 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
106};
57 107
58/** 108/**
59 * enum 40_command - The different commands and/or statuses. 109 * enum 40_command - The different commands and/or statuses.
@@ -171,6 +221,9 @@ static u32 d40_backup_regs_chan[] = {
171 D40_CHAN_REG_SDLNK, 221 D40_CHAN_REG_SDLNK,
172}; 222};
173 223
224#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
225 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
226
174/** 227/**
175 * struct d40_interrupt_lookup - lookup table for interrupt handler 228 * struct d40_interrupt_lookup - lookup table for interrupt handler
176 * 229 *
@@ -471,6 +524,8 @@ struct d40_gen_dmac {
471 * @phy_start: Physical memory start of the DMA registers. 524 * @phy_start: Physical memory start of the DMA registers.
472 * @phy_size: Size of the DMA register map. 525 * @phy_size: Size of the DMA register map.
473 * @irq: The IRQ number. 526 * @irq: The IRQ number.
527 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
528 * transfers).
474 * @num_phy_chans: The number of physical channels. Read from HW. This 529 * @num_phy_chans: The number of physical channels. Read from HW. This
475 * is the number of available channels for this driver, not counting "Secure 530 * is the number of available channels for this driver, not counting "Secure
476 * mode" allocated physical channels. 531 * mode" allocated physical channels.
@@ -514,6 +569,7 @@ struct d40_base {
514 phys_addr_t phy_start; 569 phys_addr_t phy_start;
515 resource_size_t phy_size; 570 resource_size_t phy_size;
516 int irq; 571 int irq;
572 int num_memcpy_chans;
517 int num_phy_chans; 573 int num_phy_chans;
518 int num_log_chans; 574 int num_log_chans;
519 struct device_dma_parameters dma_parms; 575 struct device_dma_parameters dma_parms;
@@ -534,7 +590,7 @@ struct d40_base {
534 resource_size_t lcpa_size; 590 resource_size_t lcpa_size;
535 struct kmem_cache *desc_slab; 591 struct kmem_cache *desc_slab;
536 u32 reg_val_backup[BACKUP_REGS_SZ]; 592 u32 reg_val_backup[BACKUP_REGS_SZ];
537 u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)]; 593 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
538 u32 *reg_val_backup_chan; 594 u32 *reg_val_backup_chan;
539 u16 gcc_pwr_off_mask; 595 u16 gcc_pwr_off_mask;
540 bool initialized; 596 bool initialized;
@@ -792,7 +848,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
792 * that uses linked lists. 848 * that uses linked lists.
793 */ 849 */
794 if (!(chan->phy_chan->use_soft_lli && 850 if (!(chan->phy_chan->use_soft_lli &&
795 chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)) 851 chan->dma_cfg.dir == DMA_DEV_TO_MEM))
796 curr_lcla = d40_lcla_alloc_one(chan, desc); 852 curr_lcla = d40_lcla_alloc_one(chan, desc);
797 853
798 first_lcla = curr_lcla; 854 first_lcla = curr_lcla;
@@ -954,20 +1010,21 @@ static int d40_psize_2_burst_size(bool is_log, int psize)
954 1010
955/* 1011/*
956 * The dma only supports transmitting packages up to 1012 * The dma only supports transmitting packages up to
957 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of 1013 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes.
958 * dma elements required to send the entire sg list 1014 *
1015 * Calculate the total number of dma elements required to send the entire sg list.
959 */ 1016 */
960static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) 1017static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
961{ 1018{
962 int dmalen; 1019 int dmalen;
963 u32 max_w = max(data_width1, data_width2); 1020 u32 max_w = max(data_width1, data_width2);
964 u32 min_w = min(data_width1, data_width2); 1021 u32 min_w = min(data_width1, data_width2);
965 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); 1022 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
966 1023
967 if (seg_max > STEDMA40_MAX_SEG_SIZE) 1024 if (seg_max > STEDMA40_MAX_SEG_SIZE)
968 seg_max -= (1 << max_w); 1025 seg_max -= max_w;
969 1026
970 if (!IS_ALIGNED(size, 1 << max_w)) 1027 if (!IS_ALIGNED(size, max_w))
971 return -EINVAL; 1028 return -EINVAL;
972 1029
973 if (size <= seg_max) 1030 if (size <= seg_max)
@@ -1257,21 +1314,17 @@ static void __d40_config_set_event(struct d40_chan *d40c,
1257static void d40_config_set_event(struct d40_chan *d40c, 1314static void d40_config_set_event(struct d40_chan *d40c,
1258 enum d40_events event_type) 1315 enum d40_events event_type)
1259{ 1316{
1260 /* Enable event line connected to device (or memcpy) */ 1317 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1261 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1262 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
1263 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1264 1318
1319 /* Enable event line connected to device (or memcpy) */
1320 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1321 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1265 __d40_config_set_event(d40c, event_type, event, 1322 __d40_config_set_event(d40c, event_type, event,
1266 D40_CHAN_REG_SSLNK); 1323 D40_CHAN_REG_SSLNK);
1267 }
1268
1269 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
1270 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1271 1324
1325 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1272 __d40_config_set_event(d40c, event_type, event, 1326 __d40_config_set_event(d40c, event_type, event,
1273 D40_CHAN_REG_SDLNK); 1327 D40_CHAN_REG_SDLNK);
1274 }
1275} 1328}
1276 1329
1277static u32 d40_chan_has_events(struct d40_chan *d40c) 1330static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1417,7 +1470,7 @@ static u32 d40_residue(struct d40_chan *d40c)
1417 >> D40_SREG_ELEM_PHY_ECNT_POS; 1470 >> D40_SREG_ELEM_PHY_ECNT_POS;
1418 } 1471 }
1419 1472
1420 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 1473 return num_elt * d40c->dma_cfg.dst_info.data_width;
1421} 1474}
1422 1475
1423static bool d40_tx_is_linked(struct d40_chan *d40c) 1476static bool d40_tx_is_linked(struct d40_chan *d40c)
@@ -1693,7 +1746,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
1693 } 1746 }
1694 1747
1695 /* ACK interrupt */ 1748 /* ACK interrupt */
1696 writel(1 << idx, base->virtbase + il[row].clr); 1749 writel(BIT(idx), base->virtbase + il[row].clr);
1697 1750
1698 spin_lock(&d40c->lock); 1751 spin_lock(&d40c->lock);
1699 1752
@@ -1715,8 +1768,6 @@ static int d40_validate_conf(struct d40_chan *d40c,
1715 struct stedma40_chan_cfg *conf) 1768 struct stedma40_chan_cfg *conf)
1716{ 1769{
1717 int res = 0; 1770 int res = 0;
1718 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1719 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1720 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1771 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1721 1772
1722 if (!conf->dir) { 1773 if (!conf->dir) {
@@ -1724,48 +1775,14 @@ static int d40_validate_conf(struct d40_chan *d40c,
1724 res = -EINVAL; 1775 res = -EINVAL;
1725 } 1776 }
1726 1777
1727 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && 1778 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1728 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && 1779 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1729 d40c->runtime_addr == 0) { 1780 (conf->dev_type < 0)) {
1730 1781 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1731 chan_err(d40c, "Invalid TX channel address (%d)\n",
1732 conf->dst_dev_type);
1733 res = -EINVAL;
1734 }
1735
1736 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1737 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1738 d40c->runtime_addr == 0) {
1739 chan_err(d40c, "Invalid RX channel address (%d)\n",
1740 conf->src_dev_type);
1741 res = -EINVAL;
1742 }
1743
1744 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1745 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1746 chan_err(d40c, "Invalid dst\n");
1747 res = -EINVAL; 1782 res = -EINVAL;
1748 } 1783 }
1749 1784
1750 if (conf->dir == STEDMA40_PERIPH_TO_MEM && 1785 if (conf->dir == DMA_DEV_TO_DEV) {
1751 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1752 chan_err(d40c, "Invalid src\n");
1753 res = -EINVAL;
1754 }
1755
1756 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1757 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1758 chan_err(d40c, "No event line\n");
1759 res = -EINVAL;
1760 }
1761
1762 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1763 (src_event_group != dst_event_group)) {
1764 chan_err(d40c, "Invalid event group\n");
1765 res = -EINVAL;
1766 }
1767
1768 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1769 /* 1786 /*
1770 * DMAC HW supports it. Will be added to this driver, 1787 * DMAC HW supports it. Will be added to this driver,
1771 * in case any dma client requires it. 1788 * in case any dma client requires it.
@@ -1775,9 +1792,9 @@ static int d40_validate_conf(struct d40_chan *d40c,
1775 } 1792 }
1776 1793
1777 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * 1794 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1778 (1 << conf->src_info.data_width) != 1795 conf->src_info.data_width !=
1779 d40_psize_2_burst_size(is_log, conf->dst_info.psize) * 1796 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1780 (1 << conf->dst_info.data_width)) { 1797 conf->dst_info.data_width) {
1781 /* 1798 /*
1782 * The DMAC hardware only supports 1799 * The DMAC hardware only supports
1783 * src (burst x width) == dst (burst x width) 1800 * src (burst x width) == dst (burst x width)
@@ -1819,8 +1836,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1819 if (phy->allocated_src == D40_ALLOC_FREE) 1836 if (phy->allocated_src == D40_ALLOC_FREE)
1820 phy->allocated_src = D40_ALLOC_LOG_FREE; 1837 phy->allocated_src = D40_ALLOC_LOG_FREE;
1821 1838
1822 if (!(phy->allocated_src & (1 << log_event_line))) { 1839 if (!(phy->allocated_src & BIT(log_event_line))) {
1823 phy->allocated_src |= 1 << log_event_line; 1840 phy->allocated_src |= BIT(log_event_line);
1824 goto found; 1841 goto found;
1825 } else 1842 } else
1826 goto not_found; 1843 goto not_found;
@@ -1831,8 +1848,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1831 if (phy->allocated_dst == D40_ALLOC_FREE) 1848 if (phy->allocated_dst == D40_ALLOC_FREE)
1832 phy->allocated_dst = D40_ALLOC_LOG_FREE; 1849 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1833 1850
1834 if (!(phy->allocated_dst & (1 << log_event_line))) { 1851 if (!(phy->allocated_dst & BIT(log_event_line))) {
1835 phy->allocated_dst |= 1 << log_event_line; 1852 phy->allocated_dst |= BIT(log_event_line);
1836 goto found; 1853 goto found;
1837 } else 1854 } else
1838 goto not_found; 1855 goto not_found;
@@ -1862,11 +1879,11 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1862 1879
1863 /* Logical channel */ 1880 /* Logical channel */
1864 if (is_src) { 1881 if (is_src) {
1865 phy->allocated_src &= ~(1 << log_event_line); 1882 phy->allocated_src &= ~BIT(log_event_line);
1866 if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1883 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1867 phy->allocated_src = D40_ALLOC_FREE; 1884 phy->allocated_src = D40_ALLOC_FREE;
1868 } else { 1885 } else {
1869 phy->allocated_dst &= ~(1 << log_event_line); 1886 phy->allocated_dst &= ~BIT(log_event_line);
1870 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1887 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1871 phy->allocated_dst = D40_ALLOC_FREE; 1888 phy->allocated_dst = D40_ALLOC_FREE;
1872 } 1889 }
@@ -1882,7 +1899,7 @@ out:
1882 1899
1883static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) 1900static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1884{ 1901{
1885 int dev_type; 1902 int dev_type = d40c->dma_cfg.dev_type;
1886 int event_group; 1903 int event_group;
1887 int event_line; 1904 int event_line;
1888 struct d40_phy_res *phys; 1905 struct d40_phy_res *phys;
@@ -1896,14 +1913,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1896 phys = d40c->base->phy_res; 1913 phys = d40c->base->phy_res;
1897 num_phy_chans = d40c->base->num_phy_chans; 1914 num_phy_chans = d40c->base->num_phy_chans;
1898 1915
1899 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1916 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1900 dev_type = d40c->dma_cfg.src_dev_type;
1901 log_num = 2 * dev_type; 1917 log_num = 2 * dev_type;
1902 is_src = true; 1918 is_src = true;
1903 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1919 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1904 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1920 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1905 /* dst event lines are used for logical memcpy */ 1921 /* dst event lines are used for logical memcpy */
1906 dev_type = d40c->dma_cfg.dst_dev_type;
1907 log_num = 2 * dev_type + 1; 1922 log_num = 2 * dev_type + 1;
1908 is_src = false; 1923 is_src = false;
1909 } else 1924 } else
@@ -1913,7 +1928,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1913 event_line = D40_TYPE_TO_EVENT(dev_type); 1928 event_line = D40_TYPE_TO_EVENT(dev_type);
1914 1929
1915 if (!is_log) { 1930 if (!is_log) {
1916 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1931 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1917 /* Find physical half channel */ 1932 /* Find physical half channel */
1918 if (d40c->dma_cfg.use_fixed_channel) { 1933 if (d40c->dma_cfg.use_fixed_channel) {
1919 i = d40c->dma_cfg.phy_channel; 1934 i = d40c->dma_cfg.phy_channel;
@@ -2014,14 +2029,23 @@ static int d40_config_memcpy(struct d40_chan *d40c)
2014 dma_cap_mask_t cap = d40c->chan.device->cap_mask; 2029 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
2015 2030
2016 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { 2031 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
2017 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; 2032 d40c->dma_cfg = dma40_memcpy_conf_log;
2018 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; 2033 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
2019 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> 2034
2020 memcpy[d40c->chan.chan_id]; 2035 d40_log_cfg(&d40c->dma_cfg,
2036 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2021 2037
2022 } else if (dma_has_cap(DMA_MEMCPY, cap) && 2038 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
2023 dma_has_cap(DMA_SLAVE, cap)) { 2039 dma_has_cap(DMA_SLAVE, cap)) {
2024 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 2040 d40c->dma_cfg = dma40_memcpy_conf_phy;
2041
2042 /* Generate interrupt at end of transfer or relink. */
2043 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
2044
2045 /* Generate interrupt on error. */
2046 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2047 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2048
2025 } else { 2049 } else {
2026 chan_err(d40c, "No memcpy\n"); 2050 chan_err(d40c, "No memcpy\n");
2027 return -EINVAL; 2051 return -EINVAL;
@@ -2034,7 +2058,7 @@ static int d40_free_dma(struct d40_chan *d40c)
2034{ 2058{
2035 2059
2036 int res = 0; 2060 int res = 0;
2037 u32 event; 2061 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2038 struct d40_phy_res *phy = d40c->phy_chan; 2062 struct d40_phy_res *phy = d40c->phy_chan;
2039 bool is_src; 2063 bool is_src;
2040 2064
@@ -2052,14 +2076,12 @@ static int d40_free_dma(struct d40_chan *d40c)
2052 return -EINVAL; 2076 return -EINVAL;
2053 } 2077 }
2054 2078
2055 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 2079 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2056 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 2080 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2057 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
2058 is_src = false; 2081 is_src = false;
2059 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 2082 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2060 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
2061 is_src = true; 2083 is_src = true;
2062 } else { 2084 else {
2063 chan_err(d40c, "Unknown direction\n"); 2085 chan_err(d40c, "Unknown direction\n");
2064 return -EINVAL; 2086 return -EINVAL;
2065 } 2087 }
@@ -2100,7 +2122,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
2100 unsigned long flags; 2122 unsigned long flags;
2101 void __iomem *active_reg; 2123 void __iomem *active_reg;
2102 u32 status; 2124 u32 status;
2103 u32 event; 2125 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2104 2126
2105 spin_lock_irqsave(&d40c->lock, flags); 2127 spin_lock_irqsave(&d40c->lock, flags);
2106 2128
@@ -2119,12 +2141,10 @@ static bool d40_is_paused(struct d40_chan *d40c)
2119 goto _exit; 2141 goto _exit;
2120 } 2142 }
2121 2143
2122 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 2144 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2123 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 2145 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2124 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
2125 status = readl(chanbase + D40_CHAN_REG_SDLNK); 2146 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2126 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 2147 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2127 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
2128 status = readl(chanbase + D40_CHAN_REG_SSLNK); 2148 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2129 } else { 2149 } else {
2130 chan_err(d40c, "Unknown direction\n"); 2150 chan_err(d40c, "Unknown direction\n");
@@ -2255,24 +2275,6 @@ err:
2255 return NULL; 2275 return NULL;
2256} 2276}
2257 2277
2258static dma_addr_t
2259d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
2260{
2261 struct stedma40_platform_data *plat = chan->base->plat_data;
2262 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2263 dma_addr_t addr = 0;
2264
2265 if (chan->runtime_addr)
2266 return chan->runtime_addr;
2267
2268 if (direction == DMA_DEV_TO_MEM)
2269 addr = plat->dev_rx[cfg->src_dev_type];
2270 else if (direction == DMA_MEM_TO_DEV)
2271 addr = plat->dev_tx[cfg->dst_dev_type];
2272
2273 return addr;
2274}
2275
2276static struct dma_async_tx_descriptor * 2278static struct dma_async_tx_descriptor *
2277d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, 2279d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2278 struct scatterlist *sg_dst, unsigned int sg_len, 2280 struct scatterlist *sg_dst, unsigned int sg_len,
@@ -2299,14 +2301,10 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2299 if (sg_next(&sg_src[sg_len - 1]) == sg_src) 2301 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2300 desc->cyclic = true; 2302 desc->cyclic = true;
2301 2303
2302 if (direction != DMA_TRANS_NONE) { 2304 if (direction == DMA_DEV_TO_MEM)
2303 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); 2305 src_dev_addr = chan->runtime_addr;
2304 2306 else if (direction == DMA_MEM_TO_DEV)
2305 if (direction == DMA_DEV_TO_MEM) 2307 dst_dev_addr = chan->runtime_addr;
2306 src_dev_addr = dev_addr;
2307 else if (direction == DMA_MEM_TO_DEV)
2308 dst_dev_addr = dev_addr;
2309 }
2310 2308
2311 if (chan_is_logical(chan)) 2309 if (chan_is_logical(chan))
2312 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, 2310 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
@@ -2366,7 +2364,7 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2366 u32 rtreg; 2364 u32 rtreg;
2367 u32 event = D40_TYPE_TO_EVENT(dev_type); 2365 u32 event = D40_TYPE_TO_EVENT(dev_type);
2368 u32 group = D40_TYPE_TO_GROUP(dev_type); 2366 u32 group = D40_TYPE_TO_GROUP(dev_type);
2369 u32 bit = 1 << event; 2367 u32 bit = BIT(event);
2370 u32 prioreg; 2368 u32 prioreg;
2371 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; 2369 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2372 2370
@@ -2397,13 +2395,57 @@ static void d40_set_prio_realtime(struct d40_chan *d40c)
2397 if (d40c->base->rev < 3) 2395 if (d40c->base->rev < 3)
2398 return; 2396 return;
2399 2397
2400 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 2398 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2401 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 2399 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2402 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); 2400 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2403 2401
2404 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || 2402 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2405 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 2403 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2406 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); 2404 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2405}
2406
2407#define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2408#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2409#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2410#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2411
2412static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2413 struct of_dma *ofdma)
2414{
2415 struct stedma40_chan_cfg cfg;
2416 dma_cap_mask_t cap;
2417 u32 flags;
2418
2419 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2420
2421 dma_cap_zero(cap);
2422 dma_cap_set(DMA_SLAVE, cap);
2423
2424 cfg.dev_type = dma_spec->args[0];
2425 flags = dma_spec->args[2];
2426
2427 switch (D40_DT_FLAGS_MODE(flags)) {
2428 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2429 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2430 }
2431
2432 switch (D40_DT_FLAGS_DIR(flags)) {
2433 case 0:
2434 cfg.dir = DMA_MEM_TO_DEV;
2435 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2436 break;
2437 case 1:
2438 cfg.dir = DMA_DEV_TO_MEM;
2439 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2440 break;
2441 }
2442
2443 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2444 cfg.phy_channel = dma_spec->args[1];
2445 cfg.use_fixed_channel = true;
2446 }
2447
2448 return dma_request_channel(cap, stedma40_filter, &cfg);
2407} 2449}
2408 2450
2409/* DMA ENGINE functions */ 2451/* DMA ENGINE functions */
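Editor's sketch: d40_xlate() above decodes the third cell of a client's DMA specifier with the D40_DT_FLAGS_* masks. The stand-alone program below shows how one such flags word breaks down; the value 0x9 is an invented example, not taken from any board description.

#include <stdio.h>

/* Same bit layout as the D40_DT_FLAGS_* macros in the patch. */
#define D40_DT_FLAGS_MODE(flags)       (((flags) >> 0) & 0x1)
#define D40_DT_FLAGS_DIR(flags)        (((flags) >> 1) & 0x1)
#define D40_DT_FLAGS_BIG_ENDIAN(flags) (((flags) >> 2) & 0x1)
#define D40_DT_FLAGS_FIXED_CHAN(flags) (((flags) >> 3) & 0x1)

int main(void)
{
        unsigned int flags = 0x9;       /* invented example value */

        printf("mode:        %s\n", D40_DT_FLAGS_MODE(flags) ? "physical" : "logical");
        printf("direction:   %s\n", D40_DT_FLAGS_DIR(flags) ? "dev-to-mem" : "mem-to-dev");
        printf("endianness:  %s\n", D40_DT_FLAGS_BIG_ENDIAN(flags) ? "big" : "little");
        printf("fixed chan:  %s\n", D40_DT_FLAGS_FIXED_CHAN(flags) ? "yes" : "no");
        return 0;
}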
@@ -2435,23 +2477,21 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
2435 } 2477 }
2436 2478
2437 pm_runtime_get_sync(d40c->base->dev); 2479 pm_runtime_get_sync(d40c->base->dev);
2438 /* Fill in basic CFG register values */
2439 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
2440 &d40c->dst_def_cfg, chan_is_logical(d40c));
2441 2480
2442 d40_set_prio_realtime(d40c); 2481 d40_set_prio_realtime(d40c);
2443 2482
2444 if (chan_is_logical(d40c)) { 2483 if (chan_is_logical(d40c)) {
2445 d40_log_cfg(&d40c->dma_cfg, 2484 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2446 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2447
2448 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2449 d40c->lcpa = d40c->base->lcpa_base + 2485 d40c->lcpa = d40c->base->lcpa_base +
2450 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; 2486 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2451 else 2487 else
2452 d40c->lcpa = d40c->base->lcpa_base + 2488 d40c->lcpa = d40c->base->lcpa_base +
2453 d40c->dma_cfg.dst_dev_type * 2489 d40c->dma_cfg.dev_type *
2454 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; 2490 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2491
2492 /* Unmask the Global Interrupt Mask. */
2493 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2494 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2455 } 2495 }
2456 2496
2457 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", 2497 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2641,33 +2681,10 @@ static void d40_terminate_all(struct dma_chan *chan)
2641static int 2681static int
2642dma40_config_to_halfchannel(struct d40_chan *d40c, 2682dma40_config_to_halfchannel(struct d40_chan *d40c,
2643 struct stedma40_half_channel_info *info, 2683 struct stedma40_half_channel_info *info,
2644 enum dma_slave_buswidth width,
2645 u32 maxburst) 2684 u32 maxburst)
2646{ 2685{
2647 enum stedma40_periph_data_width addr_width;
2648 int psize; 2686 int psize;
2649 2687
2650 switch (width) {
2651 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2652 addr_width = STEDMA40_BYTE_WIDTH;
2653 break;
2654 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2655 addr_width = STEDMA40_HALFWORD_WIDTH;
2656 break;
2657 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2658 addr_width = STEDMA40_WORD_WIDTH;
2659 break;
2660 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2661 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2662 break;
2663 default:
2664 dev_err(d40c->base->dev,
2665 "illegal peripheral address width "
2666 "requested (%d)\n",
2667 width);
2668 return -EINVAL;
2669 }
2670
2671 if (chan_is_logical(d40c)) { 2688 if (chan_is_logical(d40c)) {
2672 if (maxburst >= 16) 2689 if (maxburst >= 16)
2673 psize = STEDMA40_PSIZE_LOG_16; 2690 psize = STEDMA40_PSIZE_LOG_16;
@@ -2688,7 +2705,6 @@ dma40_config_to_halfchannel(struct d40_chan *d40c,
2688 psize = STEDMA40_PSIZE_PHY_1; 2705 psize = STEDMA40_PSIZE_PHY_1;
2689 } 2706 }
2690 2707
2691 info->data_width = addr_width;
2692 info->psize = psize; 2708 info->psize = psize;
2693 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2709 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2694 2710
@@ -2712,21 +2728,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2712 dst_maxburst = config->dst_maxburst; 2728 dst_maxburst = config->dst_maxburst;
2713 2729
2714 if (config->direction == DMA_DEV_TO_MEM) { 2730 if (config->direction == DMA_DEV_TO_MEM) {
2715 dma_addr_t dev_addr_rx =
2716 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2717
2718 config_addr = config->src_addr; 2731 config_addr = config->src_addr;
2719 if (dev_addr_rx) 2732
2720 dev_dbg(d40c->base->dev, 2733 if (cfg->dir != DMA_DEV_TO_MEM)
2721 "channel has a pre-wired RX address %08x "
2722 "overriding with %08x\n",
2723 dev_addr_rx, config_addr);
2724 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2725 dev_dbg(d40c->base->dev, 2734 dev_dbg(d40c->base->dev,
2726 "channel was not configured for peripheral " 2735 "channel was not configured for peripheral "
2727 "to memory transfer (%d) overriding\n", 2736 "to memory transfer (%d) overriding\n",
2728 cfg->dir); 2737 cfg->dir);
2729 cfg->dir = STEDMA40_PERIPH_TO_MEM; 2738 cfg->dir = DMA_DEV_TO_MEM;
2730 2739
2731 /* Configure the memory side */ 2740 /* Configure the memory side */
2732 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2741 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@@ -2735,21 +2744,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2735 dst_maxburst = src_maxburst; 2744 dst_maxburst = src_maxburst;
2736 2745
2737 } else if (config->direction == DMA_MEM_TO_DEV) { 2746 } else if (config->direction == DMA_MEM_TO_DEV) {
2738 dma_addr_t dev_addr_tx =
2739 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2740
2741 config_addr = config->dst_addr; 2747 config_addr = config->dst_addr;
2742 if (dev_addr_tx) 2748
2743 dev_dbg(d40c->base->dev, 2749 if (cfg->dir != DMA_MEM_TO_DEV)
2744 "channel has a pre-wired TX address %08x "
2745 "overriding with %08x\n",
2746 dev_addr_tx, config_addr);
2747 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2748 dev_dbg(d40c->base->dev, 2750 dev_dbg(d40c->base->dev,
2749 "channel was not configured for memory " 2751 "channel was not configured for memory "
2750 "to peripheral transfer (%d) overriding\n", 2752 "to peripheral transfer (%d) overriding\n",
2751 cfg->dir); 2753 cfg->dir);
2752 cfg->dir = STEDMA40_MEM_TO_PERIPH; 2754 cfg->dir = DMA_MEM_TO_DEV;
2753 2755
2754 /* Configure the memory side */ 2756 /* Configure the memory side */
2755 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2757 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@@ -2763,6 +2765,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2763 return -EINVAL; 2765 return -EINVAL;
2764 } 2766 }
2765 2767
2768 if (config_addr <= 0) {
2769 dev_err(d40c->base->dev, "no address supplied\n");
2770 return -EINVAL;
2771 }
2772
2766 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { 2773 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2767 dev_err(d40c->base->dev, 2774 dev_err(d40c->base->dev,
2768 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", 2775 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
@@ -2781,14 +2788,24 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2781 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; 2788 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2782 } 2789 }
2783 2790
2791	/* Only valid widths are: 1, 2, 4 and 8. */
2792 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2793 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2794 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2795 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2796 ((src_addr_width > 1) && (src_addr_width & 1)) ||
2797 ((dst_addr_width > 1) && (dst_addr_width & 1)))
2798 return -EINVAL;
2799
2800 cfg->src_info.data_width = src_addr_width;
2801 cfg->dst_info.data_width = dst_addr_width;
2802
2784 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, 2803 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2785 src_addr_width,
2786 src_maxburst); 2804 src_maxburst);
2787 if (ret) 2805 if (ret)
2788 return ret; 2806 return ret;
2789 2807
2790 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, 2808 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2791 dst_addr_width,
2792 dst_maxburst); 2809 dst_maxburst);
2793 if (ret) 2810 if (ret)
2794 return ret; 2811 return ret;
@@ -2797,8 +2814,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2797 if (chan_is_logical(d40c)) 2814 if (chan_is_logical(d40c))
2798 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2815 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2799 else 2816 else
2800 d40_phy_cfg(cfg, &d40c->src_def_cfg, 2817 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2801 &d40c->dst_def_cfg, false);
2802 2818
2803 /* These settings will take precedence later */ 2819 /* These settings will take precedence later */
2804 d40c->runtime_addr = config_addr; 2820 d40c->runtime_addr = config_addr;
@@ -2929,7 +2945,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
2929 } 2945 }
2930 2946
2931 d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2947 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2932 base->num_log_chans, base->plat_data->memcpy_len); 2948 base->num_log_chans, base->num_memcpy_chans);
2933 2949
2934 dma_cap_zero(base->dma_memcpy.cap_mask); 2950 dma_cap_zero(base->dma_memcpy.cap_mask);
2935 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2951 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
@@ -3123,13 +3139,14 @@ static int __init d40_phy_res_init(struct d40_base *base)
3123 3139
3124static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 3140static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3125{ 3141{
3126 struct stedma40_platform_data *plat_data; 3142 struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
3127 struct clk *clk = NULL; 3143 struct clk *clk = NULL;
3128 void __iomem *virtbase = NULL; 3144 void __iomem *virtbase = NULL;
3129 struct resource *res = NULL; 3145 struct resource *res = NULL;
3130 struct d40_base *base = NULL; 3146 struct d40_base *base = NULL;
3131 int num_log_chans = 0; 3147 int num_log_chans = 0;
3132 int num_phy_chans; 3148 int num_phy_chans;
3149 int num_memcpy_chans;
3133 int clk_ret = -EINVAL; 3150 int clk_ret = -EINVAL;
3134 int i; 3151 int i;
3135 u32 pid; 3152 u32 pid;
@@ -3189,8 +3206,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3189 * DB8540v1 has revision 4 3206 * DB8540v1 has revision 4
3190 */ 3207 */
3191 rev = AMBA_REV_BITS(pid); 3208 rev = AMBA_REV_BITS(pid);
3192 3209 if (rev < 2) {
3193 plat_data = pdev->dev.platform_data; 3210 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
3211 goto failure;
3212 }
3194 3213
3195 /* The number of physical channels on this HW */ 3214 /* The number of physical channels on this HW */
3196 if (plat_data->num_of_phy_chans) 3215 if (plat_data->num_of_phy_chans)
@@ -3198,26 +3217,20 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3198 else 3217 else
3199 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 3218 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3200 3219
3201 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n", 3220 /* The number of channels used for memcpy */
3202 rev, res->start, num_phy_chans); 3221 if (plat_data->num_of_memcpy_chans)
3203 3222 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3204 if (rev < 2) { 3223 else
3205 d40_err(&pdev->dev, "hardware revision: %d is not supported", 3224 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3206 rev);
3207 goto failure;
3208 }
3209 3225
3210 /* Count the number of logical channels in use */ 3226 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3211 for (i = 0; i < plat_data->dev_len; i++)
3212 if (plat_data->dev_rx[i] != 0)
3213 num_log_chans++;
3214 3227
3215 for (i = 0; i < plat_data->dev_len; i++) 3228 dev_info(&pdev->dev,
3216 if (plat_data->dev_tx[i] != 0) 3229 "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
3217 num_log_chans++; 3230 rev, res->start, num_phy_chans, num_log_chans);
3218 3231
3219 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 3232 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3220 (num_phy_chans + num_log_chans + plat_data->memcpy_len) * 3233 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3221 sizeof(struct d40_chan), GFP_KERNEL); 3234 sizeof(struct d40_chan), GFP_KERNEL);
3222 3235
3223 if (base == NULL) { 3236 if (base == NULL) {
@@ -3227,6 +3240,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3227 3240
3228 base->rev = rev; 3241 base->rev = rev;
3229 base->clk = clk; 3242 base->clk = clk;
3243 base->num_memcpy_chans = num_memcpy_chans;
3230 base->num_phy_chans = num_phy_chans; 3244 base->num_phy_chans = num_phy_chans;
3231 base->num_log_chans = num_log_chans; 3245 base->num_log_chans = num_log_chans;
3232 base->phy_start = res->start; 3246 base->phy_start = res->start;
@@ -3278,17 +3292,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3278 if (!base->lookup_phy_chans) 3292 if (!base->lookup_phy_chans)
3279 goto failure; 3293 goto failure;
3280 3294
3281 if (num_log_chans + plat_data->memcpy_len) { 3295 base->lookup_log_chans = kzalloc(num_log_chans *
3282 /* 3296 sizeof(struct d40_chan *),
3283 * The max number of logical channels are event lines for all 3297 GFP_KERNEL);
3284 * src devices and dst devices 3298 if (!base->lookup_log_chans)
3285 */ 3299 goto failure;
3286 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
3287 sizeof(struct d40_chan *),
3288 GFP_KERNEL);
3289 if (!base->lookup_log_chans)
3290 goto failure;
3291 }
3292 3300
3293 base->reg_val_backup_chan = kmalloc(base->num_phy_chans * 3301 base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
3294 sizeof(d40_backup_regs_chan), 3302 sizeof(d40_backup_regs_chan),
@@ -3472,17 +3480,82 @@ failure:
3472 return ret; 3480 return ret;
3473} 3481}
3474 3482
3483static int __init d40_of_probe(struct platform_device *pdev,
3484 struct device_node *np)
3485{
3486 struct stedma40_platform_data *pdata;
3487 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3488	const __be32 *list;
3489
3490 pdata = devm_kzalloc(&pdev->dev,
3491 sizeof(struct stedma40_platform_data),
3492 GFP_KERNEL);
3493 if (!pdata)
3494 return -ENOMEM;
3495
3496 /* If absent this value will be obtained from h/w. */
3497 of_property_read_u32(np, "dma-channels", &num_phy);
3498 if (num_phy > 0)
3499 pdata->num_of_phy_chans = num_phy;
3500
3501 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3502 num_memcpy /= sizeof(*list);
3503
3504 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3505 d40_err(&pdev->dev,
3506 "Invalid number of memcpy channels specified (%d)\n",
3507 num_memcpy);
3508 return -EINVAL;
3509 }
3510 pdata->num_of_memcpy_chans = num_memcpy;
3511
3512 of_property_read_u32_array(np, "memcpy-channels",
3513 dma40_memcpy_channels,
3514 num_memcpy);
3515
3516 list = of_get_property(np, "disabled-channels", &num_disabled);
3517 num_disabled /= sizeof(*list);
3518
3519 if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
3520 d40_err(&pdev->dev,
3521 "Invalid number of disabled channels specified (%d)\n",
3522 num_disabled);
3523 return -EINVAL;
3524 }
3525
3526 of_property_read_u32_array(np, "disabled-channels",
3527 pdata->disabled_channels,
3528 num_disabled);
3529 pdata->disabled_channels[num_disabled] = -1;
3530
3531 pdev->dev.platform_data = pdata;
3532
3533 return 0;
3534}
3535
3475static int __init d40_probe(struct platform_device *pdev) 3536static int __init d40_probe(struct platform_device *pdev)
3476{ 3537{
3477 int err; 3538 struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
3539 struct device_node *np = pdev->dev.of_node;
3478 int ret = -ENOENT; 3540 int ret = -ENOENT;
3479 struct d40_base *base; 3541 struct d40_base *base = NULL;
3480 struct resource *res = NULL; 3542 struct resource *res = NULL;
3481 int num_reserved_chans; 3543 int num_reserved_chans;
3482 u32 val; 3544 u32 val;
3483 3545
3484 base = d40_hw_detect_init(pdev); 3546 if (!plat_data) {
3547 if (np) {
3548		if (d40_of_probe(pdev, np)) {
3549 ret = -ENOMEM;
3550 goto failure;
3551 }
3552 } else {
3553 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3554 goto failure;
3555 }
3556 }
3485 3557
3558 base = d40_hw_detect_init(pdev);
3486 if (!base) 3559 if (!base)
3487 goto failure; 3560 goto failure;
3488 3561
@@ -3575,6 +3648,7 @@ static int __init d40_probe(struct platform_device *pdev)
3575 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); 3648 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3576 if (IS_ERR(base->lcpa_regulator)) { 3649 if (IS_ERR(base->lcpa_regulator)) {
3577 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); 3650 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3651 ret = PTR_ERR(base->lcpa_regulator);
3578 base->lcpa_regulator = NULL; 3652 base->lcpa_regulator = NULL;
3579 goto failure; 3653 goto failure;
3580 } 3654 }
@@ -3590,19 +3664,26 @@ static int __init d40_probe(struct platform_device *pdev)
3590 } 3664 }
3591 3665
3592 base->initialized = true; 3666 base->initialized = true;
3593 err = d40_dmaengine_init(base, num_reserved_chans); 3667 ret = d40_dmaengine_init(base, num_reserved_chans);
3594 if (err) 3668 if (ret)
3595 goto failure; 3669 goto failure;
3596 3670
3597 base->dev->dma_parms = &base->dma_parms; 3671 base->dev->dma_parms = &base->dma_parms;
3598 err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); 3672 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3599 if (err) { 3673 if (ret) {
3600 d40_err(&pdev->dev, "Failed to set dma max seg size\n"); 3674 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3601 goto failure; 3675 goto failure;
3602 } 3676 }
3603 3677
3604 d40_hw_init(base); 3678 d40_hw_init(base);
3605 3679
3680 if (np) {
3681 ret = of_dma_controller_register(np, d40_xlate, NULL);
3682 if (ret)
3683 dev_err(&pdev->dev,
3684 "could not register of_dma_controller\n");
3685 }
3686
3606 dev_info(base->dev, "initialized\n"); 3687 dev_info(base->dev, "initialized\n");
3607 return 0; 3688 return 0;
3608 3689
@@ -3656,11 +3737,17 @@ failure:
3656 return ret; 3737 return ret;
3657} 3738}
3658 3739
3740static const struct of_device_id d40_match[] = {
3741 { .compatible = "stericsson,dma40", },
3742 {}
3743};
3744
3659static struct platform_driver d40_driver = { 3745static struct platform_driver d40_driver = {
3660 .driver = { 3746 .driver = {
3661 .owner = THIS_MODULE, 3747 .owner = THIS_MODULE,
3662 .name = D40_NAME, 3748 .name = D40_NAME,
3663 .pm = DMA40_PM_OPS, 3749 .pm = DMA40_PM_OPS,
3750 .of_match_table = d40_match,
3664 }, 3751 },
3665}; 3752};
3666 3753
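With d40_set_runtime_config() now taking its widths straight from the generic struct dma_slave_config and storing them in bytes, a client describes a DMA40 slave channel with the standard dmaengine calls alone. A minimal sketch, not part of this patch set; the FIFO address, width and burst values are placeholders:

#include <linux/dmaengine.h>

/* Hypothetical client helper: configure a device-to-memory channel. */
static int example_d40_setup_rx(struct dma_chan *chan, dma_addr_t dev_fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= dev_fifo,			/* rejected above if zero */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* must be 1, 2, 4 or 8 */
		.src_maxburst	= 16,				/* >= 16 picks the largest PSIZE above */
	};

	return dmaengine_slave_config(chan, &cfg);
}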
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 7180e0d41722..27b818dee7c7 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -10,6 +10,18 @@
10 10
11#include "ste_dma40_ll.h" 11#include "ste_dma40_ll.h"
12 12
13u8 d40_width_to_bits(enum dma_slave_buswidth width)
14{
15 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
16 return STEDMA40_ESIZE_8_BIT;
17 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
18 return STEDMA40_ESIZE_16_BIT;
19 else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
20 return STEDMA40_ESIZE_64_BIT;
21 else
22 return STEDMA40_ESIZE_32_BIT;
23}
24
13/* Sets up proper LCSP1 and LCSP3 register for a logical channel */ 25/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
14void d40_log_cfg(struct stedma40_chan_cfg *cfg, 26void d40_log_cfg(struct stedma40_chan_cfg *cfg,
15 u32 *lcsp1, u32 *lcsp3) 27 u32 *lcsp1, u32 *lcsp3)
@@ -18,106 +30,100 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
18 u32 l1 = 0; /* src */ 30 u32 l1 = 0; /* src */
19 31
20 /* src is mem? -> increase address pos */ 32 /* src is mem? -> increase address pos */
21 if (cfg->dir == STEDMA40_MEM_TO_PERIPH || 33 if (cfg->dir == DMA_MEM_TO_DEV ||
22 cfg->dir == STEDMA40_MEM_TO_MEM) 34 cfg->dir == DMA_MEM_TO_MEM)
23 l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS; 35 l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS);
24 36
25 /* dst is mem? -> increase address pos */ 37 /* dst is mem? -> increase address pos */
26 if (cfg->dir == STEDMA40_PERIPH_TO_MEM || 38 if (cfg->dir == DMA_DEV_TO_MEM ||
27 cfg->dir == STEDMA40_MEM_TO_MEM) 39 cfg->dir == DMA_MEM_TO_MEM)
28 l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS; 40 l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS);
29 41
30 /* src is hw? -> master port 1 */ 42 /* src is hw? -> master port 1 */
31 if (cfg->dir == STEDMA40_PERIPH_TO_MEM || 43 if (cfg->dir == DMA_DEV_TO_MEM ||
32 cfg->dir == STEDMA40_PERIPH_TO_PERIPH) 44 cfg->dir == DMA_DEV_TO_DEV)
33 l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS; 45 l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS);
34 46
35 /* dst is hw? -> master port 1 */ 47 /* dst is hw? -> master port 1 */
36 if (cfg->dir == STEDMA40_MEM_TO_PERIPH || 48 if (cfg->dir == DMA_MEM_TO_DEV ||
37 cfg->dir == STEDMA40_PERIPH_TO_PERIPH) 49 cfg->dir == DMA_DEV_TO_DEV)
38 l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; 50 l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS);
39 51
40 l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; 52 l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS);
41 l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; 53 l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
42 l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; 54 l3 |= d40_width_to_bits(cfg->dst_info.data_width)
55 << D40_MEM_LCSP3_DCFG_ESIZE_POS;
43 56
44 l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; 57 l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS);
45 l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; 58 l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
46 l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; 59 l1 |= d40_width_to_bits(cfg->src_info.data_width)
60 << D40_MEM_LCSP1_SCFG_ESIZE_POS;
47 61
48 *lcsp1 = l1; 62 *lcsp1 = l1;
49 *lcsp3 = l3; 63 *lcsp3 = l3;
50 64
51} 65}
52 66
53/* Sets up SRC and DST CFG register for both logical and physical channels */ 67void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg)
54void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
55 u32 *src_cfg, u32 *dst_cfg, bool is_log)
56{ 68{
57 u32 src = 0; 69 u32 src = 0;
58 u32 dst = 0; 70 u32 dst = 0;
59 71
60 if (!is_log) { 72 if ((cfg->dir == DMA_DEV_TO_MEM) ||
61 /* Physical channel */ 73 (cfg->dir == DMA_DEV_TO_DEV)) {
62 if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) || 74 /* Set master port to 1 */
63 (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { 75 src |= BIT(D40_SREG_CFG_MST_POS);
64 /* Set master port to 1 */ 76 src |= D40_TYPE_TO_EVENT(cfg->dev_type);
65 src |= 1 << D40_SREG_CFG_MST_POS; 77
66 src |= D40_TYPE_TO_EVENT(cfg->src_dev_type); 78 if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
67 79 src |= BIT(D40_SREG_CFG_PHY_TM_POS);
68 if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) 80 else
69 src |= 1 << D40_SREG_CFG_PHY_TM_POS; 81 src |= 3 << D40_SREG_CFG_PHY_TM_POS;
70 else 82 }
71 src |= 3 << D40_SREG_CFG_PHY_TM_POS; 83 if ((cfg->dir == DMA_MEM_TO_DEV) ||
72 } 84 (cfg->dir == DMA_DEV_TO_DEV)) {
73 if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) || 85 /* Set master port to 1 */
74 (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { 86 dst |= BIT(D40_SREG_CFG_MST_POS);
75 /* Set master port to 1 */ 87 dst |= D40_TYPE_TO_EVENT(cfg->dev_type);
76 dst |= 1 << D40_SREG_CFG_MST_POS; 88
77 dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type); 89 if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
78 90 dst |= BIT(D40_SREG_CFG_PHY_TM_POS);
79 if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) 91 else
80 dst |= 1 << D40_SREG_CFG_PHY_TM_POS; 92 dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
81 else 93 }
82 dst |= 3 << D40_SREG_CFG_PHY_TM_POS; 94 /* Interrupt on end of transfer for destination */
83 } 95 dst |= BIT(D40_SREG_CFG_TIM_POS);
84 /* Interrupt on end of transfer for destination */ 96
85 dst |= 1 << D40_SREG_CFG_TIM_POS; 97 /* Generate interrupt on error */
86 98 src |= BIT(D40_SREG_CFG_EIM_POS);
87 /* Generate interrupt on error */ 99 dst |= BIT(D40_SREG_CFG_EIM_POS);
88 src |= 1 << D40_SREG_CFG_EIM_POS; 100
89 dst |= 1 << D40_SREG_CFG_EIM_POS; 101 /* PSIZE */
90 102 if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
91 /* PSIZE */ 103 src |= BIT(D40_SREG_CFG_PHY_PEN_POS);
92 if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) { 104 src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
93 src |= 1 << D40_SREG_CFG_PHY_PEN_POS; 105 }
94 src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS; 106 if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
95 } 107 dst |= BIT(D40_SREG_CFG_PHY_PEN_POS);
96 if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) { 108 dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
97 dst |= 1 << D40_SREG_CFG_PHY_PEN_POS; 109 }
98 dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS; 110
99 } 111 /* Element size */
100 112 src |= d40_width_to_bits(cfg->src_info.data_width)
101 /* Element size */ 113 << D40_SREG_CFG_ESIZE_POS;
102 src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; 114 dst |= d40_width_to_bits(cfg->dst_info.data_width)
103 dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; 115 << D40_SREG_CFG_ESIZE_POS;
104 116
105 /* Set the priority bit to high for the physical channel */ 117 /* Set the priority bit to high for the physical channel */
106 if (cfg->high_priority) { 118 if (cfg->high_priority) {
107 src |= 1 << D40_SREG_CFG_PRI_POS; 119 src |= BIT(D40_SREG_CFG_PRI_POS);
108 dst |= 1 << D40_SREG_CFG_PRI_POS; 120 dst |= BIT(D40_SREG_CFG_PRI_POS);
109 }
110
111 } else {
112 /* Logical channel */
113 dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
114 src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
115 } 121 }
116 122
117 if (cfg->src_info.big_endian) 123 if (cfg->src_info.big_endian)
118 src |= 1 << D40_SREG_CFG_LBE_POS; 124 src |= BIT(D40_SREG_CFG_LBE_POS);
119 if (cfg->dst_info.big_endian) 125 if (cfg->dst_info.big_endian)
120 dst |= 1 << D40_SREG_CFG_LBE_POS; 126 dst |= BIT(D40_SREG_CFG_LBE_POS);
121 127
122 *src_cfg = src; 128 *src_cfg = src;
123 *dst_cfg = dst; 129 *dst_cfg = dst;
@@ -143,23 +149,22 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
143 num_elems = 2 << psize; 149 num_elems = 2 << psize;
144 150
145 /* Must be aligned */ 151 /* Must be aligned */
146 if (!IS_ALIGNED(data, 0x1 << data_width)) 152 if (!IS_ALIGNED(data, data_width))
147 return -EINVAL; 153 return -EINVAL;
148 154
149 /* Transfer size can't be smaller than (num_elms * elem_size) */ 155 /* Transfer size can't be smaller than (num_elms * elem_size) */
150 if (data_size < num_elems * (0x1 << data_width)) 156 if (data_size < num_elems * data_width)
151 return -EINVAL; 157 return -EINVAL;
152 158
153	/* The number of elements, i.e. how many chunks */ 159
154 lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS; 160 lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
155 161
156 /* 162 /*
157 * Distance to next element sized entry. 163 * Distance to next element sized entry.
158 * Usually the size of the element unless you want gaps. 164 * Usually the size of the element unless you want gaps.
159 */ 165 */
160 if (addr_inc) 166 if (addr_inc)
161 lli->reg_elt |= (0x1 << data_width) << 167 lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS;
162 D40_SREG_ELEM_PHY_EIDX_POS;
163 168
164 /* Where the data is */ 169 /* Where the data is */
165 lli->reg_ptr = data; 170 lli->reg_ptr = data;
@@ -167,18 +172,20 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
167 172
168 /* If this scatter list entry is the last one, no next link */ 173 /* If this scatter list entry is the last one, no next link */
169 if (next_lli == 0) 174 if (next_lli == 0)
170 lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS; 175 lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS);
171 else 176 else
172 lli->reg_lnk = next_lli; 177 lli->reg_lnk = next_lli;
173 178
174 /* Set/clear interrupt generation on this link item.*/ 179 /* Set/clear interrupt generation on this link item.*/
175 if (term_int) 180 if (term_int)
176 lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS; 181 lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS);
177 else 182 else
178 lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS); 183 lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS);
179 184
180 /* Post link */ 185 /*
181 lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS; 186 * Post link - D40_SREG_LNK_PHY_PRE_POS = 0
187 * Relink happens after transfer completion.
188 */
182 189
183 return 0; 190 return 0;
184} 191}
@@ -187,16 +194,16 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
187{ 194{
188 u32 max_w = max(data_width1, data_width2); 195 u32 max_w = max(data_width1, data_width2);
189 u32 min_w = min(data_width1, data_width2); 196 u32 min_w = min(data_width1, data_width2);
190 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); 197 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
191 198
192 if (seg_max > STEDMA40_MAX_SEG_SIZE) 199 if (seg_max > STEDMA40_MAX_SEG_SIZE)
193 seg_max -= (1 << max_w); 200 seg_max -= max_w;
194 201
195 if (size <= seg_max) 202 if (size <= seg_max)
196 return size; 203 return size;
197 204
198 if (size <= 2 * seg_max) 205 if (size <= 2 * seg_max)
199 return ALIGN(size / 2, 1 << max_w); 206 return ALIGN(size / 2, max_w);
200 207
201 return seg_max; 208 return seg_max;
202} 209}
@@ -362,10 +369,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
362 lli->lcsp13 = reg_cfg; 369 lli->lcsp13 = reg_cfg;
363 370
364 /* The number of elements to transfer */ 371 /* The number of elements to transfer */
365 lli->lcsp02 = ((data_size >> data_width) << 372 lli->lcsp02 = ((data_size / data_width) <<
366 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; 373 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
367 374
368 BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE); 375 BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE);
369 376
370 /* 16 LSBs address of the current element */ 377 /* 16 LSBs address of the current element */
371 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; 378 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
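The logical and physical register builders above now keep data_width as a byte count (the raw enum dma_slave_buswidth value) and translate to the hardware ESIZE encoding only through d40_width_to_bits(), so element counts and alignment become plain division instead of shifts by a log2 code. An illustrative sketch of the arithmetic, not taken from the driver:

/* Width formerly stored as a log2 code, now stored in bytes. */
static unsigned int ecnt_old(unsigned int data_size, unsigned int log2_width)
{
	return data_size >> log2_width;		/* e.g. 64 >> 2 == 16 elements */
}

static unsigned int ecnt_new(unsigned int data_size, unsigned int width_bytes)
{
	return data_size / width_bytes;		/* e.g. 64 / 4  == 16 elements */
}
/* Both give 16 elements for a 64-byte transfer done as 32-bit accesses. */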
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index fdde8ef77542..1b47312bc574 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -432,8 +432,7 @@ enum d40_lli_flags {
432 432
433void d40_phy_cfg(struct stedma40_chan_cfg *cfg, 433void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
434 u32 *src_cfg, 434 u32 *src_cfg,
435 u32 *dst_cfg, 435 u32 *dst_cfg);
436 bool is_log);
437 436
438void d40_log_cfg(struct stedma40_chan_cfg *cfg, 437void d40_log_cfg(struct stedma40_chan_cfg *cfg,
439 u32 *lcsp1, 438 u32 *lcsp1,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d173d56dbb8c..6ec82f76f019 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -51,6 +51,8 @@ struct gpio_rcar_priv {
51#define FILONOFF 0x28 51#define FILONOFF 0x28
52#define BOTHEDGE 0x4c 52#define BOTHEDGE 0x4c
53 53
54#define RCAR_MAX_GPIO_PER_BANK 32
55
54static inline u32 gpio_rcar_read(struct gpio_rcar_priv *p, int offs) 56static inline u32 gpio_rcar_read(struct gpio_rcar_priv *p, int offs)
55{ 57{
56 return ioread32(p->base + offs); 58 return ioread32(p->base + offs);
@@ -274,9 +276,35 @@ static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
274 .map = gpio_rcar_irq_domain_map, 276 .map = gpio_rcar_irq_domain_map,
275}; 277};
276 278
279static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
280{
281 struct gpio_rcar_config *pdata = p->pdev->dev.platform_data;
282 struct device_node *np = p->pdev->dev.of_node;
283 struct of_phandle_args args;
284 int ret;
285
286 if (pdata) {
287 p->config = *pdata;
288 } else if (IS_ENABLED(CONFIG_OF) && np) {
289 ret = of_parse_phandle_with_args(np, "gpio-ranges",
290 "#gpio-range-cells", 0, &args);
291 p->config.number_of_pins = ret == 0 && args.args_count == 3
292 ? args.args[2]
293 : RCAR_MAX_GPIO_PER_BANK;
294 p->config.gpio_base = -1;
295 }
296
297 if (p->config.number_of_pins == 0 ||
298 p->config.number_of_pins > RCAR_MAX_GPIO_PER_BANK) {
299 dev_warn(&p->pdev->dev,
300 "Invalid number of gpio lines %u, using %u\n",
301 p->config.number_of_pins, RCAR_MAX_GPIO_PER_BANK);
302 p->config.number_of_pins = RCAR_MAX_GPIO_PER_BANK;
303 }
304}
305
277static int gpio_rcar_probe(struct platform_device *pdev) 306static int gpio_rcar_probe(struct platform_device *pdev)
278{ 307{
279 struct gpio_rcar_config *pdata = pdev->dev.platform_data;
280 struct gpio_rcar_priv *p; 308 struct gpio_rcar_priv *p;
281 struct resource *io, *irq; 309 struct resource *io, *irq;
282 struct gpio_chip *gpio_chip; 310 struct gpio_chip *gpio_chip;
@@ -291,14 +319,14 @@ static int gpio_rcar_probe(struct platform_device *pdev)
291 goto err0; 319 goto err0;
292 } 320 }
293 321
294 /* deal with driver instance configuration */
295 if (pdata)
296 p->config = *pdata;
297
298 p->pdev = pdev; 322 p->pdev = pdev;
299 platform_set_drvdata(pdev, p);
300 spin_lock_init(&p->lock); 323 spin_lock_init(&p->lock);
301 324
325 /* Get device configuration from DT node or platform data. */
326 gpio_rcar_parse_pdata(p);
327
328 platform_set_drvdata(pdev, p);
329
302 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 330 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
303 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 331 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
304 332
@@ -325,6 +353,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
325 gpio_chip->set = gpio_rcar_set; 353 gpio_chip->set = gpio_rcar_set;
326 gpio_chip->to_irq = gpio_rcar_to_irq; 354 gpio_chip->to_irq = gpio_rcar_to_irq;
327 gpio_chip->label = name; 355 gpio_chip->label = name;
356 gpio_chip->dev = &pdev->dev;
328 gpio_chip->owner = THIS_MODULE; 357 gpio_chip->owner = THIS_MODULE;
329 gpio_chip->base = p->config.gpio_base; 358 gpio_chip->base = p->config.gpio_base;
330 gpio_chip->ngpio = p->config.number_of_pins; 359 gpio_chip->ngpio = p->config.number_of_pins;
@@ -371,10 +400,12 @@ static int gpio_rcar_probe(struct platform_device *pdev)
371 p->config.irq_base, ret); 400 p->config.irq_base, ret);
372 } 401 }
373 402
374 ret = gpiochip_add_pin_range(gpio_chip, p->config.pctl_name, 0, 403 if (p->config.pctl_name) {
375 gpio_chip->base, gpio_chip->ngpio); 404 ret = gpiochip_add_pin_range(gpio_chip, p->config.pctl_name, 0,
376 if (ret < 0) 405 gpio_chip->base, gpio_chip->ngpio);
377 dev_warn(&pdev->dev, "failed to add pin range\n"); 406 if (ret < 0)
407 dev_warn(&pdev->dev, "failed to add pin range\n");
408 }
378 409
379 return 0; 410 return 0;
380 411
@@ -397,11 +428,23 @@ static int gpio_rcar_remove(struct platform_device *pdev)
397 return 0; 428 return 0;
398} 429}
399 430
431#ifdef CONFIG_OF
432static const struct of_device_id gpio_rcar_of_table[] = {
433 {
434 .compatible = "renesas,gpio-rcar",
435 },
436 { },
437};
438
439MODULE_DEVICE_TABLE(of, gpio_rcar_of_table);
440#endif
441
400static struct platform_driver gpio_rcar_device_driver = { 442static struct platform_driver gpio_rcar_device_driver = {
401 .probe = gpio_rcar_probe, 443 .probe = gpio_rcar_probe,
402 .remove = gpio_rcar_remove, 444 .remove = gpio_rcar_remove,
403 .driver = { 445 .driver = {
404 .name = "gpio_rcar", 446 .name = "gpio_rcar",
447 .of_match_table = of_match_ptr(gpio_rcar_of_table),
405 } 448 }
406}; 449};
407 450
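When no OF node is available, gpio_rcar_parse_pdata() above falls back to board-supplied platform data. A hedged sketch of such a fallback, limited to the fields the probe path actually dereferences; the header path, values and pinctrl name are assumptions:

#include <linux/platform_data/gpio-rcar.h>	/* header path assumed */

static struct gpio_rcar_config example_gpio_rcar_pdata = {
	.gpio_base	= -1,			/* let gpiolib pick the base */
	.number_of_pins	= 32,			/* clamped to RCAR_MAX_GPIO_PER_BANK */
	.pctl_name	= "pfc-r8a7779",	/* hypothetical pinctrl device name */
};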
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index b22ca7933745..a1392f47bbda 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -933,67 +933,6 @@ static void __init samsung_gpiolib_add(struct samsung_gpio_chip *chip)
933 s3c_gpiolib_track(chip); 933 s3c_gpiolib_track(chip);
934} 934}
935 935
936#if defined(CONFIG_PLAT_S3C24XX) && defined(CONFIG_OF)
937static int s3c24xx_gpio_xlate(struct gpio_chip *gc,
938 const struct of_phandle_args *gpiospec, u32 *flags)
939{
940 unsigned int pin;
941
942 if (WARN_ON(gc->of_gpio_n_cells < 3))
943 return -EINVAL;
944
945 if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
946 return -EINVAL;
947
948 if (gpiospec->args[0] > gc->ngpio)
949 return -EINVAL;
950
951 pin = gc->base + gpiospec->args[0];
952
953 if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1])))
954 pr_warn("gpio_xlate: failed to set pin function\n");
955 if (s3c_gpio_setpull(pin, gpiospec->args[2] & 0xffff))
956 pr_warn("gpio_xlate: failed to set pin pull up/down\n");
957
958 if (flags)
959 *flags = gpiospec->args[2] >> 16;
960
961 return gpiospec->args[0];
962}
963
964static const struct of_device_id s3c24xx_gpio_dt_match[] __initdata = {
965 { .compatible = "samsung,s3c24xx-gpio", },
966 {}
967};
968
969static __init void s3c24xx_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
970 u64 base, u64 offset)
971{
972 struct gpio_chip *gc = &chip->chip;
973 u64 address;
974
975 if (!of_have_populated_dt())
976 return;
977
978 address = chip->base ? base + ((u32)chip->base & 0xfff) : base + offset;
979 gc->of_node = of_find_matching_node_by_address(NULL,
980 s3c24xx_gpio_dt_match, address);
981 if (!gc->of_node) {
982 pr_info("gpio: device tree node not found for gpio controller"
983 " with base address %08llx\n", address);
984 return;
985 }
986 gc->of_gpio_n_cells = 3;
987 gc->of_xlate = s3c24xx_gpio_xlate;
988}
989#else
990static __init void s3c24xx_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
991 u64 base, u64 offset)
992{
993 return;
994}
995#endif /* defined(CONFIG_PLAT_S3C24XX) && defined(CONFIG_OF) */
996
997static void __init s3c24xx_gpiolib_add_chips(struct samsung_gpio_chip *chip, 936static void __init s3c24xx_gpiolib_add_chips(struct samsung_gpio_chip *chip,
998 int nr_chips, void __iomem *base) 937 int nr_chips, void __iomem *base)
999{ 938{
@@ -1018,8 +957,6 @@ static void __init s3c24xx_gpiolib_add_chips(struct samsung_gpio_chip *chip,
1018 gc->direction_output = samsung_gpiolib_2bit_output; 957 gc->direction_output = samsung_gpiolib_2bit_output;
1019 958
1020 samsung_gpiolib_add(chip); 959 samsung_gpiolib_add(chip);
1021
1022 s3c24xx_gpiolib_attach_ofnode(chip, S3C24XX_PA_GPIO, i * 0x10);
1023 } 960 }
1024} 961}
1025 962
@@ -3026,6 +2963,10 @@ static __init int samsung_gpiolib_init(void)
3026 */ 2963 */
3027 struct device_node *pctrl_np; 2964 struct device_node *pctrl_np;
3028 static const struct of_device_id exynos_pinctrl_ids[] = { 2965 static const struct of_device_id exynos_pinctrl_ids[] = {
2966 { .compatible = "samsung,s3c2412-pinctrl", },
2967 { .compatible = "samsung,s3c2416-pinctrl", },
2968 { .compatible = "samsung,s3c2440-pinctrl", },
2969 { .compatible = "samsung,s3c2450-pinctrl", },
3029 { .compatible = "samsung,exynos4210-pinctrl", }, 2970 { .compatible = "samsung,exynos4210-pinctrl", },
3030 { .compatible = "samsung,exynos4x12-pinctrl", }, 2971 { .compatible = "samsung,exynos4x12-pinctrl", },
3031 { .compatible = "samsung,exynos5250-pinctrl", }, 2972 { .compatible = "samsung,exynos5250-pinctrl", },
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 9545c9f03809..c8b5c13bcd05 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -16,4 +16,38 @@ config PL320_MBOX
16 Management Engine, primarily for cpufreq. Say Y here if you want 16 Management Engine, primarily for cpufreq. Say Y here if you want
17 to use the PL320 IPCM support. 17 to use the PL320 IPCM support.
18 18
19config OMAP_MBOX
20 tristate
21 help
22 This option is selected by any OMAP architecture specific mailbox
23 driver such as CONFIG_OMAP1_MBOX or CONFIG_OMAP2PLUS_MBOX. This
24 enables the common OMAP mailbox framework code.
25
26config OMAP1_MBOX
27 tristate "OMAP1 Mailbox framework support"
28 depends on ARCH_OMAP1
29 select OMAP_MBOX
30 help
31 Mailbox implementation for OMAP chips with hardware for
32 interprocessor communication involving DSP in OMAP1. Say Y here
33 if you want to use OMAP1 Mailbox framework support.
34
35config OMAP2PLUS_MBOX
36 tristate "OMAP2+ Mailbox framework support"
37 depends on ARCH_OMAP2PLUS
38 select OMAP_MBOX
39 help
40 Mailbox implementation for OMAP family chips with hardware for
41 interprocessor communication involving DSP, IVA1.0 and IVA2 in
42 OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you
43 want to use OMAP2+ Mailbox framework support.
44
45config OMAP_MBOX_KFIFO_SIZE
46 int "Mailbox kfifo default buffer size (bytes)"
47 depends on OMAP2PLUS_MBOX || OMAP1_MBOX
48 default 256
49 help
50 Specify the default size of mailbox's kfifo buffers (bytes).
51 This can also be changed at runtime (via the mbox_kfifo_size
52 module parameter).
19endif 53endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 543ad6a79505..e0facb34084a 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -1 +1,7 @@
1obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o 1obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
2
3obj-$(CONFIG_OMAP_MBOX) += omap-mailbox.o
4obj-$(CONFIG_OMAP1_MBOX) += mailbox_omap1.o
5mailbox_omap1-objs := mailbox-omap1.o
6obj-$(CONFIG_OMAP2PLUS_MBOX) += mailbox_omap2.o
7mailbox_omap2-objs := mailbox-omap2.o
diff --git a/drivers/mailbox/mailbox-omap1.c b/drivers/mailbox/mailbox-omap1.c
new file mode 100644
index 000000000000..9001b7633f10
--- /dev/null
+++ b/drivers/mailbox/mailbox-omap1.c
@@ -0,0 +1,203 @@
1/*
2 * Mailbox reservation modules for OMAP1
3 *
4 * Copyright (C) 2006-2009 Nokia Corporation
5 * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11
12#include <linux/module.h>
13#include <linux/interrupt.h>
14#include <linux/platform_device.h>
15#include <linux/io.h>
16
17#include "omap-mbox.h"
18
19#define MAILBOX_ARM2DSP1 0x00
20#define MAILBOX_ARM2DSP1b 0x04
21#define MAILBOX_DSP2ARM1 0x08
22#define MAILBOX_DSP2ARM1b 0x0c
23#define MAILBOX_DSP2ARM2 0x10
24#define MAILBOX_DSP2ARM2b 0x14
25#define MAILBOX_ARM2DSP1_Flag 0x18
26#define MAILBOX_DSP2ARM1_Flag 0x1c
27#define MAILBOX_DSP2ARM2_Flag 0x20
28
29static void __iomem *mbox_base;
30
31struct omap_mbox1_fifo {
32 unsigned long cmd;
33 unsigned long data;
34 unsigned long flag;
35};
36
37struct omap_mbox1_priv {
38 struct omap_mbox1_fifo tx_fifo;
39 struct omap_mbox1_fifo rx_fifo;
40};
41
42static inline int mbox_read_reg(size_t ofs)
43{
44 return __raw_readw(mbox_base + ofs);
45}
46
47static inline void mbox_write_reg(u32 val, size_t ofs)
48{
49 __raw_writew(val, mbox_base + ofs);
50}
51
52/* msg */
53static mbox_msg_t omap1_mbox_fifo_read(struct omap_mbox *mbox)
54{
55 struct omap_mbox1_fifo *fifo =
56 &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
57 mbox_msg_t msg;
58
59 msg = mbox_read_reg(fifo->data);
60 msg |= ((mbox_msg_t) mbox_read_reg(fifo->cmd)) << 16;
61
62 return msg;
63}
64
65static void
66omap1_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
67{
68 struct omap_mbox1_fifo *fifo =
69 &((struct omap_mbox1_priv *)mbox->priv)->tx_fifo;
70
71 mbox_write_reg(msg & 0xffff, fifo->data);
72 mbox_write_reg(msg >> 16, fifo->cmd);
73}
74
75static int omap1_mbox_fifo_empty(struct omap_mbox *mbox)
76{
77 return 0;
78}
79
80static int omap1_mbox_fifo_full(struct omap_mbox *mbox)
81{
82 struct omap_mbox1_fifo *fifo =
83 &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
84
85 return mbox_read_reg(fifo->flag);
86}
87
88/* irq */
89static void
90omap1_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
91{
92 if (irq == IRQ_RX)
93 enable_irq(mbox->irq);
94}
95
96static void
97omap1_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
98{
99 if (irq == IRQ_RX)
100 disable_irq(mbox->irq);
101}
102
103static int
104omap1_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
105{
106 if (irq == IRQ_TX)
107 return 0;
108 return 1;
109}
110
111static struct omap_mbox_ops omap1_mbox_ops = {
112 .type = OMAP_MBOX_TYPE1,
113 .fifo_read = omap1_mbox_fifo_read,
114 .fifo_write = omap1_mbox_fifo_write,
115 .fifo_empty = omap1_mbox_fifo_empty,
116 .fifo_full = omap1_mbox_fifo_full,
117 .enable_irq = omap1_mbox_enable_irq,
118 .disable_irq = omap1_mbox_disable_irq,
119 .is_irq = omap1_mbox_is_irq,
120};
121
122/* FIXME: the following struct should be created automatically by the user id */
123
124/* DSP */
125static struct omap_mbox1_priv omap1_mbox_dsp_priv = {
126 .tx_fifo = {
127 .cmd = MAILBOX_ARM2DSP1b,
128 .data = MAILBOX_ARM2DSP1,
129 .flag = MAILBOX_ARM2DSP1_Flag,
130 },
131 .rx_fifo = {
132 .cmd = MAILBOX_DSP2ARM1b,
133 .data = MAILBOX_DSP2ARM1,
134 .flag = MAILBOX_DSP2ARM1_Flag,
135 },
136};
137
138static struct omap_mbox mbox_dsp_info = {
139 .name = "dsp",
140 .ops = &omap1_mbox_ops,
141 .priv = &omap1_mbox_dsp_priv,
142};
143
144static struct omap_mbox *omap1_mboxes[] = { &mbox_dsp_info, NULL };
145
146static int omap1_mbox_probe(struct platform_device *pdev)
147{
148 struct resource *mem;
149 int ret;
150 struct omap_mbox **list;
151
152 list = omap1_mboxes;
153 list[0]->irq = platform_get_irq_byname(pdev, "dsp");
154
155 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
156 if (!mem)
157 return -ENOENT;
158
159 mbox_base = ioremap(mem->start, resource_size(mem));
160 if (!mbox_base)
161 return -ENOMEM;
162
163 ret = omap_mbox_register(&pdev->dev, list);
164 if (ret) {
165 iounmap(mbox_base);
166 return ret;
167 }
168
169 return 0;
170}
171
172static int omap1_mbox_remove(struct platform_device *pdev)
173{
174 omap_mbox_unregister();
175 iounmap(mbox_base);
176 return 0;
177}
178
179static struct platform_driver omap1_mbox_driver = {
180 .probe = omap1_mbox_probe,
181 .remove = omap1_mbox_remove,
182 .driver = {
183 .name = "omap-mailbox",
184 },
185};
186
187static int __init omap1_mbox_init(void)
188{
189 return platform_driver_register(&omap1_mbox_driver);
190}
191
192static void __exit omap1_mbox_exit(void)
193{
194 platform_driver_unregister(&omap1_mbox_driver);
195}
196
197module_init(omap1_mbox_init);
198module_exit(omap1_mbox_exit);
199
200MODULE_LICENSE("GPL v2");
201MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions");
202MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
203MODULE_ALIAS("platform:omap1-mailbox");
diff --git a/drivers/mailbox/mailbox-omap2.c b/drivers/mailbox/mailbox-omap2.c
new file mode 100644
index 000000000000..eba380d7b17f
--- /dev/null
+++ b/drivers/mailbox/mailbox-omap2.c
@@ -0,0 +1,358 @@
1/*
2 * Mailbox reservation modules for OMAP2/3
3 *
4 * Copyright (C) 2006-2009 Nokia Corporation
5 * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
6 * and Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/platform_device.h>
18#include <linux/io.h>
19#include <linux/pm_runtime.h>
20#include <linux/platform_data/mailbox-omap.h>
21
22#include "omap-mbox.h"
23
24#define MAILBOX_REVISION 0x000
25#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
26#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
27#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
28#define MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
29#define MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
30
31#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
32#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
33#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
34
35#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
36#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
37
38#define MBOX_REG_SIZE 0x120
39
40#define OMAP4_MBOX_REG_SIZE 0x130
41
42#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
43#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
44
45static void __iomem *mbox_base;
46
47struct omap_mbox2_fifo {
48 unsigned long msg;
49 unsigned long fifo_stat;
50 unsigned long msg_stat;
51};
52
53struct omap_mbox2_priv {
54 struct omap_mbox2_fifo tx_fifo;
55 struct omap_mbox2_fifo rx_fifo;
56 unsigned long irqenable;
57 unsigned long irqstatus;
58 u32 newmsg_bit;
59 u32 notfull_bit;
60 u32 ctx[OMAP4_MBOX_NR_REGS];
61 unsigned long irqdisable;
62 u32 intr_type;
63};
64
65static inline unsigned int mbox_read_reg(size_t ofs)
66{
67 return __raw_readl(mbox_base + ofs);
68}
69
70static inline void mbox_write_reg(u32 val, size_t ofs)
71{
72 __raw_writel(val, mbox_base + ofs);
73}
74
75/* Mailbox H/W preparations */
76static int omap2_mbox_startup(struct omap_mbox *mbox)
77{
78 u32 l;
79
80 pm_runtime_enable(mbox->dev->parent);
81 pm_runtime_get_sync(mbox->dev->parent);
82
83 l = mbox_read_reg(MAILBOX_REVISION);
84 pr_debug("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
85
86 return 0;
87}
88
89static void omap2_mbox_shutdown(struct omap_mbox *mbox)
90{
91 pm_runtime_put_sync(mbox->dev->parent);
92 pm_runtime_disable(mbox->dev->parent);
93}
94
95/* Mailbox FIFO handle functions */
96static mbox_msg_t omap2_mbox_fifo_read(struct omap_mbox *mbox)
97{
98 struct omap_mbox2_fifo *fifo =
99 &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo;
100 return (mbox_msg_t) mbox_read_reg(fifo->msg);
101}
102
103static void omap2_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
104{
105 struct omap_mbox2_fifo *fifo =
106 &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
107 mbox_write_reg(msg, fifo->msg);
108}
109
110static int omap2_mbox_fifo_empty(struct omap_mbox *mbox)
111{
112 struct omap_mbox2_fifo *fifo =
113 &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo;
114 return (mbox_read_reg(fifo->msg_stat) == 0);
115}
116
117static int omap2_mbox_fifo_full(struct omap_mbox *mbox)
118{
119 struct omap_mbox2_fifo *fifo =
120 &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
121 return mbox_read_reg(fifo->fifo_stat);
122}
123
124/* Mailbox IRQ handle functions */
125static void omap2_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
126{
127 struct omap_mbox2_priv *p = mbox->priv;
128 u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
129
130 l = mbox_read_reg(p->irqenable);
131 l |= bit;
132 mbox_write_reg(l, p->irqenable);
133}
134
135static void omap2_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
136{
137 struct omap_mbox2_priv *p = mbox->priv;
138 u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
139
140 /*
141 * Read and update the interrupt configuration register for pre-OMAP4.
142 * OMAP4 and later SoCs have a dedicated interrupt disabling register.
143 */
144 if (!p->intr_type)
145 bit = mbox_read_reg(p->irqdisable) & ~bit;
146
147 mbox_write_reg(bit, p->irqdisable);
148}
149
150static void omap2_mbox_ack_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
151{
152 struct omap_mbox2_priv *p = mbox->priv;
153 u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
154
155 mbox_write_reg(bit, p->irqstatus);
156
157 /* Flush posted write for irq status to avoid spurious interrupts */
158 mbox_read_reg(p->irqstatus);
159}
160
161static int omap2_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
162{
163 struct omap_mbox2_priv *p = mbox->priv;
164 u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
165 u32 enable = mbox_read_reg(p->irqenable);
166 u32 status = mbox_read_reg(p->irqstatus);
167
168 return (int)(enable & status & bit);
169}
170
171static void omap2_mbox_save_ctx(struct omap_mbox *mbox)
172{
173 int i;
174 struct omap_mbox2_priv *p = mbox->priv;
175 int nr_regs;
176
177 if (p->intr_type)
178 nr_regs = OMAP4_MBOX_NR_REGS;
179 else
180 nr_regs = MBOX_NR_REGS;
181 for (i = 0; i < nr_regs; i++) {
182 p->ctx[i] = mbox_read_reg(i * sizeof(u32));
183
184 dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
185 i, p->ctx[i]);
186 }
187}
188
189static void omap2_mbox_restore_ctx(struct omap_mbox *mbox)
190{
191 int i;
192 struct omap_mbox2_priv *p = mbox->priv;
193 int nr_regs;
194
195 if (p->intr_type)
196 nr_regs = OMAP4_MBOX_NR_REGS;
197 else
198 nr_regs = MBOX_NR_REGS;
199 for (i = 0; i < nr_regs; i++) {
200 mbox_write_reg(p->ctx[i], i * sizeof(u32));
201
202 dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
203 i, p->ctx[i]);
204 }
205}
206
207static struct omap_mbox_ops omap2_mbox_ops = {
208 .type = OMAP_MBOX_TYPE2,
209 .startup = omap2_mbox_startup,
210 .shutdown = omap2_mbox_shutdown,
211 .fifo_read = omap2_mbox_fifo_read,
212 .fifo_write = omap2_mbox_fifo_write,
213 .fifo_empty = omap2_mbox_fifo_empty,
214 .fifo_full = omap2_mbox_fifo_full,
215 .enable_irq = omap2_mbox_enable_irq,
216 .disable_irq = omap2_mbox_disable_irq,
217 .ack_irq = omap2_mbox_ack_irq,
218 .is_irq = omap2_mbox_is_irq,
219 .save_ctx = omap2_mbox_save_ctx,
220 .restore_ctx = omap2_mbox_restore_ctx,
221};
222
223static int omap2_mbox_probe(struct platform_device *pdev)
224{
225 struct resource *mem;
226 int ret;
227 struct omap_mbox **list, *mbox, *mboxblk;
228 struct omap_mbox2_priv *priv, *privblk;
229 struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
230 struct omap_mbox_dev_info *info;
231 int i;
232
233 if (!pdata || !pdata->info_cnt || !pdata->info) {
234 pr_err("%s: platform not supported\n", __func__);
235 return -ENODEV;
236 }
237
238 /* allocate one extra for marking end of list */
239 list = kzalloc((pdata->info_cnt + 1) * sizeof(*list), GFP_KERNEL);
240 if (!list)
241 return -ENOMEM;
242
243 mboxblk = mbox = kzalloc(pdata->info_cnt * sizeof(*mbox), GFP_KERNEL);
244 if (!mboxblk) {
245 ret = -ENOMEM;
246 goto free_list;
247 }
248
249 privblk = priv = kzalloc(pdata->info_cnt * sizeof(*priv), GFP_KERNEL);
250 if (!privblk) {
251 ret = -ENOMEM;
252 goto free_mboxblk;
253 }
254
255 info = pdata->info;
256 for (i = 0; i < pdata->info_cnt; i++, info++, priv++) {
257 priv->tx_fifo.msg = MAILBOX_MESSAGE(info->tx_id);
258 priv->tx_fifo.fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id);
259 priv->rx_fifo.msg = MAILBOX_MESSAGE(info->rx_id);
260 priv->rx_fifo.msg_stat = MAILBOX_MSGSTATUS(info->rx_id);
261 priv->notfull_bit = MAILBOX_IRQ_NOTFULL(info->tx_id);
262 priv->newmsg_bit = MAILBOX_IRQ_NEWMSG(info->rx_id);
263 if (pdata->intr_type) {
264 priv->irqenable = OMAP4_MAILBOX_IRQENABLE(info->usr_id);
265 priv->irqstatus = OMAP4_MAILBOX_IRQSTATUS(info->usr_id);
266 priv->irqdisable =
267 OMAP4_MAILBOX_IRQENABLE_CLR(info->usr_id);
268 } else {
269 priv->irqenable = MAILBOX_IRQENABLE(info->usr_id);
270 priv->irqstatus = MAILBOX_IRQSTATUS(info->usr_id);
271 priv->irqdisable = MAILBOX_IRQENABLE(info->usr_id);
272 }
273 priv->intr_type = pdata->intr_type;
274
275 mbox->priv = priv;
276 mbox->name = info->name;
277 mbox->ops = &omap2_mbox_ops;
278 mbox->irq = platform_get_irq(pdev, info->irq_id);
279 if (mbox->irq < 0) {
280 ret = mbox->irq;
281 goto free_privblk;
282 }
283 list[i] = mbox++;
284 }
285
286 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
287 if (!mem) {
288 ret = -ENOENT;
289 goto free_privblk;
290 }
291
292 mbox_base = ioremap(mem->start, resource_size(mem));
293 if (!mbox_base) {
294 ret = -ENOMEM;
295 goto free_privblk;
296 }
297
298 ret = omap_mbox_register(&pdev->dev, list);
299 if (ret)
300 goto unmap_mbox;
301 platform_set_drvdata(pdev, list);
302
303 return 0;
304
305unmap_mbox:
306 iounmap(mbox_base);
307free_privblk:
308 kfree(privblk);
309free_mboxblk:
310 kfree(mboxblk);
311free_list:
312 kfree(list);
313 return ret;
314}
315
316static int omap2_mbox_remove(struct platform_device *pdev)
317{
318 struct omap_mbox2_priv *privblk;
319 struct omap_mbox **list = platform_get_drvdata(pdev);
320 struct omap_mbox *mboxblk = list[0];
321
322 privblk = mboxblk->priv;
323 omap_mbox_unregister();
324 iounmap(mbox_base);
325 kfree(privblk);
326 kfree(mboxblk);
327 kfree(list);
328 platform_set_drvdata(pdev, NULL);
329
330 return 0;
331}
332
333static struct platform_driver omap2_mbox_driver = {
334 .probe = omap2_mbox_probe,
335 .remove = omap2_mbox_remove,
336 .driver = {
337 .name = "omap-mailbox",
338 },
339};
340
341static int __init omap2_mbox_init(void)
342{
343 return platform_driver_register(&omap2_mbox_driver);
344}
345
346static void __exit omap2_mbox_exit(void)
347{
348 platform_driver_unregister(&omap2_mbox_driver);
349}
350
351module_init(omap2_mbox_init);
352module_exit(omap2_mbox_exit);
353
354MODULE_LICENSE("GPL v2");
355MODULE_DESCRIPTION("omap mailbox: omap2/3/4 architecture specific functions");
356MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
357MODULE_AUTHOR("Paul Mundt");
358MODULE_ALIAS("platform:omap2-mailbox");
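omap2_mbox_probe() builds its mailbox list purely from platform data. A minimal sketch of such data, restricted to the fields the probe dereferences (name, tx_id, rx_id, irq_id, usr_id, info_cnt, intr_type); every value below is a placeholder:

#include <linux/platform_data/mailbox-omap.h>

static struct omap_mbox_dev_info example_mbox_info[] = {
	{
		.name	= "mbox-ipu",	/* hypothetical mailbox name */
		.tx_id	= 0,		/* FIFO used for transmit */
		.rx_id	= 1,		/* FIFO used for receive */
		.irq_id	= 0,		/* index into the device's IRQ resources */
		.usr_id	= 2,		/* interrupt user this mailbox signals */
	},
};

static struct omap_mbox_pdata example_mbox_pdata = {
	.intr_type	= 1,	/* non-zero selects the OMAP4-style IRQENABLE_CLR handling */
	.info_cnt	= ARRAY_SIZE(example_mbox_info),
	.info		= example_mbox_info,
};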
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
new file mode 100644
index 000000000000..d79a646b9042
--- /dev/null
+++ b/drivers/mailbox/omap-mailbox.c
@@ -0,0 +1,469 @@
1/*
2 * OMAP mailbox driver
3 *
4 * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
5 *
6 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/mutex.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/kfifo.h>
30#include <linux/err.h>
31#include <linux/notifier.h>
32#include <linux/module.h>
33
34#include "omap-mbox.h"
35
36static struct omap_mbox **mboxes;
37
38static int mbox_configured;
39static DEFINE_MUTEX(mbox_configured_lock);
40
41static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
42module_param(mbox_kfifo_size, uint, S_IRUGO);
43MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
44
45/* Mailbox FIFO handle functions */
46static inline mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
47{
48 return mbox->ops->fifo_read(mbox);
49}
50static inline void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
51{
52 mbox->ops->fifo_write(mbox, msg);
53}
54static inline int mbox_fifo_empty(struct omap_mbox *mbox)
55{
56 return mbox->ops->fifo_empty(mbox);
57}
58static inline int mbox_fifo_full(struct omap_mbox *mbox)
59{
60 return mbox->ops->fifo_full(mbox);
61}
62
63/* Mailbox IRQ handle functions */
64static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
65{
66 if (mbox->ops->ack_irq)
67 mbox->ops->ack_irq(mbox, irq);
68}
69static inline int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
70{
71 return mbox->ops->is_irq(mbox, irq);
72}
73
74/*
75 * message sender
76 */
77static int __mbox_poll_for_space(struct omap_mbox *mbox)
78{
79 int ret = 0, i = 1000;
80
81 while (mbox_fifo_full(mbox)) {
82 if (mbox->ops->type == OMAP_MBOX_TYPE2)
83 return -1;
84 if (--i == 0)
85 return -1;
86 udelay(1);
87 }
88 return ret;
89}
90
91int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
92{
93 struct omap_mbox_queue *mq = mbox->txq;
94 int ret = 0, len;
95
96 spin_lock_bh(&mq->lock);
97
98 if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
99 ret = -ENOMEM;
100 goto out;
101 }
102
103 if (kfifo_is_empty(&mq->fifo) && !__mbox_poll_for_space(mbox)) {
104 mbox_fifo_write(mbox, msg);
105 goto out;
106 }
107
108 len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
109 WARN_ON(len != sizeof(msg));
110
111 tasklet_schedule(&mbox->txq->tasklet);
112
113out:
114 spin_unlock_bh(&mq->lock);
115 return ret;
116}
117EXPORT_SYMBOL(omap_mbox_msg_send);
118
119void omap_mbox_save_ctx(struct omap_mbox *mbox)
120{
121 if (!mbox->ops->save_ctx) {
122 dev_err(mbox->dev, "%s:\tno save\n", __func__);
123 return;
124 }
125
126 mbox->ops->save_ctx(mbox);
127}
128EXPORT_SYMBOL(omap_mbox_save_ctx);
129
130void omap_mbox_restore_ctx(struct omap_mbox *mbox)
131{
132 if (!mbox->ops->restore_ctx) {
133 dev_err(mbox->dev, "%s:\tno restore\n", __func__);
134 return;
135 }
136
137 mbox->ops->restore_ctx(mbox);
138}
139EXPORT_SYMBOL(omap_mbox_restore_ctx);
140
141void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
142{
143 mbox->ops->enable_irq(mbox, irq);
144}
145EXPORT_SYMBOL(omap_mbox_enable_irq);
146
147void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
148{
149 mbox->ops->disable_irq(mbox, irq);
150}
151EXPORT_SYMBOL(omap_mbox_disable_irq);
152
153static void mbox_tx_tasklet(unsigned long tx_data)
154{
155 struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
156 struct omap_mbox_queue *mq = mbox->txq;
157 mbox_msg_t msg;
158 int ret;
159
160 while (kfifo_len(&mq->fifo)) {
161 if (__mbox_poll_for_space(mbox)) {
162 omap_mbox_enable_irq(mbox, IRQ_TX);
163 break;
164 }
165
166 ret = kfifo_out(&mq->fifo, (unsigned char *)&msg,
167 sizeof(msg));
168 WARN_ON(ret != sizeof(msg));
169
170 mbox_fifo_write(mbox, msg);
171 }
172}
173
174/*
175 * Message receiver(workqueue)
176 */
177static void mbox_rx_work(struct work_struct *work)
178{
179 struct omap_mbox_queue *mq =
180 container_of(work, struct omap_mbox_queue, work);
181 mbox_msg_t msg;
182 int len;
183
184 while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
185 len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
186 WARN_ON(len != sizeof(msg));
187
188 blocking_notifier_call_chain(&mq->mbox->notifier, len,
189 (void *)msg);
190 spin_lock_irq(&mq->lock);
191 if (mq->full) {
192 mq->full = false;
193 omap_mbox_enable_irq(mq->mbox, IRQ_RX);
194 }
195 spin_unlock_irq(&mq->lock);
196 }
197}
198
199/*
200 * Mailbox interrupt handler
201 */
202static void __mbox_tx_interrupt(struct omap_mbox *mbox)
203{
204 omap_mbox_disable_irq(mbox, IRQ_TX);
205 ack_mbox_irq(mbox, IRQ_TX);
206 tasklet_schedule(&mbox->txq->tasklet);
207}
208
209static void __mbox_rx_interrupt(struct omap_mbox *mbox)
210{
211 struct omap_mbox_queue *mq = mbox->rxq;
212 mbox_msg_t msg;
213 int len;
214
215 while (!mbox_fifo_empty(mbox)) {
216 if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
217 omap_mbox_disable_irq(mbox, IRQ_RX);
218 mq->full = true;
219 goto nomem;
220 }
221
222 msg = mbox_fifo_read(mbox);
223
224 len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
225 WARN_ON(len != sizeof(msg));
226
227 if (mbox->ops->type == OMAP_MBOX_TYPE1)
228 break;
229 }
230
231 /* no more messages in the fifo. clear IRQ source. */
232 ack_mbox_irq(mbox, IRQ_RX);
233nomem:
234 schedule_work(&mbox->rxq->work);
235}
236
237static irqreturn_t mbox_interrupt(int irq, void *p)
238{
239 struct omap_mbox *mbox = p;
240
241 if (is_mbox_irq(mbox, IRQ_TX))
242 __mbox_tx_interrupt(mbox);
243
244 if (is_mbox_irq(mbox, IRQ_RX))
245 __mbox_rx_interrupt(mbox);
246
247 return IRQ_HANDLED;
248}
249
250static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
251 void (*work) (struct work_struct *),
252 void (*tasklet)(unsigned long))
253{
254 struct omap_mbox_queue *mq;
255
256 mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
257 if (!mq)
258 return NULL;
259
260 spin_lock_init(&mq->lock);
261
262 if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
263 goto error;
264
265 if (work)
266 INIT_WORK(&mq->work, work);
267
268 if (tasklet)
269 tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox);
270 return mq;
271error:
272 kfree(mq);
273 return NULL;
274}
275
276static void mbox_queue_free(struct omap_mbox_queue *q)
277{
278 kfifo_free(&q->fifo);
279 kfree(q);
280}
281
282static int omap_mbox_startup(struct omap_mbox *mbox)
283{
284 int ret = 0;
285 struct omap_mbox_queue *mq;
286
287 mutex_lock(&mbox_configured_lock);
288 if (!mbox_configured++) {
289 if (likely(mbox->ops->startup)) {
290 ret = mbox->ops->startup(mbox);
291 if (unlikely(ret))
292 goto fail_startup;
293 } else
294 goto fail_startup;
295 }
296
297 if (!mbox->use_count++) {
298 mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
299 if (!mq) {
300 ret = -ENOMEM;
301 goto fail_alloc_txq;
302 }
303 mbox->txq = mq;
304
305 mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL);
306 if (!mq) {
307 ret = -ENOMEM;
308 goto fail_alloc_rxq;
309 }
310 mbox->rxq = mq;
311 mq->mbox = mbox;
312 ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
313 mbox->name, mbox);
314 if (unlikely(ret)) {
315			pr_err("failed to register mailbox interrupt: %d\n",
316 ret);
317 goto fail_request_irq;
318 }
319
320 omap_mbox_enable_irq(mbox, IRQ_RX);
321 }
322 mutex_unlock(&mbox_configured_lock);
323 return 0;
324
325fail_request_irq:
326 mbox_queue_free(mbox->rxq);
327fail_alloc_rxq:
328 mbox_queue_free(mbox->txq);
329fail_alloc_txq:
330 if (mbox->ops->shutdown)
331 mbox->ops->shutdown(mbox);
332 mbox->use_count--;
333fail_startup:
334 mbox_configured--;
335 mutex_unlock(&mbox_configured_lock);
336 return ret;
337}
338
339static void omap_mbox_fini(struct omap_mbox *mbox)
340{
341 mutex_lock(&mbox_configured_lock);
342
343 if (!--mbox->use_count) {
344 omap_mbox_disable_irq(mbox, IRQ_RX);
345 free_irq(mbox->irq, mbox);
346 tasklet_kill(&mbox->txq->tasklet);
347 flush_work(&mbox->rxq->work);
348 mbox_queue_free(mbox->txq);
349 mbox_queue_free(mbox->rxq);
350 }
351
352 if (likely(mbox->ops->shutdown)) {
353 if (!--mbox_configured)
354 mbox->ops->shutdown(mbox);
355 }
356
357 mutex_unlock(&mbox_configured_lock);
358}
359
360struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
361{
362 struct omap_mbox *_mbox, *mbox = NULL;
363 int i, ret;
364
365 if (!mboxes)
366 return ERR_PTR(-EINVAL);
367
368 for (i = 0; (_mbox = mboxes[i]); i++) {
369 if (!strcmp(_mbox->name, name)) {
370 mbox = _mbox;
371 break;
372 }
373 }
374
375 if (!mbox)
376 return ERR_PTR(-ENOENT);
377
378 if (nb)
379 blocking_notifier_chain_register(&mbox->notifier, nb);
380
381 ret = omap_mbox_startup(mbox);
382 if (ret) {
383 blocking_notifier_chain_unregister(&mbox->notifier, nb);
384 return ERR_PTR(-ENODEV);
385 }
386
387 return mbox;
388}
389EXPORT_SYMBOL(omap_mbox_get);
390
391void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb)
392{
393 blocking_notifier_chain_unregister(&mbox->notifier, nb);
394 omap_mbox_fini(mbox);
395}
396EXPORT_SYMBOL(omap_mbox_put);
397
398static struct class omap_mbox_class = { .name = "mbox", };
399
400int omap_mbox_register(struct device *parent, struct omap_mbox **list)
401{
402 int ret;
403 int i;
404
405 mboxes = list;
406 if (!mboxes)
407 return -EINVAL;
408
409 for (i = 0; mboxes[i]; i++) {
410 struct omap_mbox *mbox = mboxes[i];
411 mbox->dev = device_create(&omap_mbox_class,
412 parent, 0, mbox, "%s", mbox->name);
413 if (IS_ERR(mbox->dev)) {
414 ret = PTR_ERR(mbox->dev);
415 goto err_out;
416 }
417
418 BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier);
419 }
420 return 0;
421
422err_out:
423 while (i--)
424 device_unregister(mboxes[i]->dev);
425 return ret;
426}
427EXPORT_SYMBOL(omap_mbox_register);
428
429int omap_mbox_unregister(void)
430{
431 int i;
432
433 if (!mboxes)
434 return -EINVAL;
435
436 for (i = 0; mboxes[i]; i++)
437 device_unregister(mboxes[i]->dev);
438 mboxes = NULL;
439 return 0;
440}
441EXPORT_SYMBOL(omap_mbox_unregister);
442
443static int __init omap_mbox_init(void)
444{
445 int err;
446
447 err = class_register(&omap_mbox_class);
448 if (err)
449 return err;
450
451 /* kfifo size sanity check: alignment and minimal size */
452 mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t));
453 mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
454 sizeof(mbox_msg_t));
455
456 return 0;
457}
458subsys_initcall(omap_mbox_init);
459
460static void __exit omap_mbox_exit(void)
461{
462 class_unregister(&omap_mbox_class);
463}
464module_exit(omap_mbox_exit);
465
466MODULE_LICENSE("GPL v2");
467MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
468MODULE_AUTHOR("Toshihiro Kobayashi");
469MODULE_AUTHOR("Hiroshi DOYU");
diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h
new file mode 100644
index 000000000000..6cd38fc68599
--- /dev/null
+++ b/drivers/mailbox/omap-mbox.h
@@ -0,0 +1,67 @@
1/*
2 * omap-mbox.h: OMAP mailbox internal definitions
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef OMAP_MBOX_H
10#define OMAP_MBOX_H
11
12#include <linux/device.h>
13#include <linux/interrupt.h>
14#include <linux/kfifo.h>
15#include <linux/spinlock.h>
16#include <linux/workqueue.h>
17#include <linux/omap-mailbox.h>
18
19typedef int __bitwise omap_mbox_type_t;
20#define OMAP_MBOX_TYPE1 ((__force omap_mbox_type_t) 1)
21#define OMAP_MBOX_TYPE2 ((__force omap_mbox_type_t) 2)
22
23struct omap_mbox_ops {
24 omap_mbox_type_t type;
25 int (*startup)(struct omap_mbox *mbox);
26 void (*shutdown)(struct omap_mbox *mbox);
27 /* fifo */
28 mbox_msg_t (*fifo_read)(struct omap_mbox *mbox);
29 void (*fifo_write)(struct omap_mbox *mbox, mbox_msg_t msg);
30 int (*fifo_empty)(struct omap_mbox *mbox);
31 int (*fifo_full)(struct omap_mbox *mbox);
32 /* irq */
33 void (*enable_irq)(struct omap_mbox *mbox,
34 omap_mbox_irq_t irq);
35 void (*disable_irq)(struct omap_mbox *mbox,
36 omap_mbox_irq_t irq);
37 void (*ack_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
38 int (*is_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
39 /* ctx */
40 void (*save_ctx)(struct omap_mbox *mbox);
41 void (*restore_ctx)(struct omap_mbox *mbox);
42};
43
44struct omap_mbox_queue {
45 spinlock_t lock;
46 struct kfifo fifo;
47 struct work_struct work;
48 struct tasklet_struct tasklet;
49 struct omap_mbox *mbox;
50 bool full;
51};
52
53struct omap_mbox {
54 const char *name;
55 unsigned int irq;
56 struct omap_mbox_queue *txq, *rxq;
57 struct omap_mbox_ops *ops;
58 struct device *dev;
59 void *priv;
60 int use_count;
61 struct blocking_notifier_head notifier;
62};
63
64int omap_mbox_register(struct device *parent, struct omap_mbox **);
65int omap_mbox_unregister(void);
66
67#endif /* OMAP_MBOX_H */
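
For reference, the client-side contract of the relocated mailbox driver reduces to omap_mbox_get(), omap_mbox_msg_send() and omap_mbox_put(), with received words delivered through the blocking notifier chain driven by mbox_rx_work() above. The following is a minimal, illustrative sketch only and is not part of this series; the mailbox name "mbox_ipu" and the demo_* identifiers are placeholders.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/omap-mailbox.h>

/* Receive side: mbox_rx_work() runs the blocking notifier chain once per
 * received word, passing the message itself as the data pointer. */
static int demo_mbox_notify(struct notifier_block *nb, unsigned long len,
			    void *data)
{
	mbox_msg_t msg = (mbox_msg_t)(unsigned long)data;

	pr_info("mailbox rx: 0x%08x\n", msg);
	return NOTIFY_DONE;
}

static struct notifier_block demo_mbox_nb = {
	.notifier_call	= demo_mbox_notify,
};

static int demo_mbox_ping(void)
{
	struct omap_mbox *mbox;
	int ret;

	/* "mbox_ipu" is a placeholder name; real names come from platform data */
	mbox = omap_mbox_get("mbox_ipu", &demo_mbox_nb);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	/* Queued into the tx kfifo and drained by mbox_tx_tasklet() */
	ret = omap_mbox_msg_send(mbox, 0xdeadbeef);

	omap_mbox_put(mbox, &demo_mbox_nb);
	return ret;
}
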
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index a1c6dd32e14b..901a388dbea7 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -217,6 +217,11 @@ config PINCTRL_EXYNOS5440
217 select PINMUX 217 select PINMUX
218 select PINCONF 218 select PINCONF
219 219
220config PINCTRL_S3C24XX
221 bool "Samsung S3C24XX SoC pinctrl driver"
222 depends on ARCH_S3C24XX
223 select PINCTRL_SAMSUNG
224
220config PINCTRL_S3C64XX 225config PINCTRL_S3C64XX
221 bool "Samsung S3C64XX SoC pinctrl driver" 226 bool "Samsung S3C64XX SoC pinctrl driver"
222 depends on ARCH_S3C64XX 227 depends on ARCH_S3C64XX
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 9bdaeb8785ce..f90b645fb601 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
42obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o 42obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
43obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o 43obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
44obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o 44obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
45obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o
45obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o 46obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o
46obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o 47obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
47obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o 48obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
diff --git a/drivers/pinctrl/pinctrl-s3c24xx.c b/drivers/pinctrl/pinctrl-s3c24xx.c
new file mode 100644
index 000000000000..24446daaad7d
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-s3c24xx.c
@@ -0,0 +1,651 @@
1/*
2 * S3C24XX specific support for Samsung pinctrl/gpiolib driver.
3 *
4 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This file contains the Samsung S3C24XX specific information required by the
12 * Samsung pinctrl/gpiolib driver. It also includes the implementation of
13 * external gpio and wakeup interrupt support.
14 */
15
16#include <linux/module.h>
17#include <linux/device.h>
18#include <linux/interrupt.h>
19#include <linux/irqdomain.h>
20#include <linux/irq.h>
21#include <linux/of_irq.h>
22#include <linux/irqchip/chained_irq.h>
23#include <linux/io.h>
24#include <linux/slab.h>
25#include <linux/err.h>
26
27#include "pinctrl-samsung.h"
28
29#define NUM_EINT 24
30#define NUM_EINT_IRQ 6
31#define EINT_MAX_PER_GROUP 8
32
33#define EINTPEND_REG 0xa8
34#define EINTMASK_REG 0xa4
35
36#define EINT_GROUP(i) ((int)((i) / EINT_MAX_PER_GROUP))
37#define EINT_REG(i) ((EINT_GROUP(i) * 4) + 0x88)
38#define EINT_OFFS(i) ((i) % EINT_MAX_PER_GROUP * 4)
39
40#define EINT_LEVEL_LOW 0
41#define EINT_LEVEL_HIGH 1
42#define EINT_EDGE_FALLING 2
43#define EINT_EDGE_RISING 4
44#define EINT_EDGE_BOTH 6
45#define EINT_MASK 0xf
46
47static struct samsung_pin_bank_type bank_type_1bit = {
48 .fld_width = { 1, 1, },
49 .reg_offset = { 0x00, 0x04, },
50};
51
52static struct samsung_pin_bank_type bank_type_2bit = {
53 .fld_width = { 2, 1, 2, },
54 .reg_offset = { 0x00, 0x04, 0x08, },
55};
56
57#define PIN_BANK_A(pins, reg, id) \
58 { \
59 .type = &bank_type_1bit, \
60 .pctl_offset = reg, \
61 .nr_pins = pins, \
62 .eint_type = EINT_TYPE_NONE, \
63 .name = id \
64 }
65
66#define PIN_BANK_2BIT(pins, reg, id) \
67 { \
68 .type = &bank_type_2bit, \
69 .pctl_offset = reg, \
70 .nr_pins = pins, \
71 .eint_type = EINT_TYPE_NONE, \
72 .name = id \
73 }
74
75#define PIN_BANK_2BIT_EINTW(pins, reg, id, eoffs, emask)\
76 { \
77 .type = &bank_type_2bit, \
78 .pctl_offset = reg, \
79 .nr_pins = pins, \
80 .eint_type = EINT_TYPE_WKUP, \
81 .eint_func = 2, \
82 .eint_mask = emask, \
83 .eint_offset = eoffs, \
84 .name = id \
85 }
86
87/**
88 * struct s3c24xx_eint_data: EINT common data
89 * @drvdata: pin controller driver data
90 * @domains: IRQ domains of particular EINT interrupts
91 * @parents: mapped parent irqs in the main interrupt controller
92 */
93struct s3c24xx_eint_data {
94 struct samsung_pinctrl_drv_data *drvdata;
95 struct irq_domain *domains[NUM_EINT];
96 int parents[NUM_EINT_IRQ];
97};
98
99/**
100 * struct s3c24xx_eint_domain_data: per irq-domain data
101 * @bank: pin bank related to the domain
102 * @eint_data: common data
103 * @eint0_3_parent_only: EINTs 0-3 live only in the main interrupt controller
104 */
105struct s3c24xx_eint_domain_data {
106 struct samsung_pin_bank *bank;
107 struct s3c24xx_eint_data *eint_data;
108 bool eint0_3_parent_only;
109};
110
111static int s3c24xx_eint_get_trigger(unsigned int type)
112{
113 switch (type) {
114 case IRQ_TYPE_EDGE_RISING:
115 return EINT_EDGE_RISING;
116 break;
117 case IRQ_TYPE_EDGE_FALLING:
118 return EINT_EDGE_FALLING;
119 break;
120 case IRQ_TYPE_EDGE_BOTH:
121 return EINT_EDGE_BOTH;
122 break;
123 case IRQ_TYPE_LEVEL_HIGH:
124 return EINT_LEVEL_HIGH;
125 break;
126 case IRQ_TYPE_LEVEL_LOW:
127 return EINT_LEVEL_LOW;
128 break;
129 default:
130 return -EINVAL;
131 }
132}
133
134static void s3c24xx_eint_set_handler(unsigned int irq, unsigned int type)
135{
136 /* Edge- and level-triggered interrupts need different handlers */
137 if (type & IRQ_TYPE_EDGE_BOTH)
138 __irq_set_handler_locked(irq, handle_edge_irq);
139 else
140 __irq_set_handler_locked(irq, handle_level_irq);
141}
142
143static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
144 struct samsung_pin_bank *bank, int pin)
145{
146 struct samsung_pin_bank_type *bank_type = bank->type;
147 unsigned long flags;
148 void __iomem *reg;
149 u8 shift;
150 u32 mask;
151 u32 val;
152
153 /* Make sure that pin is configured as interrupt */
154 reg = d->virt_base + bank->pctl_offset;
155 shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC];
156 mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
157
158 spin_lock_irqsave(&bank->slock, flags);
159
160 val = readl(reg);
161 val &= ~(mask << shift);
162 val |= bank->eint_func << shift;
163 writel(val, reg);
164
165 spin_unlock_irqrestore(&bank->slock, flags);
166}
167
168static int s3c24xx_eint_type(struct irq_data *data, unsigned int type)
169{
170 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
171 struct samsung_pinctrl_drv_data *d = bank->drvdata;
172 int index = bank->eint_offset + data->hwirq;
173 void __iomem *reg;
174 int trigger;
175 u8 shift;
176 u32 val;
177
178 trigger = s3c24xx_eint_get_trigger(type);
179 if (trigger < 0) {
180 dev_err(d->dev, "unsupported external interrupt type\n");
181 return -EINVAL;
182 }
183
184 s3c24xx_eint_set_handler(data->irq, type);
185
186 /* Set up interrupt trigger */
187 reg = d->virt_base + EINT_REG(index);
188 shift = EINT_OFFS(index);
189
190 val = readl(reg);
191 val &= ~(EINT_MASK << shift);
192 val |= trigger << shift;
193 writel(val, reg);
194
195 s3c24xx_eint_set_function(d, bank, data->hwirq);
196
197 return 0;
198}
199
200/* Handling of EINTs 0-3 on all except S3C2412 and S3C2413 */
201
202static void s3c2410_eint0_3_ack(struct irq_data *data)
203{
204 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
205 struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data;
206 struct s3c24xx_eint_data *eint_data = ddata->eint_data;
207 int parent_irq = eint_data->parents[data->hwirq];
208 struct irq_chip *parent_chip = irq_get_chip(parent_irq);
209
210 parent_chip->irq_ack(irq_get_irq_data(parent_irq));
211}
212
213static void s3c2410_eint0_3_mask(struct irq_data *data)
214{
215 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
216 struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data;
217 struct s3c24xx_eint_data *eint_data = ddata->eint_data;
218 int parent_irq = eint_data->parents[data->hwirq];
219 struct irq_chip *parent_chip = irq_get_chip(parent_irq);
220
221 parent_chip->irq_mask(irq_get_irq_data(parent_irq));
222}
223
224static void s3c2410_eint0_3_unmask(struct irq_data *data)
225{
226 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
227 struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data;
228 struct s3c24xx_eint_data *eint_data = ddata->eint_data;
229 int parent_irq = eint_data->parents[data->hwirq];
230 struct irq_chip *parent_chip = irq_get_chip(parent_irq);
231
232 parent_chip->irq_unmask(irq_get_irq_data(parent_irq));
233}
234
235static struct irq_chip s3c2410_eint0_3_chip = {
236 .name = "s3c2410-eint0_3",
237 .irq_ack = s3c2410_eint0_3_ack,
238 .irq_mask = s3c2410_eint0_3_mask,
239 .irq_unmask = s3c2410_eint0_3_unmask,
240 .irq_set_type = s3c24xx_eint_type,
241};
242
243static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
244{
245 struct irq_data *data = irq_desc_get_irq_data(desc);
246 struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
247 unsigned int virq;
248
249 /* the first 4 eints have a simple 1 to 1 mapping */
250 virq = irq_linear_revmap(eint_data->domains[data->hwirq], data->hwirq);
251 /* Something must be really wrong if an unmapped EINT is unmasked */
252 BUG_ON(!virq);
253
254 generic_handle_irq(virq);
255}
256
257/* Handling of EINTs 0-3 on S3C2412 and S3C2413 */
258
259static void s3c2412_eint0_3_ack(struct irq_data *data)
260{
261 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
262 struct samsung_pinctrl_drv_data *d = bank->drvdata;
263
264 unsigned long bitval = 1UL << data->hwirq;
265 writel(bitval, d->virt_base + EINTPEND_REG);
266}
267
268static void s3c2412_eint0_3_mask(struct irq_data *data)
269{
270 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
271 struct samsung_pinctrl_drv_data *d = bank->drvdata;
272 unsigned long mask;
273
274 mask = readl(d->virt_base + EINTMASK_REG);
275 mask |= (1UL << data->hwirq);
276 writel(mask, d->virt_base + EINTMASK_REG);
277}
278
279static void s3c2412_eint0_3_unmask(struct irq_data *data)
280{
281 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
282 struct samsung_pinctrl_drv_data *d = bank->drvdata;
283 unsigned long mask;
284
285 mask = readl(d->virt_base + EINTMASK_REG);
286 mask &= ~(1UL << data->hwirq);
287 writel(mask, d->virt_base + EINTMASK_REG);
288}
289
290static struct irq_chip s3c2412_eint0_3_chip = {
291 .name = "s3c2412-eint0_3",
292 .irq_ack = s3c2412_eint0_3_ack,
293 .irq_mask = s3c2412_eint0_3_mask,
294 .irq_unmask = s3c2412_eint0_3_unmask,
295 .irq_set_type = s3c24xx_eint_type,
296};
297
298static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
299{
300 struct irq_chip *chip = irq_get_chip(irq);
301 struct irq_data *data = irq_desc_get_irq_data(desc);
302 struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
303 unsigned int virq;
304
305 chained_irq_enter(chip, desc);
306
307 /* the first 4 eints have a simple 1 to 1 mapping */
308 virq = irq_linear_revmap(eint_data->domains[data->hwirq], data->hwirq);
309 /* Something must be really wrong if an unmapped EINT is unmasked */
310 BUG_ON(!virq);
311
312 generic_handle_irq(virq);
313
314 chained_irq_exit(chip, desc);
315}
316
317/* Handling of all other eints */
318
319static void s3c24xx_eint_ack(struct irq_data *data)
320{
321 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
322 struct samsung_pinctrl_drv_data *d = bank->drvdata;
323 unsigned char index = bank->eint_offset + data->hwirq;
324
325 writel(1UL << index, d->virt_base + EINTPEND_REG);
326}
327
328static void s3c24xx_eint_mask(struct irq_data *data)
329{
330 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
331 struct samsung_pinctrl_drv_data *d = bank->drvdata;
332 unsigned char index = bank->eint_offset + data->hwirq;
333 unsigned long mask;
334
335 mask = readl(d->virt_base + EINTMASK_REG);
336 mask |= (1UL << index);
337 writel(mask, d->virt_base + EINTMASK_REG);
338}
339
340static void s3c24xx_eint_unmask(struct irq_data *data)
341{
342 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
343 struct samsung_pinctrl_drv_data *d = bank->drvdata;
344 unsigned char index = bank->eint_offset + data->hwirq;
345 unsigned long mask;
346
347 mask = readl(d->virt_base + EINTMASK_REG);
348 mask &= ~(1UL << index);
349 writel(mask, d->virt_base + EINTMASK_REG);
350}
351
352static struct irq_chip s3c24xx_eint_chip = {
353 .name = "s3c-eint",
354 .irq_ack = s3c24xx_eint_ack,
355 .irq_mask = s3c24xx_eint_mask,
356 .irq_unmask = s3c24xx_eint_unmask,
357 .irq_set_type = s3c24xx_eint_type,
358};
359
360static inline void s3c24xx_demux_eint(unsigned int irq, struct irq_desc *desc,
361 u32 offset, u32 range)
362{
363 struct irq_chip *chip = irq_get_chip(irq);
364 struct s3c24xx_eint_data *data = irq_get_handler_data(irq);
365 struct samsung_pinctrl_drv_data *d = data->drvdata;
366 unsigned int pend, mask;
367
368 chained_irq_enter(chip, desc);
369
370 pend = readl(d->virt_base + EINTPEND_REG);
371 mask = readl(d->virt_base + EINTMASK_REG);
372
373 pend &= ~mask;
374 pend &= range;
375
376 while (pend) {
377 unsigned int virq;
378
379 irq = __ffs(pend);
380 pend &= ~(1 << irq);
381 virq = irq_linear_revmap(data->domains[irq], irq - offset);
382 /* Something is really wrong if an unmapped EINT is unmasked */
383 BUG_ON(!virq);
384
385 generic_handle_irq(virq);
386 }
387
388 chained_irq_exit(chip, desc);
389}
390
391static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc)
392{
393 s3c24xx_demux_eint(irq, desc, 0, 0xf0);
394}
395
396static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc)
397{
398 s3c24xx_demux_eint(irq, desc, 8, 0xffff00);
399}
400
401static irq_flow_handler_t s3c2410_eint_handlers[NUM_EINT_IRQ] = {
402 s3c2410_demux_eint0_3,
403 s3c2410_demux_eint0_3,
404 s3c2410_demux_eint0_3,
405 s3c2410_demux_eint0_3,
406 s3c24xx_demux_eint4_7,
407 s3c24xx_demux_eint8_23,
408};
409
410static irq_flow_handler_t s3c2412_eint_handlers[NUM_EINT_IRQ] = {
411 s3c2412_demux_eint0_3,
412 s3c2412_demux_eint0_3,
413 s3c2412_demux_eint0_3,
414 s3c2412_demux_eint0_3,
415 s3c24xx_demux_eint4_7,
416 s3c24xx_demux_eint8_23,
417};
418
419static int s3c24xx_gpf_irq_map(struct irq_domain *h, unsigned int virq,
420 irq_hw_number_t hw)
421{
422 struct s3c24xx_eint_domain_data *ddata = h->host_data;
423 struct samsung_pin_bank *bank = ddata->bank;
424
425 if (!(bank->eint_mask & (1 << (bank->eint_offset + hw))))
426 return -EINVAL;
427
428 if (hw <= 3) {
429 if (ddata->eint0_3_parent_only)
430 irq_set_chip_and_handler(virq, &s3c2410_eint0_3_chip,
431 handle_edge_irq);
432 else
433 irq_set_chip_and_handler(virq, &s3c2412_eint0_3_chip,
434 handle_edge_irq);
435 } else {
436 irq_set_chip_and_handler(virq, &s3c24xx_eint_chip,
437 handle_edge_irq);
438 }
439 irq_set_chip_data(virq, bank);
440 set_irq_flags(virq, IRQF_VALID);
441 return 0;
442}
443
444static const struct irq_domain_ops s3c24xx_gpf_irq_ops = {
445 .map = s3c24xx_gpf_irq_map,
446 .xlate = irq_domain_xlate_twocell,
447};
448
449static int s3c24xx_gpg_irq_map(struct irq_domain *h, unsigned int virq,
450 irq_hw_number_t hw)
451{
452 struct s3c24xx_eint_domain_data *ddata = h->host_data;
453 struct samsung_pin_bank *bank = ddata->bank;
454
455 if (!(bank->eint_mask & (1 << (bank->eint_offset + hw))))
456 return -EINVAL;
457
458 irq_set_chip_and_handler(virq, &s3c24xx_eint_chip, handle_edge_irq);
459 irq_set_chip_data(virq, bank);
460 set_irq_flags(virq, IRQF_VALID);
461 return 0;
462}
463
464static const struct irq_domain_ops s3c24xx_gpg_irq_ops = {
465 .map = s3c24xx_gpg_irq_map,
466 .xlate = irq_domain_xlate_twocell,
467};
468
469static const struct of_device_id s3c24xx_eint_irq_ids[] = {
470 { .compatible = "samsung,s3c2410-wakeup-eint", .data = (void *)1 },
471 { .compatible = "samsung,s3c2412-wakeup-eint", .data = (void *)0 },
472 { }
473};
474
475static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
476{
477 struct device *dev = d->dev;
478 const struct of_device_id *match;
479 struct device_node *eint_np = NULL;
480 struct device_node *np;
481 struct samsung_pin_bank *bank;
482 struct s3c24xx_eint_data *eint_data;
483 const struct irq_domain_ops *ops;
484 unsigned int i;
485 bool eint0_3_parent_only;
486 irq_flow_handler_t *handlers;
487
488 for_each_child_of_node(dev->of_node, np) {
489 match = of_match_node(s3c24xx_eint_irq_ids, np);
490 if (match) {
491 eint_np = np;
492 eint0_3_parent_only = (bool)match->data;
493 break;
494 }
495 }
496 if (!eint_np)
497 return -ENODEV;
498
499 eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL);
500 if (!eint_data)
501 return -ENOMEM;
502
503 eint_data->drvdata = d;
504
505 handlers = eint0_3_parent_only ? s3c2410_eint_handlers
506 : s3c2412_eint_handlers;
507 for (i = 0; i < NUM_EINT_IRQ; ++i) {
508 unsigned int irq;
509
510 irq = irq_of_parse_and_map(eint_np, i);
511 if (!irq) {
512 dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
513 return -ENXIO;
514 }
515
516 eint_data->parents[i] = irq;
517 irq_set_chained_handler(irq, handlers[i]);
518 irq_set_handler_data(irq, eint_data);
519 }
520
521 bank = d->ctrl->pin_banks;
522 for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) {
523 struct s3c24xx_eint_domain_data *ddata;
524 unsigned int mask;
525 unsigned int irq;
526 unsigned int pin;
527
528 if (bank->eint_type != EINT_TYPE_WKUP)
529 continue;
530
531 ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
532 if (!ddata)
533 return -ENOMEM;
534
535 ddata->bank = bank;
536 ddata->eint_data = eint_data;
537 ddata->eint0_3_parent_only = eint0_3_parent_only;
538
539 ops = (bank->eint_offset == 0) ? &s3c24xx_gpf_irq_ops
540 : &s3c24xx_gpg_irq_ops;
541
542 bank->irq_domain = irq_domain_add_linear(bank->of_node,
543 bank->nr_pins, ops, ddata);
544 if (!bank->irq_domain) {
545 dev_err(dev, "wkup irq domain add failed\n");
546 return -ENXIO;
547 }
548
549 irq = bank->eint_offset;
550 mask = bank->eint_mask;
551 for (pin = 0; mask; ++pin, mask >>= 1) {
552			if (irq >= NUM_EINT)
553 break;
554 if (!(mask & 1))
555 continue;
556 eint_data->domains[irq] = bank->irq_domain;
557 ++irq;
558 }
559 }
560
561 return 0;
562}
563
564static struct samsung_pin_bank s3c2412_pin_banks[] = {
565 PIN_BANK_A(23, 0x000, "gpa"),
566 PIN_BANK_2BIT(11, 0x010, "gpb"),
567 PIN_BANK_2BIT(16, 0x020, "gpc"),
568 PIN_BANK_2BIT(16, 0x030, "gpd"),
569 PIN_BANK_2BIT(16, 0x040, "gpe"),
570 PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff),
571 PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 0xffff00),
572 PIN_BANK_2BIT(11, 0x070, "gph"),
573 PIN_BANK_2BIT(13, 0x080, "gpj"),
574};
575
576struct samsung_pin_ctrl s3c2412_pin_ctrl[] = {
577 {
578 .pin_banks = s3c2412_pin_banks,
579 .nr_banks = ARRAY_SIZE(s3c2412_pin_banks),
580 .eint_wkup_init = s3c24xx_eint_init,
581 .label = "S3C2412-GPIO",
582 },
583};
584
585static struct samsung_pin_bank s3c2416_pin_banks[] = {
586 PIN_BANK_A(27, 0x000, "gpa"),
587 PIN_BANK_2BIT(11, 0x010, "gpb"),
588 PIN_BANK_2BIT(16, 0x020, "gpc"),
589 PIN_BANK_2BIT(16, 0x030, "gpd"),
590 PIN_BANK_2BIT(16, 0x040, "gpe"),
591 PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff),
592 PIN_BANK_2BIT_EINTW(8, 0x060, "gpg", 8, 0xff00),
593 PIN_BANK_2BIT(15, 0x070, "gph"),
594 PIN_BANK_2BIT(16, 0x0e0, "gpk"),
595 PIN_BANK_2BIT(14, 0x0f0, "gpl"),
596 PIN_BANK_2BIT(2, 0x100, "gpm"),
597};
598
599struct samsung_pin_ctrl s3c2416_pin_ctrl[] = {
600 {
601 .pin_banks = s3c2416_pin_banks,
602 .nr_banks = ARRAY_SIZE(s3c2416_pin_banks),
603 .eint_wkup_init = s3c24xx_eint_init,
604 .label = "S3C2416-GPIO",
605 },
606};
607
608static struct samsung_pin_bank s3c2440_pin_banks[] = {
609 PIN_BANK_A(25, 0x000, "gpa"),
610 PIN_BANK_2BIT(11, 0x010, "gpb"),
611 PIN_BANK_2BIT(16, 0x020, "gpc"),
612 PIN_BANK_2BIT(16, 0x030, "gpd"),
613 PIN_BANK_2BIT(16, 0x040, "gpe"),
614 PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff),
615 PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 0xffff00),
616 PIN_BANK_2BIT(11, 0x070, "gph"),
617 PIN_BANK_2BIT(13, 0x0d0, "gpj"),
618};
619
620struct samsung_pin_ctrl s3c2440_pin_ctrl[] = {
621 {
622 .pin_banks = s3c2440_pin_banks,
623 .nr_banks = ARRAY_SIZE(s3c2440_pin_banks),
624 .eint_wkup_init = s3c24xx_eint_init,
625 .label = "S3C2440-GPIO",
626 },
627};
628
629static struct samsung_pin_bank s3c2450_pin_banks[] = {
630 PIN_BANK_A(28, 0x000, "gpa"),
631 PIN_BANK_2BIT(11, 0x010, "gpb"),
632 PIN_BANK_2BIT(16, 0x020, "gpc"),
633 PIN_BANK_2BIT(16, 0x030, "gpd"),
634 PIN_BANK_2BIT(16, 0x040, "gpe"),
635 PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff),
636 PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 0xffff00),
637 PIN_BANK_2BIT(15, 0x070, "gph"),
638 PIN_BANK_2BIT(16, 0x0d0, "gpj"),
639 PIN_BANK_2BIT(16, 0x0e0, "gpk"),
640 PIN_BANK_2BIT(15, 0x0f0, "gpl"),
641 PIN_BANK_2BIT(2, 0x100, "gpm"),
642};
643
644struct samsung_pin_ctrl s3c2450_pin_ctrl[] = {
645 {
646 .pin_banks = s3c2450_pin_banks,
647 .nr_banks = ARRAY_SIZE(s3c2450_pin_banks),
648 .eint_wkup_init = s3c24xx_eint_init,
649 .label = "S3C2450-GPIO",
650 },
651};
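
The trigger programming in s3c24xx_eint_type() above is easiest to follow numerically: each trigger register, starting at offset 0x88 and advancing by 4 per group of eight EINTs, packs eight 4-bit trigger fields, so EINT10, for instance, is configured in the register at offset 0x8c, bits 8-11. Below is a small stand-alone sketch of that arithmetic, reusing the macros from the file above; it is illustrative only and not part of this series.

#include <stdio.h>

/* Same helpers as in pinctrl-s3c24xx.c above */
#define EINT_MAX_PER_GROUP	8
#define EINT_GROUP(i)		((int)((i) / EINT_MAX_PER_GROUP))
#define EINT_REG(i)		((EINT_GROUP(i) * 4) + 0x88)
#define EINT_OFFS(i)		((i) % EINT_MAX_PER_GROUP * 4)
#define EINT_EDGE_FALLING	2
#define EINT_MASK		0xf

int main(void)
{
	unsigned int index = 10;	/* e.g. gpg bank: eint_offset 8 + hwirq 2 */
	unsigned int reg = EINT_REG(index);
	unsigned int shift = EINT_OFFS(index);
	unsigned int val = 0;		/* stand-in for the readl() result */

	/* Clear the 4-bit field for this EINT and install the new trigger */
	val &= ~(EINT_MASK << shift);
	val |= EINT_EDGE_FALLING << shift;

	printf("EINT%u -> trigger register 0x%02x, bits %u-%u, new value 0x%08x\n",
	       index, reg, shift, shift + 3, val);
	return 0;
}
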
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 63ac22e89678..e67ff1b8042c 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -1118,6 +1118,16 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
1118 { .compatible = "samsung,s3c64xx-pinctrl", 1118 { .compatible = "samsung,s3c64xx-pinctrl",
1119 .data = s3c64xx_pin_ctrl }, 1119 .data = s3c64xx_pin_ctrl },
1120#endif 1120#endif
1121#ifdef CONFIG_PINCTRL_S3C24XX
1122 { .compatible = "samsung,s3c2412-pinctrl",
1123 .data = s3c2412_pin_ctrl },
1124 { .compatible = "samsung,s3c2416-pinctrl",
1125 .data = s3c2416_pin_ctrl },
1126 { .compatible = "samsung,s3c2440-pinctrl",
1127 .data = s3c2440_pin_ctrl },
1128 { .compatible = "samsung,s3c2450-pinctrl",
1129 .data = s3c2450_pin_ctrl },
1130#endif
1121 {}, 1131 {},
1122}; 1132};
1123MODULE_DEVICE_TABLE(of, samsung_pinctrl_dt_match); 1133MODULE_DEVICE_TABLE(of, samsung_pinctrl_dt_match);
diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h
index 26d3519240c9..79fcc2076c00 100644
--- a/drivers/pinctrl/pinctrl-samsung.h
+++ b/drivers/pinctrl/pinctrl-samsung.h
@@ -255,5 +255,9 @@ extern struct samsung_pin_ctrl exynos4210_pin_ctrl[];
255extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; 255extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
256extern struct samsung_pin_ctrl exynos5250_pin_ctrl[]; 256extern struct samsung_pin_ctrl exynos5250_pin_ctrl[];
257extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; 257extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
258extern struct samsung_pin_ctrl s3c2412_pin_ctrl[];
259extern struct samsung_pin_ctrl s3c2416_pin_ctrl[];
260extern struct samsung_pin_ctrl s3c2440_pin_ctrl[];
261extern struct samsung_pin_ctrl s3c2450_pin_ctrl[];
258 262
259#endif /* __PINCTRL_SAMSUNG_H */ 263#endif /* __PINCTRL_SAMSUNG_H */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
index bbff5596e922..82bf6aba0074 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -1488,6 +1488,66 @@ IRQC_PINS_MUX(326, 54);
1488IRQC_PINS_MUX(327, 55); 1488IRQC_PINS_MUX(327, 55);
1489IRQC_PINS_MUX(328, 56); 1489IRQC_PINS_MUX(328, 56);
1490IRQC_PINS_MUX(329, 57); 1490IRQC_PINS_MUX(329, 57);
1491/* - MMCIF0 ----------------------------------------------------------------- */
1492static const unsigned int mmc0_data1_pins[] = {
1493 /* D[0] */
1494 164,
1495};
1496static const unsigned int mmc0_data1_mux[] = {
1497 MMCD0_0_MARK,
1498};
1499static const unsigned int mmc0_data4_pins[] = {
1500 /* D[0:3] */
1501 164, 165, 166, 167,
1502};
1503static const unsigned int mmc0_data4_mux[] = {
1504 MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
1505};
1506static const unsigned int mmc0_data8_pins[] = {
1507 /* D[0:7] */
1508 164, 165, 166, 167, 168, 169, 170, 171,
1509};
1510static const unsigned int mmc0_data8_mux[] = {
1511 MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
1512 MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK,
1513};
1514static const unsigned int mmc0_ctrl_pins[] = {
1515 /* CMD, CLK */
1516 172, 173,
1517};
1518static const unsigned int mmc0_ctrl_mux[] = {
1519 MMCCMD0_MARK, MMCCLK0_MARK,
1520};
1521/* - MMCIF1 ----------------------------------------------------------------- */
1522static const unsigned int mmc1_data1_pins[] = {
1523 /* D[0] */
1524 199,
1525};
1526static const unsigned int mmc1_data1_mux[] = {
1527 MMCD1_0_MARK,
1528};
1529static const unsigned int mmc1_data4_pins[] = {
1530 /* D[0:3] */
1531 199, 198, 197, 196,
1532};
1533static const unsigned int mmc1_data4_mux[] = {
1534 MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
1535};
1536static const unsigned int mmc1_data8_pins[] = {
1537 /* D[0:7] */
1538 199, 198, 197, 196, 195, 194, 193, 192,
1539};
1540static const unsigned int mmc1_data8_mux[] = {
1541 MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
1542 MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK,
1543};
1544static const unsigned int mmc1_ctrl_pins[] = {
1545 /* CMD, CLK */
1546 200, 203,
1547};
1548static const unsigned int mmc1_ctrl_mux[] = {
1549 MMCCMD1_MARK, MMCCLK1_MARK,
1550};
1491/* - SCIFA0 ----------------------------------------------------------------- */ 1551/* - SCIFA0 ----------------------------------------------------------------- */
1492static const unsigned int scifa0_data_pins[] = { 1552static const unsigned int scifa0_data_pins[] = {
1493 /* SCIFA0_RXD, SCIFA0_TXD */ 1553 /* SCIFA0_RXD, SCIFA0_TXD */
@@ -1683,6 +1743,86 @@ static const unsigned int scifb3_ctrl_b_pins[] = {
1683static const unsigned int scifb3_ctrl_b_mux[] = { 1743static const unsigned int scifb3_ctrl_b_mux[] = {
1684 SCIFB3_RTS_38_MARK, SCIFB3_CTS_39_MARK, 1744 SCIFB3_RTS_38_MARK, SCIFB3_CTS_39_MARK,
1685}; 1745};
1746/* - SDHI0 ------------------------------------------------------------------ */
1747static const unsigned int sdhi0_data1_pins[] = {
1748 /* D0 */
1749 302,
1750};
1751static const unsigned int sdhi0_data1_mux[] = {
1752 SDHID0_0_MARK,
1753};
1754static const unsigned int sdhi0_data4_pins[] = {
1755 /* D[0:3] */
1756 302, 303, 304, 305,
1757};
1758static const unsigned int sdhi0_data4_mux[] = {
1759 SDHID0_0_MARK, SDHID0_1_MARK, SDHID0_2_MARK, SDHID0_3_MARK,
1760};
1761static const unsigned int sdhi0_ctrl_pins[] = {
1762 /* CLK, CMD */
1763 308, 306,
1764};
1765static const unsigned int sdhi0_ctrl_mux[] = {
1766 SDHICLK0_MARK, SDHICMD0_MARK,
1767};
1768static const unsigned int sdhi0_cd_pins[] = {
1769 /* CD */
1770 301,
1771};
1772static const unsigned int sdhi0_cd_mux[] = {
1773 SDHICD0_MARK,
1774};
1775static const unsigned int sdhi0_wp_pins[] = {
1776 /* WP */
1777 307,
1778};
1779static const unsigned int sdhi0_wp_mux[] = {
1780 SDHIWP0_MARK,
1781};
1782/* - SDHI1 ------------------------------------------------------------------ */
1783static const unsigned int sdhi1_data1_pins[] = {
1784 /* D0 */
1785 289,
1786};
1787static const unsigned int sdhi1_data1_mux[] = {
1788 SDHID1_0_MARK,
1789};
1790static const unsigned int sdhi1_data4_pins[] = {
1791 /* D[0:3] */
1792 289, 290, 291, 292,
1793};
1794static const unsigned int sdhi1_data4_mux[] = {
1795 SDHID1_0_MARK, SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK,
1796};
1797static const unsigned int sdhi1_ctrl_pins[] = {
1798 /* CLK, CMD */
1799 293, 294,
1800};
1801static const unsigned int sdhi1_ctrl_mux[] = {
1802 SDHICLK1_MARK, SDHICMD1_MARK,
1803};
1804/* - SDHI2 ------------------------------------------------------------------ */
1805static const unsigned int sdhi2_data1_pins[] = {
1806 /* D0 */
1807 295,
1808};
1809static const unsigned int sdhi2_data1_mux[] = {
1810 SDHID2_0_MARK,
1811};
1812static const unsigned int sdhi2_data4_pins[] = {
1813 /* D[0:3] */
1814 295, 296, 297, 298,
1815};
1816static const unsigned int sdhi2_data4_mux[] = {
1817 SDHID2_0_MARK, SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK,
1818};
1819static const unsigned int sdhi2_ctrl_pins[] = {
1820 /* CLK, CMD */
1821 299, 300,
1822};
1823static const unsigned int sdhi2_ctrl_mux[] = {
1824 SDHICLK2_MARK, SDHICMD2_MARK,
1825};
1686 1826
1687static const struct sh_pfc_pin_group pinmux_groups[] = { 1827static const struct sh_pfc_pin_group pinmux_groups[] = {
1688 SH_PFC_PIN_GROUP(irqc_irq0), 1828 SH_PFC_PIN_GROUP(irqc_irq0),
@@ -1743,6 +1883,14 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
1743 SH_PFC_PIN_GROUP(irqc_irq55), 1883 SH_PFC_PIN_GROUP(irqc_irq55),
1744 SH_PFC_PIN_GROUP(irqc_irq56), 1884 SH_PFC_PIN_GROUP(irqc_irq56),
1745 SH_PFC_PIN_GROUP(irqc_irq57), 1885 SH_PFC_PIN_GROUP(irqc_irq57),
1886 SH_PFC_PIN_GROUP(mmc0_data1),
1887 SH_PFC_PIN_GROUP(mmc0_data4),
1888 SH_PFC_PIN_GROUP(mmc0_data8),
1889 SH_PFC_PIN_GROUP(mmc0_ctrl),
1890 SH_PFC_PIN_GROUP(mmc1_data1),
1891 SH_PFC_PIN_GROUP(mmc1_data4),
1892 SH_PFC_PIN_GROUP(mmc1_data8),
1893 SH_PFC_PIN_GROUP(mmc1_ctrl),
1746 SH_PFC_PIN_GROUP(scifa0_data), 1894 SH_PFC_PIN_GROUP(scifa0_data),
1747 SH_PFC_PIN_GROUP(scifa0_clk), 1895 SH_PFC_PIN_GROUP(scifa0_clk),
1748 SH_PFC_PIN_GROUP(scifa0_ctrl), 1896 SH_PFC_PIN_GROUP(scifa0_ctrl),
@@ -1770,6 +1918,17 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
1770 SH_PFC_PIN_GROUP(scifb3_data_b), 1918 SH_PFC_PIN_GROUP(scifb3_data_b),
1771 SH_PFC_PIN_GROUP(scifb3_clk_b), 1919 SH_PFC_PIN_GROUP(scifb3_clk_b),
1772 SH_PFC_PIN_GROUP(scifb3_ctrl_b), 1920 SH_PFC_PIN_GROUP(scifb3_ctrl_b),
1921 SH_PFC_PIN_GROUP(sdhi0_data1),
1922 SH_PFC_PIN_GROUP(sdhi0_data4),
1923 SH_PFC_PIN_GROUP(sdhi0_ctrl),
1924 SH_PFC_PIN_GROUP(sdhi0_cd),
1925 SH_PFC_PIN_GROUP(sdhi0_wp),
1926 SH_PFC_PIN_GROUP(sdhi1_data1),
1927 SH_PFC_PIN_GROUP(sdhi1_data4),
1928 SH_PFC_PIN_GROUP(sdhi1_ctrl),
1929 SH_PFC_PIN_GROUP(sdhi2_data1),
1930 SH_PFC_PIN_GROUP(sdhi2_data4),
1931 SH_PFC_PIN_GROUP(sdhi2_ctrl),
1773}; 1932};
1774 1933
1775static const char * const irqc_groups[] = { 1934static const char * const irqc_groups[] = {
@@ -1833,6 +1992,20 @@ static const char * const irqc_groups[] = {
1833 "irqc_irq57", 1992 "irqc_irq57",
1834}; 1993};
1835 1994
1995static const char * const mmc0_groups[] = {
1996 "mmc0_data1",
1997 "mmc0_data4",
1998 "mmc0_data8",
1999 "mmc0_ctrl",
2000};
2001
2002static const char * const mmc1_groups[] = {
2003 "mmc1_data1",
2004 "mmc1_data4",
2005 "mmc1_data8",
2006 "mmc1_ctrl",
2007};
2008
1836static const char * const scifa0_groups[] = { 2009static const char * const scifa0_groups[] = {
1837 "scifa0_data", 2010 "scifa0_data",
1838 "scifa0_clk", 2011 "scifa0_clk",
@@ -1878,14 +2051,39 @@ static const char * const scifb3_groups[] = {
1878 "scifb3_ctrl_b", 2051 "scifb3_ctrl_b",
1879}; 2052};
1880 2053
2054static const char * const sdhi0_groups[] = {
2055 "sdhi0_data1",
2056 "sdhi0_data4",
2057 "sdhi0_ctrl",
2058 "sdhi0_cd",
2059 "sdhi0_wp",
2060};
2061
2062static const char * const sdhi1_groups[] = {
2063 "sdhi1_data1",
2064 "sdhi1_data4",
2065 "sdhi1_ctrl",
2066};
2067
2068static const char * const sdhi2_groups[] = {
2069 "sdhi2_data1",
2070 "sdhi2_data4",
2071 "sdhi2_ctrl",
2072};
2073
1881static const struct sh_pfc_function pinmux_functions[] = { 2074static const struct sh_pfc_function pinmux_functions[] = {
1882 SH_PFC_FUNCTION(irqc), 2075 SH_PFC_FUNCTION(irqc),
2076 SH_PFC_FUNCTION(mmc0),
2077 SH_PFC_FUNCTION(mmc1),
1883 SH_PFC_FUNCTION(scifa0), 2078 SH_PFC_FUNCTION(scifa0),
1884 SH_PFC_FUNCTION(scifa1), 2079 SH_PFC_FUNCTION(scifa1),
1885 SH_PFC_FUNCTION(scifb0), 2080 SH_PFC_FUNCTION(scifb0),
1886 SH_PFC_FUNCTION(scifb1), 2081 SH_PFC_FUNCTION(scifb1),
1887 SH_PFC_FUNCTION(scifb2), 2082 SH_PFC_FUNCTION(scifb2),
1888 SH_PFC_FUNCTION(scifb3), 2083 SH_PFC_FUNCTION(scifb3),
2084 SH_PFC_FUNCTION(sdhi0),
2085 SH_PFC_FUNCTION(sdhi1),
2086 SH_PFC_FUNCTION(sdhi2),
1889}; 2087};
1890 2088
1891#undef PORTCR 2089#undef PORTCR
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 1dcbabcd7b3c..f9039102bb43 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -1447,11 +1447,11 @@ MMC_PFC_PINS(mmc_ctrl, RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6));
1447MMC_PFC_CTRL(mmc_ctrl, MMC_CLK, MMC_CMD); 1447MMC_PFC_CTRL(mmc_ctrl, MMC_CLK, MMC_CMD);
1448MMC_PFC_PINS(mmc_data1, RCAR_GP_PIN(1, 7)); 1448MMC_PFC_PINS(mmc_data1, RCAR_GP_PIN(1, 7));
1449MMC_PFC_DAT1(mmc_data1, MMC_D0); 1449MMC_PFC_DAT1(mmc_data1, MMC_D0);
1450MMC_PFC_PINS(mmc_data4, RCAR_GP_PIN(1, 7), RCAR_GP_PIN(2, 8), 1450MMC_PFC_PINS(mmc_data4, RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 8),
1451 RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6)); 1451 RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6));
1452MMC_PFC_DAT4(mmc_data4, MMC_D0, MMC_D1, 1452MMC_PFC_DAT4(mmc_data4, MMC_D0, MMC_D1,
1453 MMC_D2, MMC_D3); 1453 MMC_D2, MMC_D3);
1454MMC_PFC_PINS(mmc_data8, RCAR_GP_PIN(1, 7), RCAR_GP_PIN(2, 8), 1454MMC_PFC_PINS(mmc_data8, RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 8),
1455 RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6), 1455 RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6),
1456 RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0), 1456 RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
1457 RCAR_GP_PIN(0, 30), RCAR_GP_PIN(0, 31)); 1457 RCAR_GP_PIN(0, 30), RCAR_GP_PIN(0, 31));
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 85d77a417c0e..14f3ec267e1f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -1979,6 +1979,141 @@ static const unsigned int scif1_clk_e_pins[] = {
1979static const unsigned int scif1_clk_e_mux[] = { 1979static const unsigned int scif1_clk_e_mux[] = {
1980 SCK1_E_MARK, 1980 SCK1_E_MARK,
1981}; 1981};
1982/* - HSCIF0 ----------------------------------------------------------------- */
1983static const unsigned int hscif0_data_pins[] = {
1984 /* RX, TX */
1985 RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
1986};
1987static const unsigned int hscif0_data_mux[] = {
1988 HRX0_MARK, HTX0_MARK,
1989};
1990static const unsigned int hscif0_clk_pins[] = {
1991 /* SCK */
1992 RCAR_GP_PIN(5, 7),
1993};
1994static const unsigned int hscif0_clk_mux[] = {
1995 HSCK0_MARK,
1996};
1997static const unsigned int hscif0_ctrl_pins[] = {
1998 /* RTS, CTS */
1999 RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
2000};
2001static const unsigned int hscif0_ctrl_mux[] = {
2002 HRTS0_N_MARK, HCTS0_N_MARK,
2003};
2004static const unsigned int hscif0_data_b_pins[] = {
2005 /* RX, TX */
2006 RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 12),
2007};
2008static const unsigned int hscif0_data_b_mux[] = {
2009 HRX0_B_MARK, HTX0_B_MARK,
2010};
2011static const unsigned int hscif0_ctrl_b_pins[] = {
2012 /* RTS, CTS */
2013 RCAR_GP_PIN(1, 29), RCAR_GP_PIN(1, 28),
2014};
2015static const unsigned int hscif0_ctrl_b_mux[] = {
2016 HRTS0_N_B_MARK, HCTS0_N_B_MARK,
2017};
2018static const unsigned int hscif0_data_c_pins[] = {
2019 /* RX, TX */
2020 RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 16),
2021};
2022static const unsigned int hscif0_data_c_mux[] = {
2023 HRX0_C_MARK, HTX0_C_MARK,
2024};
2025static const unsigned int hscif0_ctrl_c_pins[] = {
2026 /* RTS, CTS */
2027 RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 7),
2028};
2029static const unsigned int hscif0_ctrl_c_mux[] = {
2030 HRTS0_N_C_MARK, HCTS0_N_C_MARK,
2031};
2032static const unsigned int hscif0_data_d_pins[] = {
2033 /* RX, TX */
2034 RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 21),
2035};
2036static const unsigned int hscif0_data_d_mux[] = {
2037 HRX0_D_MARK, HTX0_D_MARK,
2038};
2039static const unsigned int hscif0_ctrl_d_pins[] = {
2040 /* RTS, CTS */
2041 RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 22),
2042};
2043static const unsigned int hscif0_ctrl_d_mux[] = {
2044 HRTS0_N_D_MARK, HCTS0_N_D_MARK,
2045};
2046static const unsigned int hscif0_data_e_pins[] = {
2047 /* RX, TX */
2048 RCAR_GP_PIN(2, 21), RCAR_GP_PIN(2, 22),
2049};
2050static const unsigned int hscif0_data_e_mux[] = {
2051 HRX0_E_MARK, HTX0_E_MARK,
2052};
2053static const unsigned int hscif0_ctrl_e_pins[] = {
2054 /* RTS, CTS */
2055 RCAR_GP_PIN(2, 24), RCAR_GP_PIN(2, 23),
2056};
2057static const unsigned int hscif0_ctrl_e_mux[] = {
2058 HRTS0_N_E_MARK, HCTS0_N_E_MARK,
2059};
2060static const unsigned int hscif0_data_f_pins[] = {
2061 /* RX, TX */
2062 RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 25),
2063};
2064static const unsigned int hscif0_data_f_mux[] = {
2065 HRX0_F_MARK, HTX0_F_MARK,
2066};
2067static const unsigned int hscif0_ctrl_f_pins[] = {
2068 /* RTS, CTS */
2069 RCAR_GP_PIN(2, 26), RCAR_GP_PIN(2, 24),
2070};
2071static const unsigned int hscif0_ctrl_f_mux[] = {
2072 HRTS0_N_F_MARK, HCTS0_N_F_MARK,
2073};
2074/* - HSCIF1 ----------------------------------------------------------------- */
2075static const unsigned int hscif1_data_pins[] = {
2076 /* RX, TX */
2077 RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 29),
2078};
2079static const unsigned int hscif1_data_mux[] = {
2080 HRX1_MARK, HTX1_MARK,
2081};
2082static const unsigned int hscif1_clk_pins[] = {
2083 /* SCK */
2084 RCAR_GP_PIN(4, 27),
2085};
2086static const unsigned int hscif1_clk_mux[] = {
2087 HSCK1_MARK,
2088};
2089static const unsigned int hscif1_ctrl_pins[] = {
2090 /* RTS, CTS */
2091 RCAR_GP_PIN(4, 31), RCAR_GP_PIN(4, 30),
2092};
2093static const unsigned int hscif1_ctrl_mux[] = {
2094 HRTS1_N_MARK, HCTS1_N_MARK,
2095};
2096static const unsigned int hscif1_data_b_pins[] = {
2097 /* RX, TX */
2098 RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 18),
2099};
2100static const unsigned int hscif1_data_b_mux[] = {
2101 HRX1_B_MARK, HTX1_B_MARK,
2102};
2103static const unsigned int hscif1_clk_b_pins[] = {
2104 /* SCK */
2105 RCAR_GP_PIN(1, 28),
2106};
2107static const unsigned int hscif1_clk_b_mux[] = {
2108 HSCK1_B_MARK,
2109};
2110static const unsigned int hscif1_ctrl_b_pins[] = {
2111 /* RTS, CTS */
2112 RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
2113};
2114static const unsigned int hscif1_ctrl_b_mux[] = {
2115 HRTS1_N_B_MARK, HCTS1_N_B_MARK,
2116};
1982/* - SCIFA0 ----------------------------------------------------------------- */ 2117/* - SCIFA0 ----------------------------------------------------------------- */
1983static const unsigned int scifa0_data_pins[] = { 2118static const unsigned int scifa0_data_pins[] = {
1984 /* RXD, TXD */ 2119 /* RXD, TXD */
@@ -2371,8 +2506,7 @@ static const unsigned int tpu0_to3_pins[] = {
2371static const unsigned int tpu0_to3_mux[] = { 2506static const unsigned int tpu0_to3_mux[] = {
2372 TPU0TO3_MARK, 2507 TPU0TO3_MARK,
2373}; 2508};
2374 2509/* - MMCIF0 ----------------------------------------------------------------- */
2375/* - MMCIF ------------------------------------------------------------------ */
2376static const unsigned int mmc0_data1_pins[] = { 2510static const unsigned int mmc0_data1_pins[] = {
2377 /* D[0] */ 2511 /* D[0] */
2378 RCAR_GP_PIN(3, 18), 2512 RCAR_GP_PIN(3, 18),
@@ -2406,7 +2540,7 @@ static const unsigned int mmc0_ctrl_pins[] = {
2406static const unsigned int mmc0_ctrl_mux[] = { 2540static const unsigned int mmc0_ctrl_mux[] = {
2407 MMC0_CLK_MARK, MMC0_CMD_MARK, 2541 MMC0_CLK_MARK, MMC0_CMD_MARK,
2408}; 2542};
2409 2543/* - MMCIF1 ----------------------------------------------------------------- */
2410static const unsigned int mmc1_data1_pins[] = { 2544static const unsigned int mmc1_data1_pins[] = {
2411 /* D[0] */ 2545 /* D[0] */
2412 RCAR_GP_PIN(3, 26), 2546 RCAR_GP_PIN(3, 26),
@@ -2427,7 +2561,7 @@ static const unsigned int mmc1_data8_pins[] = {
2427 RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 27), 2561 RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 27),
2428 RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 29), 2562 RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 29),
2429 RCAR_GP_PIN(3, 30), RCAR_GP_PIN(3, 31), 2563 RCAR_GP_PIN(3, 30), RCAR_GP_PIN(3, 31),
2430 RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 14), 2564 RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
2431}; 2565};
2432static const unsigned int mmc1_data8_mux[] = { 2566static const unsigned int mmc1_data8_mux[] = {
2433 MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK, 2567 MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK,
@@ -2440,8 +2574,7 @@ static const unsigned int mmc1_ctrl_pins[] = {
2440static const unsigned int mmc1_ctrl_mux[] = { 2574static const unsigned int mmc1_ctrl_mux[] = {
2441 MMC1_CLK_MARK, MMC1_CMD_MARK, 2575 MMC1_CLK_MARK, MMC1_CMD_MARK,
2442}; 2576};
2443 2577/* - SDHI0 ------------------------------------------------------------------ */
2444/* - SDHI ------------------------------------------------------------------- */
2445static const unsigned int sdhi0_data1_pins[] = { 2578static const unsigned int sdhi0_data1_pins[] = {
2446 /* D0 */ 2579 /* D0 */
2447 RCAR_GP_PIN(3, 2), 2580 RCAR_GP_PIN(3, 2),
@@ -2477,7 +2610,7 @@ static const unsigned int sdhi0_wp_pins[] = {
2477static const unsigned int sdhi0_wp_mux[] = { 2610static const unsigned int sdhi0_wp_mux[] = {
2478 SD0_WP_MARK, 2611 SD0_WP_MARK,
2479}; 2612};
2480 2613/* - SDHI1 ------------------------------------------------------------------ */
2481static const unsigned int sdhi1_data1_pins[] = { 2614static const unsigned int sdhi1_data1_pins[] = {
2482 /* D0 */ 2615 /* D0 */
2483 RCAR_GP_PIN(3, 10), 2616 RCAR_GP_PIN(3, 10),
@@ -2513,7 +2646,7 @@ static const unsigned int sdhi1_wp_pins[] = {
2513static const unsigned int sdhi1_wp_mux[] = { 2646static const unsigned int sdhi1_wp_mux[] = {
2514 SD1_WP_MARK, 2647 SD1_WP_MARK,
2515}; 2648};
2516 2649/* - SDHI2 ------------------------------------------------------------------ */
2517static const unsigned int sdhi2_data1_pins[] = { 2650static const unsigned int sdhi2_data1_pins[] = {
2518 /* D0 */ 2651 /* D0 */
2519 RCAR_GP_PIN(3, 18), 2652 RCAR_GP_PIN(3, 18),
@@ -2549,7 +2682,7 @@ static const unsigned int sdhi2_wp_pins[] = {
2549static const unsigned int sdhi2_wp_mux[] = { 2682static const unsigned int sdhi2_wp_mux[] = {
2550 SD2_WP_MARK, 2683 SD2_WP_MARK,
2551}; 2684};
2552 2685/* - SDHI3 ------------------------------------------------------------------ */
2553static const unsigned int sdhi3_data1_pins[] = { 2686static const unsigned int sdhi3_data1_pins[] = {
2554 /* D0 */ 2687 /* D0 */
2555 RCAR_GP_PIN(3, 26), 2688 RCAR_GP_PIN(3, 26),
@@ -2591,10 +2724,37 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
2591 SH_PFC_PIN_GROUP(eth_magic), 2724 SH_PFC_PIN_GROUP(eth_magic),
2592 SH_PFC_PIN_GROUP(eth_mdio), 2725 SH_PFC_PIN_GROUP(eth_mdio),
2593 SH_PFC_PIN_GROUP(eth_rmii), 2726 SH_PFC_PIN_GROUP(eth_rmii),
2727 SH_PFC_PIN_GROUP(hscif0_data),
2728 SH_PFC_PIN_GROUP(hscif0_clk),
2729 SH_PFC_PIN_GROUP(hscif0_ctrl),
2730 SH_PFC_PIN_GROUP(hscif0_data_b),
2731 SH_PFC_PIN_GROUP(hscif0_ctrl_b),
2732 SH_PFC_PIN_GROUP(hscif0_data_c),
2733 SH_PFC_PIN_GROUP(hscif0_ctrl_c),
2734 SH_PFC_PIN_GROUP(hscif0_data_d),
2735 SH_PFC_PIN_GROUP(hscif0_ctrl_d),
2736 SH_PFC_PIN_GROUP(hscif0_data_e),
2737 SH_PFC_PIN_GROUP(hscif0_ctrl_e),
2738 SH_PFC_PIN_GROUP(hscif0_data_f),
2739 SH_PFC_PIN_GROUP(hscif0_ctrl_f),
2740 SH_PFC_PIN_GROUP(hscif1_data),
2741 SH_PFC_PIN_GROUP(hscif1_clk),
2742 SH_PFC_PIN_GROUP(hscif1_ctrl),
2743 SH_PFC_PIN_GROUP(hscif1_data_b),
2744 SH_PFC_PIN_GROUP(hscif1_clk_b),
2745 SH_PFC_PIN_GROUP(hscif1_ctrl_b),
2594 SH_PFC_PIN_GROUP(intc_irq0), 2746 SH_PFC_PIN_GROUP(intc_irq0),
2595 SH_PFC_PIN_GROUP(intc_irq1), 2747 SH_PFC_PIN_GROUP(intc_irq1),
2596 SH_PFC_PIN_GROUP(intc_irq2), 2748 SH_PFC_PIN_GROUP(intc_irq2),
2597 SH_PFC_PIN_GROUP(intc_irq3), 2749 SH_PFC_PIN_GROUP(intc_irq3),
2750 SH_PFC_PIN_GROUP(mmc0_data1),
2751 SH_PFC_PIN_GROUP(mmc0_data4),
2752 SH_PFC_PIN_GROUP(mmc0_data8),
2753 SH_PFC_PIN_GROUP(mmc0_ctrl),
2754 SH_PFC_PIN_GROUP(mmc1_data1),
2755 SH_PFC_PIN_GROUP(mmc1_data4),
2756 SH_PFC_PIN_GROUP(mmc1_data8),
2757 SH_PFC_PIN_GROUP(mmc1_ctrl),
2598 SH_PFC_PIN_GROUP(scif0_data), 2758 SH_PFC_PIN_GROUP(scif0_data),
2599 SH_PFC_PIN_GROUP(scif0_clk), 2759 SH_PFC_PIN_GROUP(scif0_clk),
2600 SH_PFC_PIN_GROUP(scif0_ctrl), 2760 SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -2659,18 +2819,6 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
2659 SH_PFC_PIN_GROUP(scifb2_clk_b), 2819 SH_PFC_PIN_GROUP(scifb2_clk_b),
2660 SH_PFC_PIN_GROUP(scifb2_ctrl_b), 2820 SH_PFC_PIN_GROUP(scifb2_ctrl_b),
2661 SH_PFC_PIN_GROUP(scifb2_data_c), 2821 SH_PFC_PIN_GROUP(scifb2_data_c),
2662 SH_PFC_PIN_GROUP(tpu0_to0),
2663 SH_PFC_PIN_GROUP(tpu0_to1),
2664 SH_PFC_PIN_GROUP(tpu0_to2),
2665 SH_PFC_PIN_GROUP(tpu0_to3),
2666 SH_PFC_PIN_GROUP(mmc0_data1),
2667 SH_PFC_PIN_GROUP(mmc0_data4),
2668 SH_PFC_PIN_GROUP(mmc0_data8),
2669 SH_PFC_PIN_GROUP(mmc0_ctrl),
2670 SH_PFC_PIN_GROUP(mmc1_data1),
2671 SH_PFC_PIN_GROUP(mmc1_data4),
2672 SH_PFC_PIN_GROUP(mmc1_data8),
2673 SH_PFC_PIN_GROUP(mmc1_ctrl),
2674 SH_PFC_PIN_GROUP(sdhi0_data1), 2822 SH_PFC_PIN_GROUP(sdhi0_data1),
2675 SH_PFC_PIN_GROUP(sdhi0_data4), 2823 SH_PFC_PIN_GROUP(sdhi0_data4),
2676 SH_PFC_PIN_GROUP(sdhi0_ctrl), 2824 SH_PFC_PIN_GROUP(sdhi0_ctrl),
@@ -2691,6 +2839,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
2691 SH_PFC_PIN_GROUP(sdhi3_ctrl), 2839 SH_PFC_PIN_GROUP(sdhi3_ctrl),
2692 SH_PFC_PIN_GROUP(sdhi3_cd), 2840 SH_PFC_PIN_GROUP(sdhi3_cd),
2693 SH_PFC_PIN_GROUP(sdhi3_wp), 2841 SH_PFC_PIN_GROUP(sdhi3_wp),
2842 SH_PFC_PIN_GROUP(tpu0_to0),
2843 SH_PFC_PIN_GROUP(tpu0_to1),
2844 SH_PFC_PIN_GROUP(tpu0_to2),
2845 SH_PFC_PIN_GROUP(tpu0_to3),
2694}; 2846};
2695 2847
2696static const char * const eth_groups[] = { 2848static const char * const eth_groups[] = {
@@ -2726,6 +2878,31 @@ static const char * const scif1_groups[] = {
2726 "scif1_clk_e", 2878 "scif1_clk_e",
2727}; 2879};
2728 2880
2881static const char * const hscif0_groups[] = {
2882 "hscif0_data",
2883 "hscif0_clk",
2884 "hscif0_ctrl",
2885 "hscif0_data_b",
2886 "hscif0_ctrl_b",
2887 "hscif0_data_c",
2888 "hscif0_ctrl_c",
2889 "hscif0_data_d",
2890 "hscif0_ctrl_d",
2891 "hscif0_data_e",
2892 "hscif0_ctrl_e",
2893 "hscif0_data_f",
2894 "hscif0_ctrl_f",
2895};
2896
2897static const char * const hscif1_groups[] = {
2898 "hscif1_data",
2899 "hscif1_clk",
2900 "hscif1_ctrl",
2901 "hscif1_data_b",
2902 "hscif1_clk_b",
2903 "hscif1_ctrl_b",
2904};
2905
2729static const char * const scifa0_groups[] = { 2906static const char * const scifa0_groups[] = {
2730 "scifa0_data", 2907 "scifa0_data",
2731 "scifa0_clk", 2908 "scifa0_clk",
@@ -2850,7 +3027,11 @@ static const char * const sdhi3_groups[] = {
2850 3027
2851static const struct sh_pfc_function pinmux_functions[] = { 3028static const struct sh_pfc_function pinmux_functions[] = {
2852 SH_PFC_FUNCTION(eth), 3029 SH_PFC_FUNCTION(eth),
3030 SH_PFC_FUNCTION(hscif0),
3031 SH_PFC_FUNCTION(hscif1),
2853 SH_PFC_FUNCTION(intc), 3032 SH_PFC_FUNCTION(intc),
3033 SH_PFC_FUNCTION(mmc0),
3034 SH_PFC_FUNCTION(mmc1),
2854 SH_PFC_FUNCTION(scif0), 3035 SH_PFC_FUNCTION(scif0),
2855 SH_PFC_FUNCTION(scif1), 3036 SH_PFC_FUNCTION(scif1),
2856 SH_PFC_FUNCTION(scifa0), 3037 SH_PFC_FUNCTION(scifa0),
@@ -2859,13 +3040,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
2859 SH_PFC_FUNCTION(scifb0), 3040 SH_PFC_FUNCTION(scifb0),
2860 SH_PFC_FUNCTION(scifb1), 3041 SH_PFC_FUNCTION(scifb1),
2861 SH_PFC_FUNCTION(scifb2), 3042 SH_PFC_FUNCTION(scifb2),
2862 SH_PFC_FUNCTION(tpu0),
2863 SH_PFC_FUNCTION(mmc0),
2864 SH_PFC_FUNCTION(mmc1),
2865 SH_PFC_FUNCTION(sdhi0), 3043 SH_PFC_FUNCTION(sdhi0),
2866 SH_PFC_FUNCTION(sdhi1), 3044 SH_PFC_FUNCTION(sdhi1),
2867 SH_PFC_FUNCTION(sdhi2), 3045 SH_PFC_FUNCTION(sdhi2),
2868 SH_PFC_FUNCTION(sdhi3), 3046 SH_PFC_FUNCTION(sdhi3),
3047 SH_PFC_FUNCTION(tpu0),
2869}; 3048};
2870 3049
2871static struct pinmux_cfg_reg pinmux_config_regs[] = { 3050static struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index d4d377c40ec9..ce1743d0b679 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -14,8 +14,9 @@ config OMAP_REMOTEPROC
14 depends on HAS_DMA 14 depends on HAS_DMA
15 depends on ARCH_OMAP4 || SOC_OMAP5 15 depends on ARCH_OMAP4 || SOC_OMAP5
16 depends on OMAP_IOMMU 16 depends on OMAP_IOMMU
17 depends on OMAP_MBOX_FWK
18 select REMOTEPROC 17 select REMOTEPROC
18 select MAILBOX
19 select OMAP2PLUS_MBOX
19 select RPMSG 20 select RPMSG
20 help 21 help
21 Say y here to support OMAP's remote processors (dual M3 22 Say y here to support OMAP's remote processors (dual M3
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 0e396c155b3b..51689721ea7a 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -27,8 +27,8 @@
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/remoteproc.h> 29#include <linux/remoteproc.h>
30#include <linux/omap-mailbox.h>
30 31
31#include <plat/mailbox.h>
32#include <linux/platform_data/remoteproc-omap.h> 32#include <linux/platform_data/remoteproc-omap.h>
33 33
34#include "omap_remoteproc.h" 34#include "omap_remoteproc.h"
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 60848f198b48..165b918b8171 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -5,7 +5,8 @@
5menuconfig TIDSPBRIDGE 5menuconfig TIDSPBRIDGE
6 tristate "DSP Bridge driver" 6 tristate "DSP Bridge driver"
7 depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM 7 depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM
8 select OMAP_MBOX_FWK 8 select MAILBOX
9 select OMAP2PLUS_MBOX
9 help 10 help
10 DSP/BIOS Bridge is designed for platforms that contain a GPP and 11 DSP/BIOS Bridge is designed for platforms that contain a GPP and
11 one or more attached DSPs. The GPP is considered the master or 12 one or more attached DSPs. The GPP is considered the master or
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
index 7f3a1db31619..d1441db469fc 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
@@ -41,7 +41,7 @@
41#include <linux/ioport.h> 41#include <linux/ioport.h>
42#include <linux/platform_device.h> 42#include <linux/platform_device.h>
43#include <linux/clk.h> 43#include <linux/clk.h>
44#include <plat/mailbox.h> 44#include <linux/omap-mailbox.h>
45#include <linux/pagemap.h> 45#include <linux/pagemap.h>
46#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
47#include <linux/dma-mapping.h> 47#include <linux/dma-mapping.h>
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 01b8229fa862..62f6802f6e0f 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -155,7 +155,7 @@ config USB_LPC32XX
155 155
156config USB_ATMEL_USBA 156config USB_ATMEL_USBA
157 tristate "Atmel USBA" 157 tristate "Atmel USBA"
158 depends on AVR32 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45 158 depends on AVR32 || ARCH_AT91
159 help 159 help
160 USBA is the integrated high-speed USB Device controller on 160 USBA is the integrated high-speed USB Device controller on
161 the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel. 161 the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 5a5128a226f7..1d9722203ca6 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -22,15 +22,13 @@
22#include <linux/usb/atmel_usba_udc.h> 22#include <linux/usb/atmel_usba_udc.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/platform_data/atmel.h> 24#include <linux/platform_data/atmel.h>
25#include <linux/of.h>
26#include <linux/of_gpio.h>
25 27
26#include <asm/gpio.h> 28#include <asm/gpio.h>
27 29
28#include "atmel_usba_udc.h" 30#include "atmel_usba_udc.h"
29 31
30
31static struct usba_udc the_udc;
32static struct usba_ep *usba_ep;
33
34#ifdef CONFIG_USB_GADGET_DEBUG_FS 32#ifdef CONFIG_USB_GADGET_DEBUG_FS
35#include <linux/debugfs.h> 33#include <linux/debugfs.h>
36#include <linux/uaccess.h> 34#include <linux/uaccess.h>
@@ -1014,16 +1012,13 @@ static void nop_release(struct device *dev)
1014 1012
1015} 1013}
1016 1014
1017static struct usba_udc the_udc = { 1015struct usb_gadget usba_gadget_template = {
1018 .gadget = { 1016 .ops = &usba_udc_ops,
1019 .ops = &usba_udc_ops, 1017 .max_speed = USB_SPEED_HIGH,
1020 .ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list), 1018 .name = "atmel_usba_udc",
1021 .max_speed = USB_SPEED_HIGH, 1019 .dev = {
1022 .name = "atmel_usba_udc", 1020 .init_name = "gadget",
1023 .dev = { 1021 .release = nop_release,
1024 .init_name = "gadget",
1025 .release = nop_release,
1026 },
1027 }, 1022 },
1028}; 1023};
1029 1024
@@ -1147,7 +1142,7 @@ static int do_test_mode(struct usba_udc *udc)
1147 * Test_SE0_NAK: Force high-speed mode and set up ep0 1142 * Test_SE0_NAK: Force high-speed mode and set up ep0
1148 * for Bulk IN transfers 1143 * for Bulk IN transfers
1149 */ 1144 */
1150 ep = &usba_ep[0]; 1145 ep = &udc->usba_ep[0];
1151 usba_writel(udc, TST, 1146 usba_writel(udc, TST,
1152 USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH)); 1147 USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
1153 usba_ep_writel(ep, CFG, 1148 usba_ep_writel(ep, CFG,
@@ -1165,7 +1160,7 @@ static int do_test_mode(struct usba_udc *udc)
1165 break; 1160 break;
1166 case 0x0400: 1161 case 0x0400:
1167 /* Test_Packet */ 1162 /* Test_Packet */
1168 ep = &usba_ep[0]; 1163 ep = &udc->usba_ep[0];
1169 usba_ep_writel(ep, CFG, 1164 usba_ep_writel(ep, CFG,
1170 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64) 1165 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
1171 | USBA_EPT_DIR_IN 1166 | USBA_EPT_DIR_IN
@@ -1668,7 +1663,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
1668 1663
1669 for (i = 1; i < USBA_NR_ENDPOINTS; i++) 1664 for (i = 1; i < USBA_NR_ENDPOINTS; i++)
1670 if (dma_status & (1 << i)) 1665 if (dma_status & (1 << i))
1671 usba_dma_irq(udc, &usba_ep[i]); 1666 usba_dma_irq(udc, &udc->usba_ep[i]);
1672 } 1667 }
1673 1668
1674 ep_status = USBA_BFEXT(EPT_INT, status); 1669 ep_status = USBA_BFEXT(EPT_INT, status);
@@ -1677,10 +1672,10 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
1677 1672
1678 for (i = 0; i < USBA_NR_ENDPOINTS; i++) 1673 for (i = 0; i < USBA_NR_ENDPOINTS; i++)
1679 if (ep_status & (1 << i)) { 1674 if (ep_status & (1 << i)) {
1680 if (ep_is_control(&usba_ep[i])) 1675 if (ep_is_control(&udc->usba_ep[i]))
1681 usba_control_irq(udc, &usba_ep[i]); 1676 usba_control_irq(udc, &udc->usba_ep[i]);
1682 else 1677 else
1683 usba_ep_irq(udc, &usba_ep[i]); 1678 usba_ep_irq(udc, &udc->usba_ep[i]);
1684 } 1679 }
1685 } 1680 }
1686 1681
@@ -1705,7 +1700,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
1705 DBG(DBG_BUS, "%s bus reset detected\n", 1700 DBG(DBG_BUS, "%s bus reset detected\n",
1706 usb_speed_string(udc->gadget.speed)); 1701 usb_speed_string(udc->gadget.speed));
1707 1702
1708 ep0 = &usba_ep[0]; 1703 ep0 = &udc->usba_ep[0];
1709 ep0->ep.desc = &usba_ep0_desc; 1704 ep0->ep.desc = &usba_ep0_desc;
1710 ep0->state = WAIT_FOR_SETUP; 1705 ep0->state = WAIT_FOR_SETUP;
1711 usba_ep_writel(ep0, CFG, 1706 usba_ep_writel(ep0, CFG,
@@ -1835,17 +1830,158 @@ static int atmel_usba_stop(struct usb_gadget *gadget,
1835 return 0; 1830 return 0;
1836} 1831}
1837 1832
1838static int __init usba_udc_probe(struct platform_device *pdev) 1833#ifdef CONFIG_OF
1834static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
1835 struct usba_udc *udc)
1836{
1837 u32 val;
1838 const char *name;
1839 enum of_gpio_flags flags;
1840 struct device_node *np = pdev->dev.of_node;
1841 struct device_node *pp;
1842 int i, ret;
1843 struct usba_ep *eps, *ep;
1844
1845 udc->num_ep = 0;
1846
1847 udc->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0,
1848 &flags);
1849 udc->vbus_pin_inverted = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
1850
1851 pp = NULL;
1852 while ((pp = of_get_next_child(np, pp)))
1853 udc->num_ep++;
1854
1855 eps = devm_kzalloc(&pdev->dev, sizeof(struct usba_ep) * udc->num_ep,
1856 GFP_KERNEL);
1857 if (!eps)
1858 return ERR_PTR(-ENOMEM);
1859
1860 udc->gadget.ep0 = &eps[0].ep;
1861
1862 INIT_LIST_HEAD(&eps[0].ep.ep_list);
1863
1864 pp = NULL;
1865 i = 0;
1866 while ((pp = of_get_next_child(np, pp))) {
1867 ep = &eps[i];
1868
1869 ret = of_property_read_u32(pp, "reg", &val);
1870 if (ret) {
1871 dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret);
1872 goto err;
1873 }
1874 ep->index = val;
1875
1876 ret = of_property_read_u32(pp, "atmel,fifo-size", &val);
1877 if (ret) {
1878 dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret);
1879 goto err;
1880 }
1881 ep->fifo_size = val;
1882
1883 ret = of_property_read_u32(pp, "atmel,nb-banks", &val);
1884 if (ret) {
1885 dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret);
1886 goto err;
1887 }
1888 ep->nr_banks = val;
1889
1890 ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
1891 ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
1892
1893 ret = of_property_read_string(pp, "name", &name);
1894 ep->ep.name = name;
1895
1896 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1897 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
1898 ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
1899 ep->ep.ops = &usba_ep_ops;
1900 ep->ep.maxpacket = ep->fifo_size;
1901 ep->udc = udc;
1902 INIT_LIST_HEAD(&ep->queue);
1903
1904 if (i)
1905 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1906
1907 i++;
1908 }
1909
1910 return eps;
1911err:
1912 return ERR_PTR(ret);
1913}
1914#else
1915static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
1916 struct usba_udc *udc)
1917{
1918 return ERR_PTR(-ENOSYS);
1919}
1920#endif
1921
1922static struct usba_ep * usba_udc_pdata(struct platform_device *pdev,
1923 struct usba_udc *udc)
1839{ 1924{
1840 struct usba_platform_data *pdata = pdev->dev.platform_data; 1925 struct usba_platform_data *pdata = pdev->dev.platform_data;
1926 struct usba_ep *eps;
1927 int i;
1928
1929 if (!pdata)
1930 return ERR_PTR(-ENXIO);
1931
1932 eps = devm_kzalloc(&pdev->dev, sizeof(struct usba_ep) * pdata->num_ep,
1933 GFP_KERNEL);
1934 if (!eps)
1935 return ERR_PTR(-ENOMEM);
1936
1937 udc->gadget.ep0 = &eps[0].ep;
1938
1939 udc->vbus_pin = pdata->vbus_pin;
1940 udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
1941 udc->num_ep = pdata->num_ep;
1942
1943 INIT_LIST_HEAD(&eps[0].ep.ep_list);
1944
1945 for (i = 0; i < pdata->num_ep; i++) {
1946 struct usba_ep *ep = &eps[i];
1947
1948 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1949 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
1950 ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
1951 ep->ep.ops = &usba_ep_ops;
1952 ep->ep.name = pdata->ep[i].name;
1953 ep->fifo_size = ep->ep.maxpacket = pdata->ep[i].fifo_size;
1954 ep->udc = udc;
1955 INIT_LIST_HEAD(&ep->queue);
1956 ep->nr_banks = pdata->ep[i].nr_banks;
1957 ep->index = pdata->ep[i].index;
1958 ep->can_dma = pdata->ep[i].can_dma;
1959 ep->can_isoc = pdata->ep[i].can_isoc;
1960
1961 if (i)
1962 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1963 }
1964
1965 return eps;
1966}
1967
1968static int __init usba_udc_probe(struct platform_device *pdev)
1969{
1841 struct resource *regs, *fifo; 1970 struct resource *regs, *fifo;
1842 struct clk *pclk, *hclk; 1971 struct clk *pclk, *hclk;
1843 struct usba_udc *udc = &the_udc; 1972 struct usba_udc *udc;
1844 int irq, ret, i; 1973 int irq, ret, i;
1845 1974
1975 udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
1976 if (!udc)
1977 return -ENOMEM;
1978
1979 udc->gadget = usba_gadget_template;
1980 INIT_LIST_HEAD(&udc->gadget.ep_list);
1981
1846 regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID); 1982 regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
1847 fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID); 1983 fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
1848 if (!regs || !fifo || !pdata) 1984 if (!regs || !fifo)
1849 return -ENXIO; 1985 return -ENXIO;
1850 1986
1851 irq = platform_get_irq(pdev, 0); 1987 irq = platform_get_irq(pdev, 0);
@@ -1891,46 +2027,14 @@ static int __init usba_udc_probe(struct platform_device *pdev)
1891 usba_writel(udc, CTRL, USBA_DISABLE_MASK); 2027 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1892 clk_disable(pclk); 2028 clk_disable(pclk);
1893 2029
1894 usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep, 2030 if (pdev->dev.of_node)
1895 GFP_KERNEL); 2031 udc->usba_ep = atmel_udc_of_init(pdev, udc);
1896 if (!usba_ep) 2032 else
1897 goto err_alloc_ep; 2033 udc->usba_ep = usba_udc_pdata(pdev, udc);
1898
1899 the_udc.gadget.ep0 = &usba_ep[0].ep;
1900
1901 INIT_LIST_HEAD(&usba_ep[0].ep.ep_list);
1902 usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0);
1903 usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0);
1904 usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0);
1905 usba_ep[0].ep.ops = &usba_ep_ops;
1906 usba_ep[0].ep.name = pdata->ep[0].name;
1907 usba_ep[0].ep.maxpacket = pdata->ep[0].fifo_size;
1908 usba_ep[0].udc = &the_udc;
1909 INIT_LIST_HEAD(&usba_ep[0].queue);
1910 usba_ep[0].fifo_size = pdata->ep[0].fifo_size;
1911 usba_ep[0].nr_banks = pdata->ep[0].nr_banks;
1912 usba_ep[0].index = pdata->ep[0].index;
1913 usba_ep[0].can_dma = pdata->ep[0].can_dma;
1914 usba_ep[0].can_isoc = pdata->ep[0].can_isoc;
1915
1916 for (i = 1; i < pdata->num_ep; i++) {
1917 struct usba_ep *ep = &usba_ep[i];
1918
1919 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1920 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
1921 ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
1922 ep->ep.ops = &usba_ep_ops;
1923 ep->ep.name = pdata->ep[i].name;
1924 ep->ep.maxpacket = pdata->ep[i].fifo_size;
1925 ep->udc = &the_udc;
1926 INIT_LIST_HEAD(&ep->queue);
1927 ep->fifo_size = pdata->ep[i].fifo_size;
1928 ep->nr_banks = pdata->ep[i].nr_banks;
1929 ep->index = pdata->ep[i].index;
1930 ep->can_dma = pdata->ep[i].can_dma;
1931 ep->can_isoc = pdata->ep[i].can_isoc;
1932 2034
1933 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); 2035 if (IS_ERR(udc->usba_ep)) {
2036 ret = PTR_ERR(udc->usba_ep);
2037 goto err_alloc_ep;
1934 } 2038 }
1935 2039
1936 ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc); 2040 ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
@@ -1941,16 +2045,12 @@ static int __init usba_udc_probe(struct platform_device *pdev)
1941 } 2045 }
1942 udc->irq = irq; 2046 udc->irq = irq;
1943 2047
1944 if (gpio_is_valid(pdata->vbus_pin)) { 2048 if (gpio_is_valid(udc->vbus_pin)) {
1945 if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) { 2049 if (!devm_gpio_request(&pdev->dev, udc->vbus_pin, "atmel_usba_udc")) {
1946 udc->vbus_pin = pdata->vbus_pin;
1947 udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
1948
1949 ret = request_irq(gpio_to_irq(udc->vbus_pin), 2050 ret = request_irq(gpio_to_irq(udc->vbus_pin),
1950 usba_vbus_irq, 0, 2051 usba_vbus_irq, 0,
1951 "atmel_usba_udc", udc); 2052 "atmel_usba_udc", udc);
1952 if (ret) { 2053 if (ret) {
1953 gpio_free(udc->vbus_pin);
1954 udc->vbus_pin = -ENODEV; 2054 udc->vbus_pin = -ENODEV;
1955 dev_warn(&udc->pdev->dev, 2055 dev_warn(&udc->pdev->dev,
1956 "failed to request vbus irq; " 2056 "failed to request vbus irq; "
@@ -1969,20 +2069,17 @@ static int __init usba_udc_probe(struct platform_device *pdev)
1969 goto err_add_udc; 2069 goto err_add_udc;
1970 2070
1971 usba_init_debugfs(udc); 2071 usba_init_debugfs(udc);
1972 for (i = 1; i < pdata->num_ep; i++) 2072 for (i = 1; i < udc->num_ep; i++)
1973 usba_ep_init_debugfs(udc, &usba_ep[i]); 2073 usba_ep_init_debugfs(udc, &udc->usba_ep[i]);
1974 2074
1975 return 0; 2075 return 0;
1976 2076
1977err_add_udc: 2077err_add_udc:
1978 if (gpio_is_valid(pdata->vbus_pin)) { 2078 if (gpio_is_valid(udc->vbus_pin))
1979 free_irq(gpio_to_irq(udc->vbus_pin), udc); 2079 free_irq(gpio_to_irq(udc->vbus_pin), udc);
1980 gpio_free(udc->vbus_pin);
1981 }
1982 2080
1983 free_irq(irq, udc); 2081 free_irq(irq, udc);
1984err_request_irq: 2082err_request_irq:
1985 kfree(usba_ep);
1986err_alloc_ep: 2083err_alloc_ep:
1987 iounmap(udc->fifo); 2084 iounmap(udc->fifo);
1988err_map_fifo: 2085err_map_fifo:
@@ -1999,23 +2096,20 @@ static int __exit usba_udc_remove(struct platform_device *pdev)
1999{ 2096{
2000 struct usba_udc *udc; 2097 struct usba_udc *udc;
2001 int i; 2098 int i;
2002 struct usba_platform_data *pdata = pdev->dev.platform_data;
2003 2099
2004 udc = platform_get_drvdata(pdev); 2100 udc = platform_get_drvdata(pdev);
2005 2101
2006 usb_del_gadget_udc(&udc->gadget); 2102 usb_del_gadget_udc(&udc->gadget);
2007 2103
2008 for (i = 1; i < pdata->num_ep; i++) 2104 for (i = 1; i < udc->num_ep; i++)
2009 usba_ep_cleanup_debugfs(&usba_ep[i]); 2105 usba_ep_cleanup_debugfs(&udc->usba_ep[i]);
2010 usba_cleanup_debugfs(udc); 2106 usba_cleanup_debugfs(udc);
2011 2107
2012 if (gpio_is_valid(udc->vbus_pin)) { 2108 if (gpio_is_valid(udc->vbus_pin)) {
2013 free_irq(gpio_to_irq(udc->vbus_pin), udc); 2109 free_irq(gpio_to_irq(udc->vbus_pin), udc);
2014 gpio_free(udc->vbus_pin);
2015 } 2110 }
2016 2111
2017 free_irq(udc->irq, udc); 2112 free_irq(udc->irq, udc);
2018 kfree(usba_ep);
2019 iounmap(udc->fifo); 2113 iounmap(udc->fifo);
2020 iounmap(udc->regs); 2114 iounmap(udc->regs);
2021 clk_put(udc->hclk); 2115 clk_put(udc->hclk);
@@ -2024,11 +2118,21 @@ static int __exit usba_udc_remove(struct platform_device *pdev)
2024 return 0; 2118 return 0;
2025} 2119}
2026 2120
2121#if defined(CONFIG_OF)
2122static const struct of_device_id atmel_udc_dt_ids[] = {
2123 { .compatible = "atmel,at91sam9rl-udc" },
2124 { /* sentinel */ }
2125};
2126
2127MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
2128#endif
2129
2027static struct platform_driver udc_driver = { 2130static struct platform_driver udc_driver = {
2028 .remove = __exit_p(usba_udc_remove), 2131 .remove = __exit_p(usba_udc_remove),
2029 .driver = { 2132 .driver = {
2030 .name = "atmel_usba_udc", 2133 .name = "atmel_usba_udc",
2031 .owner = THIS_MODULE, 2134 .owner = THIS_MODULE,
2135 .of_match_table = of_match_ptr(atmel_udc_dt_ids),
2032 }, 2136 },
2033}; 2137};
2034 2138
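
Note that the reworked error paths above no longer free the endpoint array: both atmel_udc_of_init() and usba_udc_pdata() allocate it with devm_kzalloc(), so it is released automatically when probe fails or the device is unbound. A minimal sketch of that devres pattern, with a made-up struct demo_priv standing in for the driver state:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct demo_priv {
		int nr_items;
		int *items;
	};

	static int demo_probe(struct platform_device *pdev)
	{
		struct demo_priv *priv;

		/* devm_* memory is tied to the device's lifetime: no kfree()
		 * in the error paths and none in the remove callback. */
		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->nr_items = 4;
		priv->items = devm_kzalloc(&pdev->dev,
					   priv->nr_items * sizeof(*priv->items),
					   GFP_KERNEL);
		if (!priv->items)
			return -ENOMEM;	/* priv itself is freed by devres */

		platform_set_drvdata(pdev, priv);
		return 0;
	}
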
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
index d65a61851d3d..2922db50befe 100644
--- a/drivers/usb/gadget/atmel_usba_udc.h
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -317,8 +317,10 @@ struct usba_udc {
317 int irq; 317 int irq;
318 int vbus_pin; 318 int vbus_pin;
319 int vbus_pin_inverted; 319 int vbus_pin_inverted;
320 int num_ep;
320 struct clk *pclk; 321 struct clk *pclk;
321 struct clk *hclk; 322 struct clk *hclk;
323 struct usba_ep *usba_ep;
322 324
323 u16 devstatus; 325 u16 devstatus;
324 326
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 028ff4d07dc7..fce71b605936 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -25,11 +25,19 @@
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/of.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/usb/musb-ux500.h> 30#include <linux/usb/musb-ux500.h>
30 31
31#include "musb_core.h" 32#include "musb_core.h"
32 33
34static struct musb_hdrc_config ux500_musb_hdrc_config = {
35 .multipoint = true,
36 .dyn_fifo = true,
37 .num_eps = 16,
38 .ram_bits = 16,
39};
40
33struct ux500_glue { 41struct ux500_glue {
34 struct device *dev; 42 struct device *dev;
35 struct platform_device *musb; 43 struct platform_device *musb;
@@ -187,15 +195,58 @@ static const struct musb_platform_ops ux500_ops = {
187 .set_vbus = ux500_musb_set_vbus, 195 .set_vbus = ux500_musb_set_vbus,
188}; 196};
189 197
198static struct musb_hdrc_platform_data *
199ux500_of_probe(struct platform_device *pdev, struct device_node *np)
200{
201 struct musb_hdrc_platform_data *pdata;
202 const char *mode;
203 int strlen;
204
205 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
206 if (!pdata)
207 return NULL;
208
209 mode = of_get_property(np, "dr_mode", &strlen);
210 if (!mode) {
211 dev_err(&pdev->dev, "No 'dr_mode' property found\n");
212 return NULL;
213 }
214
215 if (strlen > 0) {
216 if (!strcmp(mode, "host"))
217 pdata->mode = MUSB_HOST;
218 if (!strcmp(mode, "otg"))
219 pdata->mode = MUSB_OTG;
220 if (!strcmp(mode, "peripheral"))
221 pdata->mode = MUSB_PERIPHERAL;
222 }
223
224 return pdata;
225}
226
190static int ux500_probe(struct platform_device *pdev) 227static int ux500_probe(struct platform_device *pdev)
191{ 228{
192 struct resource musb_resources[2]; 229 struct resource musb_resources[2];
193 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 230 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
231 struct device_node *np = pdev->dev.of_node;
194 struct platform_device *musb; 232 struct platform_device *musb;
195 struct ux500_glue *glue; 233 struct ux500_glue *glue;
196 struct clk *clk; 234 struct clk *clk;
197 int ret = -ENOMEM; 235 int ret = -ENOMEM;
198 236
237 if (!pdata) {
238 if (np) {
239 pdata = ux500_of_probe(pdev, np);
240 if (!pdata)
241 goto err0;
242
243 pdev->dev.platform_data = pdata;
244 } else {
245 dev_err(&pdev->dev, "no pdata or device tree found\n");
246 goto err0;
247 }
248 }
249
199 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 250 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
200 if (!glue) { 251 if (!glue) {
201 dev_err(&pdev->dev, "failed to allocate glue context\n"); 252 dev_err(&pdev->dev, "failed to allocate glue context\n");
@@ -222,14 +273,16 @@ static int ux500_probe(struct platform_device *pdev)
222 } 273 }
223 274
224 musb->dev.parent = &pdev->dev; 275 musb->dev.parent = &pdev->dev;
225 musb->dev.dma_mask = pdev->dev.dma_mask; 276 musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
226 musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; 277 musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
278 musb->dev.of_node = pdev->dev.of_node;
227 279
228 glue->dev = &pdev->dev; 280 glue->dev = &pdev->dev;
229 glue->musb = musb; 281 glue->musb = musb;
230 glue->clk = clk; 282 glue->clk = clk;
231 283
232 pdata->platform_ops = &ux500_ops; 284 pdata->platform_ops = &ux500_ops;
285 pdata->config = &ux500_musb_hdrc_config;
233 286
234 platform_set_drvdata(pdev, glue); 287 platform_set_drvdata(pdev, glue);
235 288
@@ -334,12 +387,18 @@ static const struct dev_pm_ops ux500_pm_ops = {
334#define DEV_PM_OPS NULL 387#define DEV_PM_OPS NULL
335#endif 388#endif
336 389
390static const struct of_device_id ux500_match[] = {
391 { .compatible = "stericsson,db8500-musb", },
392 {}
393};
394
337static struct platform_driver ux500_driver = { 395static struct platform_driver ux500_driver = {
338 .probe = ux500_probe, 396 .probe = ux500_probe,
339 .remove = ux500_remove, 397 .remove = ux500_remove,
340 .driver = { 398 .driver = {
341 .name = "musb-ux500", 399 .name = "musb-ux500",
342 .pm = DEV_PM_OPS, 400 .pm = DEV_PM_OPS,
401 .of_match_table = ux500_match,
343 }, 402 },
344}; 403};
345 404
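
The new ux500_of_probe() above reads "dr_mode" with of_get_property() and a strcmp() chain. For comparison only, an equivalent sketch using of_property_read_string(), which is an alternative formulation and not what this patch does, could look like this (demo_parse_dr_mode is a hypothetical helper):

	#include <linux/of.h>
	#include <linux/string.h>
	#include <linux/usb/musb.h>

	/* Map the "dr_mode" device-tree property to enum musb_mode. */
	static enum musb_mode demo_parse_dr_mode(struct device_node *np)
	{
		const char *mode;

		if (of_property_read_string(np, "dr_mode", &mode))
			return MUSB_UNDEFINED;

		if (!strcmp(mode, "host"))
			return MUSB_HOST;
		if (!strcmp(mode, "otg"))
			return MUSB_OTG;
		if (!strcmp(mode, "peripheral"))
			return MUSB_PERIPHERAL;

		return MUSB_UNDEFINED;
	}
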
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index 63e7c8a6b125..bfb7a65d83cc 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -34,6 +34,11 @@
34#include <linux/platform_data/usb-musb-ux500.h> 34#include <linux/platform_data/usb-musb-ux500.h>
35#include "musb_core.h" 35#include "musb_core.h"
36 36
37static const char *iep_chan_names[] = { "iep_1_9", "iep_2_10", "iep_3_11", "iep_4_12",
38 "iep_5_13", "iep_6_14", "iep_7_15", "iep_8" };
39static const char *oep_chan_names[] = { "oep_1_9", "oep_2_10", "oep_3_11", "oep_4_12",
40 "oep_5_13", "oep_6_14", "oep_7_15", "oep_8" };
41
37struct ux500_dma_channel { 42struct ux500_dma_channel {
38 struct dma_channel channel; 43 struct dma_channel channel;
39 struct ux500_dma_controller *controller; 44 struct ux500_dma_controller *controller;
@@ -48,10 +53,8 @@ struct ux500_dma_channel {
48 53
49struct ux500_dma_controller { 54struct ux500_dma_controller {
50 struct dma_controller controller; 55 struct dma_controller controller;
51 struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_CHANNELS]; 56 struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
52 struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_TX_CHANNELS]; 57 struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
53 u32 num_rx_channels;
54 u32 num_tx_channels;
55 void *private_data; 58 void *private_data;
56 dma_addr_t phy_base; 59 dma_addr_t phy_base;
57}; 60};
@@ -143,19 +146,15 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
143 struct ux500_dma_channel *ux500_channel = NULL; 146 struct ux500_dma_channel *ux500_channel = NULL;
144 struct musb *musb = controller->private_data; 147 struct musb *musb = controller->private_data;
145 u8 ch_num = hw_ep->epnum - 1; 148 u8 ch_num = hw_ep->epnum - 1;
146 u32 max_ch;
147 149
148 /* Max 8 DMA channels (0 - 7). Each DMA channel can only be allocated 150 /* 8 DMA channels (0 - 7). Each DMA channel can only be allocated
149 * to specified hw_ep. For example DMA channel 0 can only be allocated 151 * to specified hw_ep. For example DMA channel 0 can only be allocated
150 * to hw_ep 1 and 9. 152 * to hw_ep 1 and 9.
151 */ 153 */
152 if (ch_num > 7) 154 if (ch_num > 7)
153 ch_num -= 8; 155 ch_num -= 8;
154 156
155 max_ch = is_tx ? controller->num_tx_channels : 157 if (ch_num >= UX500_MUSB_DMA_NUM_RX_TX_CHANNELS)
156 controller->num_rx_channels;
157
158 if (ch_num >= max_ch)
159 return NULL; 158 return NULL;
160 159
161 ux500_channel = is_tx ? &(controller->tx_channel[ch_num]) : 160 ux500_channel = is_tx ? &(controller->tx_channel[ch_num]) :
@@ -263,7 +262,7 @@ static int ux500_dma_controller_stop(struct dma_controller *c)
263 struct dma_channel *channel; 262 struct dma_channel *channel;
264 u8 ch_num; 263 u8 ch_num;
265 264
266 for (ch_num = 0; ch_num < controller->num_rx_channels; ch_num++) { 265 for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
267 channel = &controller->rx_channel[ch_num].channel; 266 channel = &controller->rx_channel[ch_num].channel;
268 ux500_channel = channel->private_data; 267 ux500_channel = channel->private_data;
269 268
@@ -273,7 +272,7 @@ static int ux500_dma_controller_stop(struct dma_controller *c)
273 dma_release_channel(ux500_channel->dma_chan); 272 dma_release_channel(ux500_channel->dma_chan);
274 } 273 }
275 274
276 for (ch_num = 0; ch_num < controller->num_tx_channels; ch_num++) { 275 for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
277 channel = &controller->tx_channel[ch_num].channel; 276 channel = &controller->tx_channel[ch_num].channel;
278 ux500_channel = channel->private_data; 277 ux500_channel = channel->private_data;
279 278
@@ -294,34 +293,36 @@ static int ux500_dma_controller_start(struct dma_controller *c)
294 struct musb *musb = controller->private_data; 293 struct musb *musb = controller->private_data;
295 struct device *dev = musb->controller; 294 struct device *dev = musb->controller;
296 struct musb_hdrc_platform_data *plat = dev->platform_data; 295 struct musb_hdrc_platform_data *plat = dev->platform_data;
297 struct ux500_musb_board_data *data = plat->board_data; 296 struct ux500_musb_board_data *data;
298 struct dma_channel *dma_channel = NULL; 297 struct dma_channel *dma_channel = NULL;
298 char **chan_names;
299 u32 ch_num; 299 u32 ch_num;
300 u8 dir; 300 u8 dir;
301 u8 is_tx = 0; 301 u8 is_tx = 0;
302 302
303 void **param_array; 303 void **param_array;
304 struct ux500_dma_channel *channel_array; 304 struct ux500_dma_channel *channel_array;
305 u32 ch_count;
306 dma_cap_mask_t mask; 305 dma_cap_mask_t mask;
307 306
308 if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) || 307 if (!plat) {
309 (data->num_tx_channels > UX500_MUSB_DMA_NUM_TX_CHANNELS)) 308 dev_err(musb->controller, "No platform data\n");
310 return -EINVAL; 309 return -EINVAL;
310 }
311 311
312 controller->num_rx_channels = data->num_rx_channels; 312 data = plat->board_data;
313 controller->num_tx_channels = data->num_tx_channels;
314 313
315 dma_cap_zero(mask); 314 dma_cap_zero(mask);
316 dma_cap_set(DMA_SLAVE, mask); 315 dma_cap_set(DMA_SLAVE, mask);
317 316
318 /* Prepare the loop for RX channels */ 317 /* Prepare the loop for RX channels */
319 channel_array = controller->rx_channel; 318 channel_array = controller->rx_channel;
320 ch_count = data->num_rx_channels; 319 param_array = data ? data->dma_rx_param_array : NULL;
321 param_array = data->dma_rx_param_array; 320 chan_names = (char **)iep_chan_names;
322 321
323 for (dir = 0; dir < 2; dir++) { 322 for (dir = 0; dir < 2; dir++) {
324 for (ch_num = 0; ch_num < ch_count; ch_num++) { 323 for (ch_num = 0;
324 ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS;
325 ch_num++) {
325 ux500_channel = &channel_array[ch_num]; 326 ux500_channel = &channel_array[ch_num];
326 ux500_channel->controller = controller; 327 ux500_channel->controller = controller;
327 ux500_channel->ch_num = ch_num; 328 ux500_channel->ch_num = ch_num;
@@ -332,9 +333,15 @@ static int ux500_dma_controller_start(struct dma_controller *c)
332 dma_channel->status = MUSB_DMA_STATUS_FREE; 333 dma_channel->status = MUSB_DMA_STATUS_FREE;
333 dma_channel->max_len = SZ_16M; 334 dma_channel->max_len = SZ_16M;
334 335
335 ux500_channel->dma_chan = dma_request_channel(mask, 336 ux500_channel->dma_chan =
336 data->dma_filter, 337 dma_request_slave_channel(dev, chan_names[ch_num]);
337 param_array[ch_num]); 338
339 if (!ux500_channel->dma_chan)
340 ux500_channel->dma_chan =
341 dma_request_channel(mask,
342 data->dma_filter,
343 param_array[ch_num]);
344
338 if (!ux500_channel->dma_chan) { 345 if (!ux500_channel->dma_chan) {
339 ERR("Dma pipe allocation error dir=%d ch=%d\n", 346 ERR("Dma pipe allocation error dir=%d ch=%d\n",
340 dir, ch_num); 347 dir, ch_num);
@@ -349,8 +356,8 @@ static int ux500_dma_controller_start(struct dma_controller *c)
349 356
350 /* Prepare the loop for TX channels */ 357 /* Prepare the loop for TX channels */
351 channel_array = controller->tx_channel; 358 channel_array = controller->tx_channel;
352 ch_count = data->num_tx_channels; 359 param_array = data ? data->dma_tx_param_array : NULL;
353 param_array = data->dma_tx_param_array; 360 chan_names = (char **)oep_chan_names;
354 is_tx = 1; 361 is_tx = 1;
355 } 362 }
356 363
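
The channel setup above now prefers named slave channels from the device tree (the "iep_*"/"oep_*" names) and only falls back to the legacy board-data filter lookup when no DT channel is found. A stripped-down sketch of that request-with-fallback pattern, with the channel name, filter function and filter parameter as placeholders:

	#include <linux/dmaengine.h>

	static struct dma_chan *demo_get_dma_chan(struct device *dev,
						  const char *name,
						  dma_filter_fn filter,
						  void *filter_param)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		/* Preferred path: look the channel up by name through the
		 * "dmas"/"dma-names" properties of the device's DT node. */
		chan = dma_request_slave_channel(dev, name);
		if (chan)
			return chan;

		/* Legacy fallback: ask the dmaengine core for any DMA_SLAVE
		 * channel accepted by the board-provided filter function. */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, filter, filter_param);
	}
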