aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-06-02 19:35:49 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-02 19:35:49 -0400
commita727eaf64ff084a50b983fc506810c7a576b7ce3 (patch)
treecb82642227ed590ebc43b12cfad285a2d7681d5d /drivers
parent755a9ba7bf24a45b6dbf8bb15a5a56c8ed12461a (diff)
parent45e70b7d48d53d5eb193c6b3f012b31ca135fb4c (diff)
Merge tag 'drivers-for-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc into next
Pull ARM SoC driver changes from Olof Johansson: "SoC-near driver changes that we're merging through our tree. Mostly because they depend on other changes we have staged, but in some cases because the driver maintainers preferred that we did it this way. This contains a largeish cleanup series of the omap_l3_noc bus driver, cpuidle rework for Exynos, some reset driver conversions and a long branch of TI EDMA fixes and cleanups, with more to come next release. The TI EDMA cleanups is a shared branch with the dmaengine tree, with a handful of Davinci-specific fixes on top. After discussion at last year's KS (and some more on the mailing lists), we are here adding a drivers/soc directory. The purpose of this is to keep per-vendor shared code that's needed by different drivers but that doesn't fit into the MFD (nor drivers/platform) model. We expect to keep merging contents for this hierarchy through arm-soc so we can keep an eye on what the vendors keep adding here and not making it a free-for-all to shove in crazy stuff" * tag 'drivers-for-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (101 commits) cpufreq: exynos: Fix driver compilation with ARCH_MULTIPLATFORM tty: serial: msm: Remove direct access to GSBI power: reset: keystone-reset: introduce keystone reset driver Documentation: dt: add bindings for keystone pll control controller Documentation: dt: add bindings for keystone reset driver soc: qcom: fix of_device_id table ARM: EXYNOS: Fix kernel panic when unplugging CPU1 on exynos ARM: EXYNOS: Move the driver to drivers/cpuidle directory ARM: EXYNOS: Cleanup all unneeded headers from cpuidle.c ARM: EXYNOS: Pass the AFTR callback to the platform_data ARM: EXYNOS: Move S5P_CHECK_SLEEP into pm.c ARM: EXYNOS: Move the power sequence call in the cpu_pm notifier ARM: EXYNOS: Move the AFTR state function into pm.c ARM: EXYNOS: Encapsulate the AFTR code into a function ARM: EXYNOS: Disable cpuidle for exynos5440 ARM: EXYNOS: Encapsulate boot vector 
code into a function for cpuidle ARM: EXYNOS: Pass wakeup mask parameter to function for cpuidle ARM: EXYNOS: Remove ifdef for scu_enable in pm ARM: EXYNOS: Move scu_enable in the cpu_pm notifier ARM: EXYNOS: Use the cpu_pm notifier for pm ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/bus/Kconfig8
-rw-r--r--drivers/bus/Makefile1
-rw-r--r--drivers/bus/brcmstb_gisb.c289
-rw-r--r--drivers/bus/omap_l3_noc.c406
-rw-r--r--drivers/bus/omap_l3_noc.h545
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c42
-rw-r--r--drivers/clocksource/exynos_mct.c8
-rw-r--r--drivers/cpufreq/Kconfig.arm6
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c2
-rw-r--r--drivers/cpufreq/exynos-cpufreq.h30
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c39
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c40
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c43
-rw-r--r--drivers/cpuidle/Kconfig.arm6
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c99
-rw-r--r--drivers/dma/edma.c335
-rw-r--r--drivers/memory/mvebu-devbus.c229
-rw-r--r--drivers/power/reset/Kconfig15
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/keystone-reset.c166
-rw-r--r--drivers/power/reset/sun6i-reboot.c85
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-socfpga.c146
-rw-r--r--drivers/reset/reset-sunxi.c21
-rw-r--r--drivers/soc/Kconfig5
-rw-r--r--drivers/soc/Makefile5
-rw-r--r--drivers/soc/qcom/Kconfig11
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c85
-rw-r--r--drivers/tty/serial/msm_serial.c48
-rw-r--r--drivers/tty/serial/msm_serial.h5
34 files changed, 2185 insertions, 545 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 0a0a90f52d26..0e87a34b6472 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -132,6 +132,8 @@ source "drivers/staging/Kconfig"
132 132
133source "drivers/platform/Kconfig" 133source "drivers/platform/Kconfig"
134 134
135source "drivers/soc/Kconfig"
136
135source "drivers/clk/Kconfig" 137source "drivers/clk/Kconfig"
136 138
137source "drivers/hwspinlock/Kconfig" 139source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7183b6af5dac..1a1790e4de6a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -33,6 +33,9 @@ obj-y += amba/
33# really early. 33# really early.
34obj-$(CONFIG_DMADEVICES) += dma/ 34obj-$(CONFIG_DMADEVICES) += dma/
35 35
36# SOC specific infrastructure drivers.
37obj-y += soc/
38
36obj-$(CONFIG_VIRTIO) += virtio/ 39obj-$(CONFIG_VIRTIO) += virtio/
37obj-$(CONFIG_XEN) += xen/ 40obj-$(CONFIG_XEN) += xen/
38 41
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 286342778884..a118ec1650fa 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -4,6 +4,14 @@
4 4
5menu "Bus devices" 5menu "Bus devices"
6 6
7config BRCMSTB_GISB_ARB
8 bool "Broadcom STB GISB bus arbiter"
9 depends on ARM
10 help
11 Driver for the Broadcom Set Top Box System-on-a-chip internal bus
12 arbiter. This driver provides timeout and target abort error handling
13 and internal bus master decoding.
14
7config IMX_WEIM 15config IMX_WEIM
8 bool "Freescale EIM DRIVER" 16 bool "Freescale EIM DRIVER"
9 depends on ARCH_MXC 17 depends on ARCH_MXC
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index f095aa771de9..6a4ea7e4af1a 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -2,6 +2,7 @@
2# Makefile for the bus drivers. 2# Makefile for the bus drivers.
3# 3#
4 4
5obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
5obj-$(CONFIG_IMX_WEIM) += imx-weim.o 6obj-$(CONFIG_IMX_WEIM) += imx-weim.o
6obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o 7obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
7obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o 8obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
new file mode 100644
index 000000000000..6159b7752a64
--- /dev/null
+++ b/drivers/bus/brcmstb_gisb.c
@@ -0,0 +1,289 @@
1/*
2 * Copyright (C) 2014 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/types.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/interrupt.h>
19#include <linux/sysfs.h>
20#include <linux/io.h>
21#include <linux/string.h>
22#include <linux/device.h>
23#include <linux/list.h>
24#include <linux/of.h>
25#include <linux/bitops.h>
26
27#include <asm/bug.h>
28#include <asm/signal.h>
29
30#define ARB_TIMER 0x008
31#define ARB_ERR_CAP_CLR 0x7e4
32#define ARB_ERR_CAP_CLEAR (1 << 0)
33#define ARB_ERR_CAP_HI_ADDR 0x7e8
34#define ARB_ERR_CAP_ADDR 0x7ec
35#define ARB_ERR_CAP_DATA 0x7f0
36#define ARB_ERR_CAP_STATUS 0x7f4
37#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
38#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
39#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
40#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
41#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
42#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
43#define ARB_ERR_CAP_MASTER 0x7f8
44
45struct brcmstb_gisb_arb_device {
46 void __iomem *base;
47 struct mutex lock;
48 struct list_head next;
49 u32 valid_mask;
50 const char *master_names[sizeof(u32) * BITS_PER_BYTE];
51};
52
53static LIST_HEAD(brcmstb_gisb_arb_device_list);
54
55static ssize_t gisb_arb_get_timeout(struct device *dev,
56 struct device_attribute *attr,
57 char *buf)
58{
59 struct platform_device *pdev = to_platform_device(dev);
60 struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
61 u32 timeout;
62
63 mutex_lock(&gdev->lock);
64 timeout = ioread32(gdev->base + ARB_TIMER);
65 mutex_unlock(&gdev->lock);
66
67 return sprintf(buf, "%d", timeout);
68}
69
70static ssize_t gisb_arb_set_timeout(struct device *dev,
71 struct device_attribute *attr,
72 const char *buf, size_t count)
73{
74 struct platform_device *pdev = to_platform_device(dev);
75 struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
76 int val, ret;
77
78 ret = kstrtoint(buf, 10, &val);
79 if (ret < 0)
80 return ret;
81
82 if (val == 0 || val >= 0xffffffff)
83 return -EINVAL;
84
85 mutex_lock(&gdev->lock);
86 iowrite32(val, gdev->base + ARB_TIMER);
87 mutex_unlock(&gdev->lock);
88
89 return count;
90}
91
92static const char *
93brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
94 u32 masters)
95{
96 u32 mask = gdev->valid_mask & masters;
97
98 if (hweight_long(mask) != 1)
99 return NULL;
100
101 return gdev->master_names[ffs(mask) - 1];
102}
103
104static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
105 const char *reason)
106{
107 u32 cap_status;
108 unsigned long arb_addr;
109 u32 master;
110 const char *m_name;
111 char m_fmt[11];
112
113 cap_status = ioread32(gdev->base + ARB_ERR_CAP_STATUS);
114
115 /* Invalid captured address, bail out */
116 if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
117 return 1;
118
119 /* Read the address and master */
120 arb_addr = ioread32(gdev->base + ARB_ERR_CAP_ADDR) & 0xffffffff;
121#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
122 arb_addr |= (u64)ioread32(gdev->base + ARB_ERR_CAP_HI_ADDR) << 32;
123#endif
124 master = ioread32(gdev->base + ARB_ERR_CAP_MASTER);
125
126 m_name = brcmstb_gisb_master_to_str(gdev, master);
127 if (!m_name) {
128 snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
129 m_name = m_fmt;
130 }
131
132 pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
133 __func__, reason, arb_addr,
134 cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
135 cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
136 m_name);
137
138 /* clear the GISB error */
139 iowrite32(ARB_ERR_CAP_CLEAR, gdev->base + ARB_ERR_CAP_CLR);
140
141 return 0;
142}
143
144static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
145 struct pt_regs *regs)
146{
147 int ret = 0;
148 struct brcmstb_gisb_arb_device *gdev;
149
150 /* iterate over each GISB arb registered handlers */
151 list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
152 ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
153 /*
154 * If it was an imprecise abort, then we need to correct the
155 * return address to be _after_ the instruction.
156 */
157 if (fsr & (1 << 10))
158 regs->ARM_pc += 4;
159
160 return ret;
161}
162
163void __init brcmstb_hook_fault_code(void)
164{
165 hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
166 "imprecise external abort");
167}
168
169static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
170{
171 brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
172
173 return IRQ_HANDLED;
174}
175
176static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
177{
178 brcmstb_gisb_arb_decode_addr(dev_id, "target abort");
179
180 return IRQ_HANDLED;
181}
182
183static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
184 gisb_arb_get_timeout, gisb_arb_set_timeout);
185
186static struct attribute *gisb_arb_sysfs_attrs[] = {
187 &dev_attr_gisb_arb_timeout.attr,
188 NULL,
189};
190
191static struct attribute_group gisb_arb_sysfs_attr_group = {
192 .attrs = gisb_arb_sysfs_attrs,
193};
194
195static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
196{
197 struct device_node *dn = pdev->dev.of_node;
198 struct brcmstb_gisb_arb_device *gdev;
199 struct resource *r;
200 int err, timeout_irq, tea_irq;
201 unsigned int num_masters, j = 0;
202 int i, first, last;
203
204 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
205 timeout_irq = platform_get_irq(pdev, 0);
206 tea_irq = platform_get_irq(pdev, 1);
207
208 gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
209 if (!gdev)
210 return -ENOMEM;
211
212 mutex_init(&gdev->lock);
213 INIT_LIST_HEAD(&gdev->next);
214
215 gdev->base = devm_request_and_ioremap(&pdev->dev, r);
216 if (!gdev->base)
217 return -ENOMEM;
218
219 err = devm_request_irq(&pdev->dev, timeout_irq,
220 brcmstb_gisb_timeout_handler, 0, pdev->name,
221 gdev);
222 if (err < 0)
223 return err;
224
225 err = devm_request_irq(&pdev->dev, tea_irq,
226 brcmstb_gisb_tea_handler, 0, pdev->name,
227 gdev);
228 if (err < 0)
229 return err;
230
231 /* If we do not have a valid mask, assume all masters are enabled */
232 if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
233 &gdev->valid_mask))
234 gdev->valid_mask = 0xffffffff;
235
 236 /* Proceed with reading the literal names if we agree on the
237 * number of masters
238 */
239 num_masters = of_property_count_strings(dn,
240 "brcm,gisb-arb-master-names");
241 if (hweight_long(gdev->valid_mask) == num_masters) {
242 first = ffs(gdev->valid_mask) - 1;
243 last = fls(gdev->valid_mask) - 1;
244
245 for (i = first; i < last; i++) {
246 if (!(gdev->valid_mask & BIT(i)))
247 continue;
248
249 of_property_read_string_index(dn,
250 "brcm,gisb-arb-master-names", j,
251 &gdev->master_names[i]);
252 j++;
253 }
254 }
255
256 err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
257 if (err)
258 return err;
259
260 platform_set_drvdata(pdev, gdev);
261
262 list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
263
264 dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
265 gdev->base, timeout_irq, tea_irq);
266
267 return 0;
268}
269
270static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
271 { .compatible = "brcm,gisb-arb" },
272 { },
273};
274
275static struct platform_driver brcmstb_gisb_arb_driver = {
276 .probe = brcmstb_gisb_arb_probe,
277 .driver = {
278 .name = "brcm-gisb-arb",
279 .owner = THIS_MODULE,
280 .of_match_table = brcmstb_gisb_arb_of_match,
281 },
282};
283
284static int __init brcm_gisb_driver_init(void)
285{
286 return platform_driver_register(&brcmstb_gisb_arb_driver);
287}
288
289module_init(brcm_gisb_driver_init);
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index feeecae623f6..531ae591783b 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -1,43 +1,45 @@
1/* 1/*
2 * OMAP4XXX L3 Interconnect error handling driver 2 * OMAP L3 Interconnect error handling driver
3 * 3 *
4 * Copyright (C) 2011 Texas Corporation 4 * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
5 * Santosh Shilimkar <santosh.shilimkar@ti.com> 5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * Sricharan <r.sricharan@ti.com> 6 * Sricharan <r.sricharan@ti.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License version 2 as
10 * the Free Software Foundation; either version 2 of the License, or 10 * published by the Free Software Foundation.
11 * (at your option) any later version.
12 * 11 *
13 * This program is distributed in the hope that it will be useful, 12 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * kind, whether express or implied; without even the implied warranty
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 15 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 * USA
22 */ 16 */
23#include <linux/module.h>
24#include <linux/init.h> 17#include <linux/init.h>
25#include <linux/io.h>
26#include <linux/platform_device.h>
27#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/io.h>
28#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/of_device.h>
23#include <linux/of.h>
24#include <linux/platform_device.h>
29#include <linux/slab.h> 25#include <linux/slab.h>
30 26
31#include "omap_l3_noc.h" 27#include "omap_l3_noc.h"
32 28
33/* 29/**
34 * Interrupt Handler for L3 error detection. 30 * l3_handle_target() - Handle Target specific parse and reporting
35 * 1) Identify the L3 clockdomain partition to which the error belongs to. 31 * @l3: pointer to l3 struct
36 * 2) Identify the slave where the error information is logged 32 * @base: base address of clkdm
37 * 3) Print the logged information. 33 * @flag_mux: flagmux corresponding to the event
38 * 4) Add dump stack to provide kernel trace. 34 * @err_src: error source index of the slave (target)
39 * 35 *
40 * Two Types of errors : 36 * This does the second part of the error interrupt handling:
37 * 3) Parse in the slave information
38 * 4) Print the logged information.
39 * 5) Add dump stack to provide kernel trace.
40 * 6) Clear the source if known.
41 *
42 * This handles two types of errors:
41 * 1) Custom errors in L3 : 43 * 1) Custom errors in L3 :
42 * Target like DMM/FW/EMIF generates SRESP=ERR error 44 * Target like DMM/FW/EMIF generates SRESP=ERR error
43 * 2) Standard L3 error: 45 * 2) Standard L3 error:
@@ -53,214 +55,264 @@
53 * can be trapped as well. But the trapping is implemented as part 55 * can be trapped as well. But the trapping is implemented as part
54 * secure software and hence need not be implemented here. 56 * secure software and hence need not be implemented here.
55 */ 57 */
56static irqreturn_t l3_interrupt_handler(int irq, void *_l3) 58static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
59 struct l3_flagmux_data *flag_mux, int err_src)
57{ 60{
61 int k;
62 u32 std_err_main, clear, masterid;
63 u8 op_code, m_req_info;
64 void __iomem *l3_targ_base;
65 void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
66 void __iomem *l3_targ_hdr, *l3_targ_info;
67 struct l3_target_data *l3_targ_inst;
68 struct l3_masters_data *master;
69 char *target_name, *master_name = "UN IDENTIFIED";
70 char *err_description;
71 char err_string[30] = { 0 };
72 char info_string[60] = { 0 };
73
 74 /* We do not expect err_src to go out of bounds */
75 BUG_ON(err_src > MAX_CLKDM_TARGETS);
76
77 if (err_src < flag_mux->num_targ_data) {
78 l3_targ_inst = &flag_mux->l3_targ[err_src];
79 target_name = l3_targ_inst->name;
80 l3_targ_base = base + l3_targ_inst->offset;
81 } else {
82 target_name = L3_TARGET_NOT_SUPPORTED;
83 }
58 84
59 struct omap4_l3 *l3 = _l3; 85 if (target_name == L3_TARGET_NOT_SUPPORTED)
60 int inttype, i, k; 86 return -ENODEV;
87
88 /* Read the stderrlog_main_source from clk domain */
89 l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
90 l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;
91
92 std_err_main = readl_relaxed(l3_targ_stderr);
93
94 switch (std_err_main & CUSTOM_ERROR) {
95 case STANDARD_ERROR:
96 err_description = "Standard";
97 snprintf(err_string, sizeof(err_string),
98 ": At Address: 0x%08X ",
99 readl_relaxed(l3_targ_slvofslsb));
100
101 l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
102 l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
103 l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
104 break;
105
106 case CUSTOM_ERROR:
107 err_description = "Custom";
108
109 l3_targ_mstaddr = l3_targ_base +
110 L3_TARG_STDERRLOG_CINFO_MSTADDR;
111 l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
112 l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
113 break;
114
115 default:
116 /* Nothing to be handled here as of now */
117 return 0;
118 }
119
120 /* STDERRLOG_MSTADDR Stores the NTTP master address. */
121 masterid = (readl_relaxed(l3_targ_mstaddr) &
122 l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
123
124 for (k = 0, master = l3->l3_masters; k < l3->num_masters;
125 k++, master++) {
126 if (masterid == master->id) {
127 master_name = master->name;
128 break;
129 }
130 }
131
132 op_code = readl_relaxed(l3_targ_hdr) & 0x7;
133
134 m_req_info = readl_relaxed(l3_targ_info) & 0xF;
135 snprintf(info_string, sizeof(info_string),
136 ": %s in %s mode during %s access",
137 (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
138 (m_req_info & BIT(1)) ? "Supervisor" : "User",
139 (m_req_info & BIT(3)) ? "Debug" : "Functional");
140
141 WARN(true,
142 "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
143 dev_name(l3->dev),
144 err_description,
145 master_name, target_name,
146 l3_transaction_type[op_code],
147 err_string, info_string);
148
149 /* clear the std error log*/
150 clear = std_err_main | CLEAR_STDERR_LOG;
151 writel_relaxed(clear, l3_targ_stderr);
152
153 return 0;
154}
155
156/**
157 * l3_interrupt_handler() - interrupt handler for l3 events
158 * @irq: irq number
159 * @_l3: pointer to l3 structure
160 *
161 * Interrupt Handler for L3 error detection.
162 * 1) Identify the L3 clockdomain partition to which the error belongs to.
163 * 2) Identify the slave where the error information is logged
164 * ... handle the slave event..
165 * 7) if the slave is unknown, mask out the slave.
166 */
167static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
168{
169 struct omap_l3 *l3 = _l3;
170 int inttype, i, ret;
61 int err_src = 0; 171 int err_src = 0;
62 u32 std_err_main, err_reg, clear, masterid; 172 u32 err_reg, mask_val;
63 void __iomem *base, *l3_targ_base; 173 void __iomem *base, *mask_reg;
64 char *target_name, *master_name = "UN IDENTIFIED"; 174 struct l3_flagmux_data *flag_mux;
65 175
66 /* Get the Type of interrupt */ 176 /* Get the Type of interrupt */
67 inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; 177 inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
68 178
69 for (i = 0; i < L3_MODULES; i++) { 179 for (i = 0; i < l3->num_modules; i++) {
70 /* 180 /*
71 * Read the regerr register of the clock domain 181 * Read the regerr register of the clock domain
72 * to determine the source 182 * to determine the source
73 */ 183 */
74 base = l3->l3_base[i]; 184 base = l3->l3_base[i];
75 err_reg = __raw_readl(base + l3_flagmux[i] + 185 flag_mux = l3->l3_flagmux[i];
76 + L3_FLAGMUX_REGERR0 + (inttype << 3)); 186 err_reg = readl_relaxed(base + flag_mux->offset +
187 L3_FLAGMUX_REGERR0 + (inttype << 3));
188
189 err_reg &= ~(inttype ? flag_mux->mask_app_bits :
190 flag_mux->mask_dbg_bits);
77 191
78 /* Get the corresponding error and analyse */ 192 /* Get the corresponding error and analyse */
79 if (err_reg) { 193 if (err_reg) {
80 /* Identify the source from control status register */ 194 /* Identify the source from control status register */
81 err_src = __ffs(err_reg); 195 err_src = __ffs(err_reg);
82 196
83 /* Read the stderrlog_main_source from clk domain */ 197 ret = l3_handle_target(l3, base, flag_mux, err_src);
84 l3_targ_base = base + *(l3_targ[i] + err_src); 198
85 std_err_main = __raw_readl(l3_targ_base + 199 /*
86 L3_TARG_STDERRLOG_MAIN); 200 * Certain plaforms may have "undocumented" status
87 masterid = __raw_readl(l3_targ_base + 201 * pending on boot. So dont generate a severe warning
88 L3_TARG_STDERRLOG_MSTADDR); 202 * here. Just mask it off to prevent the error from
89 203 * reoccuring and locking up the system.
90 switch (std_err_main & CUSTOM_ERROR) { 204 */
91 case STANDARD_ERROR: 205 if (ret) {
92 target_name = 206 dev_err(l3->dev,
93 l3_targ_inst_name[i][err_src]; 207 "L3 %s error: target %d mod:%d %s\n",
94 WARN(true, "L3 standard error: TARGET:%s at address 0x%x\n", 208 inttype ? "debug" : "application",
95 target_name, 209 err_src, i, "(unclearable)");
96 __raw_readl(l3_targ_base + 210
97 L3_TARG_STDERRLOG_SLVOFSLSB)); 211 mask_reg = base + flag_mux->offset +
98 /* clear the std error log*/ 212 L3_FLAGMUX_MASK0 + (inttype << 3);
99 clear = std_err_main | CLEAR_STDERR_LOG; 213 mask_val = readl_relaxed(mask_reg);
100 writel(clear, l3_targ_base + 214 mask_val &= ~(1 << err_src);
101 L3_TARG_STDERRLOG_MAIN); 215 writel_relaxed(mask_val, mask_reg);
102 break; 216
103 217 /* Mark these bits as to be ignored */
104 case CUSTOM_ERROR: 218 if (inttype)
105 target_name = 219 flag_mux->mask_app_bits |= 1 << err_src;
106 l3_targ_inst_name[i][err_src]; 220 else
107 for (k = 0; k < NUM_OF_L3_MASTERS; k++) { 221 flag_mux->mask_dbg_bits |= 1 << err_src;
108 if (masterid == l3_masters[k].id)
109 master_name =
110 l3_masters[k].name;
111 }
112 WARN(true, "L3 custom error: MASTER:%s TARGET:%s\n",
113 master_name, target_name);
114 /* clear the std error log*/
115 clear = std_err_main | CLEAR_STDERR_LOG;
116 writel(clear, l3_targ_base +
117 L3_TARG_STDERRLOG_MAIN);
118 break;
119
120 default:
121 /* Nothing to be handled here as of now */
122 break;
123 } 222 }
124 /* Error found so break the for loop */ 223
125 break; 224 /* Error found so break the for loop */
225 break;
126 } 226 }
127 } 227 }
128 return IRQ_HANDLED; 228 return IRQ_HANDLED;
129} 229}
130 230
131static int omap4_l3_probe(struct platform_device *pdev) 231static const struct of_device_id l3_noc_match[] = {
232 {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data},
233 {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
234 {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
235 {},
236};
237MODULE_DEVICE_TABLE(of, l3_noc_match);
238
239static int omap_l3_probe(struct platform_device *pdev)
132{ 240{
133 static struct omap4_l3 *l3; 241 const struct of_device_id *of_id;
134 struct resource *res; 242 static struct omap_l3 *l3;
135 int ret; 243 int ret, i, res_idx;
244
245 of_id = of_match_device(l3_noc_match, &pdev->dev);
246 if (!of_id) {
247 dev_err(&pdev->dev, "OF data missing\n");
248 return -EINVAL;
249 }
136 250
137 l3 = kzalloc(sizeof(*l3), GFP_KERNEL); 251 l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
138 if (!l3) 252 if (!l3)
139 return -ENOMEM; 253 return -ENOMEM;
140 254
255 memcpy(l3, of_id->data, sizeof(*l3));
256 l3->dev = &pdev->dev;
141 platform_set_drvdata(pdev, l3); 257 platform_set_drvdata(pdev, l3);
142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
143 if (!res) {
144 dev_err(&pdev->dev, "couldn't find resource 0\n");
145 ret = -ENODEV;
146 goto err0;
147 }
148
149 l3->l3_base[0] = ioremap(res->start, resource_size(res));
150 if (!l3->l3_base[0]) {
151 dev_err(&pdev->dev, "ioremap failed\n");
152 ret = -ENOMEM;
153 goto err0;
154 }
155
156 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
157 if (!res) {
158 dev_err(&pdev->dev, "couldn't find resource 1\n");
159 ret = -ENODEV;
160 goto err1;
161 }
162
163 l3->l3_base[1] = ioremap(res->start, resource_size(res));
164 if (!l3->l3_base[1]) {
165 dev_err(&pdev->dev, "ioremap failed\n");
166 ret = -ENOMEM;
167 goto err1;
168 }
169 258
170 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 259 /* Get mem resources */
171 if (!res) { 260 for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
172 dev_err(&pdev->dev, "couldn't find resource 2\n"); 261 struct resource *res;
173 ret = -ENODEV;
174 goto err2;
175 }
176 262
177 l3->l3_base[2] = ioremap(res->start, resource_size(res)); 263 if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
178 if (!l3->l3_base[2]) { 264 /* First entry cannot be submodule */
179 dev_err(&pdev->dev, "ioremap failed\n"); 265 BUG_ON(i == 0);
180 ret = -ENOMEM; 266 l3->l3_base[i] = l3->l3_base[i - 1];
181 goto err2; 267 continue;
268 }
269 res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
270 l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
271 if (IS_ERR(l3->l3_base[i])) {
272 dev_err(l3->dev, "ioremap %d failed\n", i);
273 return PTR_ERR(l3->l3_base[i]);
274 }
275 res_idx++;
182 } 276 }
183 277
184 /* 278 /*
185 * Setup interrupt Handlers 279 * Setup interrupt Handlers
186 */ 280 */
187 l3->debug_irq = platform_get_irq(pdev, 0); 281 l3->debug_irq = platform_get_irq(pdev, 0);
188 ret = request_irq(l3->debug_irq, 282 ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
189 l3_interrupt_handler, 283 IRQF_DISABLED, "l3-dbg-irq", l3);
190 IRQF_DISABLED, "l3-dbg-irq", l3);
191 if (ret) { 284 if (ret) {
192 pr_crit("L3: request_irq failed to register for 0x%x\n", 285 dev_err(l3->dev, "request_irq failed for %d\n",
193 l3->debug_irq); 286 l3->debug_irq);
194 goto err3; 287 return ret;
195 } 288 }
196 289
197 l3->app_irq = platform_get_irq(pdev, 1); 290 l3->app_irq = platform_get_irq(pdev, 1);
198 ret = request_irq(l3->app_irq, 291 ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
199 l3_interrupt_handler, 292 IRQF_DISABLED, "l3-app-irq", l3);
200 IRQF_DISABLED, "l3-app-irq", l3); 293 if (ret)
201 if (ret) { 294 dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
202 pr_crit("L3: request_irq failed to register for 0x%x\n",
203 l3->app_irq);
204 goto err4;
205 }
206 295
207 return 0;
208
209err4:
210 free_irq(l3->debug_irq, l3);
211err3:
212 iounmap(l3->l3_base[2]);
213err2:
214 iounmap(l3->l3_base[1]);
215err1:
216 iounmap(l3->l3_base[0]);
217err0:
218 kfree(l3);
219 return ret; 296 return ret;
220} 297}
221 298
222static int omap4_l3_remove(struct platform_device *pdev) 299static struct platform_driver omap_l3_driver = {
223{ 300 .probe = omap_l3_probe,
224 struct omap4_l3 *l3 = platform_get_drvdata(pdev);
225
226 free_irq(l3->app_irq, l3);
227 free_irq(l3->debug_irq, l3);
228 iounmap(l3->l3_base[0]);
229 iounmap(l3->l3_base[1]);
230 iounmap(l3->l3_base[2]);
231 kfree(l3);
232
233 return 0;
234}
235
236#if defined(CONFIG_OF)
237static const struct of_device_id l3_noc_match[] = {
238 {.compatible = "ti,omap4-l3-noc", },
239 {},
240};
241MODULE_DEVICE_TABLE(of, l3_noc_match);
242#else
243#define l3_noc_match NULL
244#endif
245
246static struct platform_driver omap4_l3_driver = {
247 .probe = omap4_l3_probe,
248 .remove = omap4_l3_remove,
249 .driver = { 301 .driver = {
250 .name = "omap_l3_noc", 302 .name = "omap_l3_noc",
251 .owner = THIS_MODULE, 303 .owner = THIS_MODULE,
252 .of_match_table = l3_noc_match, 304 .of_match_table = of_match_ptr(l3_noc_match),
253 }, 305 },
254}; 306};
255 307
256static int __init omap4_l3_init(void) 308static int __init omap_l3_init(void)
257{ 309{
258 return platform_driver_register(&omap4_l3_driver); 310 return platform_driver_register(&omap_l3_driver);
259} 311}
260postcore_initcall_sync(omap4_l3_init); 312postcore_initcall_sync(omap_l3_init);
261 313
262static void __exit omap4_l3_exit(void) 314static void __exit omap_l3_exit(void)
263{ 315{
264 platform_driver_unregister(&omap4_l3_driver); 316 platform_driver_unregister(&omap_l3_driver);
265} 317}
266module_exit(omap4_l3_exit); 318module_exit(omap_l3_exit);
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index a6ce34dc4814..551e01061434 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -1,29 +1,25 @@
1/* 1/*
2 * OMAP4XXX L3 Interconnect error handling driver header 2 * OMAP L3 Interconnect error handling driver header
3 * 3 *
4 * Copyright (C) 2011 Texas Corporation 4 * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
5 * Santosh Shilimkar <santosh.shilimkar@ti.com> 5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * sricharan <r.sricharan@ti.com> 6 * sricharan <r.sricharan@ti.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License version 2 as
10 * the Free Software Foundation; either version 2 of the License, or 10 * published by the Free Software Foundation.
11 * (at your option) any later version.
12 * 11 *
13 * This program is distributed in the hope that it will be useful, 12 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * kind, whether express or implied; without even the implied warranty
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 15 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 * USA
22 */ 16 */
23#ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H 17#ifndef __OMAP_L3_NOC_H
24#define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H 18#define __OMAP_L3_NOC_H
19
20#define MAX_L3_MODULES 3
21#define MAX_CLKDM_TARGETS 31
25 22
26#define L3_MODULES 3
27#define CLEAR_STDERR_LOG (1 << 31) 23#define CLEAR_STDERR_LOG (1 << 31)
28#define CUSTOM_ERROR 0x2 24#define CUSTOM_ERROR 0x2
29#define STANDARD_ERROR 0x0 25#define STANDARD_ERROR 0x0
@@ -33,63 +29,165 @@
33 29
34/* L3 TARG register offsets */ 30/* L3 TARG register offsets */
35#define L3_TARG_STDERRLOG_MAIN 0x48 31#define L3_TARG_STDERRLOG_MAIN 0x48
32#define L3_TARG_STDERRLOG_HDR 0x4c
33#define L3_TARG_STDERRLOG_MSTADDR 0x50
34#define L3_TARG_STDERRLOG_INFO 0x58
36#define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c 35#define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c
37#define L3_TARG_STDERRLOG_MSTADDR 0x68 36#define L3_TARG_STDERRLOG_CINFO_INFO 0x64
37#define L3_TARG_STDERRLOG_CINFO_MSTADDR 0x68
38#define L3_TARG_STDERRLOG_CINFO_OPCODE 0x6c
38#define L3_FLAGMUX_REGERR0 0xc 39#define L3_FLAGMUX_REGERR0 0xc
40#define L3_FLAGMUX_MASK0 0x8
41
42#define L3_TARGET_NOT_SUPPORTED NULL
43
44#define L3_BASE_IS_SUBMODULE ((void __iomem *)(1 << 0))
45
46static const char * const l3_transaction_type[] = {
47 /* 0 0 0 */ "Idle",
48 /* 0 0 1 */ "Write",
49 /* 0 1 0 */ "Read",
50 /* 0 1 1 */ "ReadEx",
51 /* 1 0 0 */ "Read Link",
52 /* 1 0 1 */ "Write Non-Posted",
53 /* 1 1 0 */ "Write Conditional",
54 /* 1 1 1 */ "Write Broadcast",
55};
39 56
40#define NUM_OF_L3_MASTERS (sizeof(l3_masters)/sizeof(l3_masters[0])) 57/**
41 58 * struct l3_masters_data - L3 Master information
42static u32 l3_flagmux[L3_MODULES] = { 59 * @id: ID of the L3 Master
43 0x500, 60 * @name: master name
44 0x1000, 61 */
45 0X0200 62struct l3_masters_data {
46};
47
48/* L3 Target standard Error register offsets */
49static u32 l3_targ_inst_clk1[] = {
50 0x100, /* DMM1 */
51 0x200, /* DMM2 */
52 0x300, /* ABE */
53 0x400, /* L4CFG */
54 0x600, /* CLK2 PWR DISC */
55 0x0, /* Host CLK1 */
56 0x900 /* L4 Wakeup */
57};
58
59static u32 l3_targ_inst_clk2[] = {
60 0x500, /* CORTEX M3 */
61 0x300, /* DSS */
62 0x100, /* GPMC */
63 0x400, /* ISS */
64 0x700, /* IVAHD */
65 0xD00, /* missing in TRM corresponds to AES1*/
66 0x900, /* L4 PER0*/
67 0x200, /* OCMRAM */
68 0x100, /* missing in TRM corresponds to GPMC sERROR*/
69 0x600, /* SGX */
70 0x800, /* SL2 */
71 0x1600, /* C2C */
72 0x1100, /* missing in TRM corresponds PWR DISC CLK1*/
73 0xF00, /* missing in TRM corrsponds to SHA1*/
74 0xE00, /* missing in TRM corresponds to AES2*/
75 0xC00, /* L4 PER3 */
76 0xA00, /* L4 PER1*/
77 0xB00, /* L4 PER2*/
78 0x0, /* HOST CLK2 */
79 0x1800, /* CAL */
80 0x1700 /* LLI */
81};
82
83static u32 l3_targ_inst_clk3[] = {
84 0x0100 /* EMUSS */,
85 0x0300, /* DEBUGSS_CT_TBR */
86 0x0 /* HOST CLK3 */
87};
88
89static struct l3_masters_data {
90 u32 id; 63 u32 id;
91 char name[10]; 64 char *name;
92} l3_masters[] = { 65};
66
67/**
68 * struct l3_target_data - L3 Target information
69 * @offset: Offset from base for L3 Target
70 * @name: Target name
71 *
72 * Target information is organized indexed by bit field definitions.
73 */
74struct l3_target_data {
75 u32 offset;
76 char *name;
77};
78
79/**
80 * struct l3_flagmux_data - Flag Mux information
81 * @offset: offset from base for flagmux register
82 * @l3_targ: array indexed by flagmux index (bit offset) pointing to the
83 * target data. unsupported ones are marked with
84 * L3_TARGET_NOT_SUPPORTED
85 * @num_targ_data: number of entries in target data
86 * @mask_app_bits: ignore these from raw application irq status
87 * @mask_dbg_bits: ignore these from raw debug irq status
88 */
89struct l3_flagmux_data {
90 u32 offset;
91 struct l3_target_data *l3_targ;
92 u8 num_targ_data;
93 u32 mask_app_bits;
94 u32 mask_dbg_bits;
95};
96
97
98/**
99 * struct omap_l3 - Description of data relevant for L3 bus.
100 * @dev: device representing the bus (populated runtime)
101 * @l3_base: base addresses of modules (populated runtime if 0)
102 * if set to L3_BASE_IS_SUBMODULE, then uses previous
103 * module index as the base address
104 * @l3_flag_mux: array containing flag mux data per module
105 * offset from corresponding module base indexed per
106 * module.
107 * @num_modules: number of clock domains / modules.
108 * @l3_masters: array pointing to master data containing name and register
109 * offset for the master.
110 * @num_master: number of masters
111 * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet
112 * @debug_irq: irq number of the debug interrupt (populated runtime)
113 * @app_irq: irq number of the application interrupt (populated runtime)
114 */
115struct omap_l3 {
116 struct device *dev;
117
118 void __iomem *l3_base[MAX_L3_MODULES];
119 struct l3_flagmux_data **l3_flagmux;
120 int num_modules;
121
122 struct l3_masters_data *l3_masters;
123 int num_masters;
124 u32 mst_addr_mask;
125
126 int debug_irq;
127 int app_irq;
128};
129
130static struct l3_target_data omap_l3_target_data_clk1[] = {
131 {0x100, "DMM1",},
132 {0x200, "DMM2",},
133 {0x300, "ABE",},
134 {0x400, "L4CFG",},
135 {0x600, "CLK2PWRDISC",},
136 {0x0, "HOSTCLK1",},
137 {0x900, "L4WAKEUP",},
138};
139
140static struct l3_flagmux_data omap_l3_flagmux_clk1 = {
141 .offset = 0x500,
142 .l3_targ = omap_l3_target_data_clk1,
143 .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1),
144};
145
146
147static struct l3_target_data omap_l3_target_data_clk2[] = {
148 {0x500, "CORTEXM3",},
149 {0x300, "DSS",},
150 {0x100, "GPMC",},
151 {0x400, "ISS",},
152 {0x700, "IVAHD",},
153 {0xD00, "AES1",},
154 {0x900, "L4PER0",},
155 {0x200, "OCMRAM",},
156 {0x100, "GPMCsERROR",},
157 {0x600, "SGX",},
158 {0x800, "SL2",},
159 {0x1600, "C2C",},
160 {0x1100, "PWRDISCCLK1",},
161 {0xF00, "SHA1",},
162 {0xE00, "AES2",},
163 {0xC00, "L4PER3",},
164 {0xA00, "L4PER1",},
165 {0xB00, "L4PER2",},
166 {0x0, "HOSTCLK2",},
167 {0x1800, "CAL",},
168 {0x1700, "LLI",},
169};
170
171static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
172 .offset = 0x1000,
173 .l3_targ = omap_l3_target_data_clk2,
174 .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2),
175};
176
177
178static struct l3_target_data omap_l3_target_data_clk3[] = {
179 {0x0100, "EMUSS",},
180 {0x0300, "DEBUG SOURCE",},
181 {0x0, "HOST CLK3",},
182};
183
184static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
185 .offset = 0x0200,
186 .l3_targ = omap_l3_target_data_clk3,
187 .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3),
188};
189
190static struct l3_masters_data omap_l3_masters[] = {
93 { 0x0 , "MPU"}, 191 { 0x0 , "MPU"},
94 { 0x10, "CS_ADP"}, 192 { 0x10, "CS_ADP"},
95 { 0x14, "xxx"}, 193 { 0x14, "xxx"},
@@ -117,60 +215,261 @@ static struct l3_masters_data {
117 { 0xC8, "USBHOSTFS"} 215 { 0xC8, "USBHOSTFS"}
118}; 216};
119 217
120static char *l3_targ_inst_name[L3_MODULES][21] = { 218static struct l3_flagmux_data *omap_l3_flagmux[] = {
121 { 219 &omap_l3_flagmux_clk1,
122 "DMM1", 220 &omap_l3_flagmux_clk2,
123 "DMM2", 221 &omap_l3_flagmux_clk3,
124 "ABE", 222};
125 "L4CFG", 223
126 "CLK2 PWR DISC", 224static const struct omap_l3 omap_l3_data = {
127 "HOST CLK1", 225 .l3_flagmux = omap_l3_flagmux,
128 "L4 WAKEUP" 226 .num_modules = ARRAY_SIZE(omap_l3_flagmux),
129 }, 227 .l3_masters = omap_l3_masters,
130 { 228 .num_masters = ARRAY_SIZE(omap_l3_masters),
131 "CORTEX M3" , 229 /* The 6 MSBs of register field used to distinguish initiator */
132 "DSS ", 230 .mst_addr_mask = 0xFC,
133 "GPMC ", 231};
134 "ISS ",
135 "IVAHD ",
136 "AES1",
137 "L4 PER0",
138 "OCMRAM ",
139 "GPMC sERROR",
140 "SGX ",
141 "SL2 ",
142 "C2C ",
143 "PWR DISC CLK1",
144 "SHA1",
145 "AES2",
146 "L4 PER3",
147 "L4 PER1",
148 "L4 PER2",
149 "HOST CLK2",
150 "CAL",
151 "LLI"
152 },
153 {
154 "EMUSS",
155 "DEBUG SOURCE",
156 "HOST CLK3"
157 },
158};
159
160static u32 *l3_targ[L3_MODULES] = {
161 l3_targ_inst_clk1,
162 l3_targ_inst_clk2,
163 l3_targ_inst_clk3,
164};
165
166struct omap4_l3 {
167 struct device *dev;
168 struct clk *ick;
169 232
170 /* memory base */ 233/* DRA7 data */
171 void __iomem *l3_base[L3_MODULES]; 234static struct l3_target_data dra_l3_target_data_clk1[] = {
235 {0x2a00, "AES1",},
236 {0x0200, "DMM_P1",},
237 {0x0600, "DSP2_SDMA",},
238 {0x0b00, "EVE2",},
239 {0x1300, "DMM_P2",},
240 {0x2c00, "AES2",},
241 {0x0300, "DSP1_SDMA",},
242 {0x0a00, "EVE1",},
243 {0x0c00, "EVE3",},
244 {0x0d00, "EVE4",},
245 {0x2900, "DSS",},
246 {0x0100, "GPMC",},
247 {0x3700, "PCIE1",},
248 {0x1600, "IVA_CONFIG",},
249 {0x1800, "IVA_SL2IF",},
250 {0x0500, "L4_CFG",},
251 {0x1d00, "L4_WKUP",},
252 {0x3800, "PCIE2",},
253 {0x3300, "SHA2_1",},
254 {0x1200, "GPU",},
255 {0x1000, "IPU1",},
256 {0x1100, "IPU2",},
257 {0x2000, "TPCC_EDMA",},
258 {0x2e00, "TPTC1_EDMA",},
259 {0x2b00, "TPTC2_EDMA",},
260 {0x0700, "VCP1",},
261 {0x2500, "L4_PER2_P3",},
262 {0x0e00, "L4_PER3_P3",},
263 {0x2200, "MMU1",},
264 {0x1400, "PRUSS1",},
265 {0x1500, "PRUSS2"},
266 {0x0800, "VCP1",},
267};
172 268
173 int debug_irq; 269static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
174 int app_irq; 270 .offset = 0x803500,
271 .l3_targ = dra_l3_target_data_clk1,
272 .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1),
273};
274
275static struct l3_target_data dra_l3_target_data_clk2[] = {
276 {0x0, "HOST CLK1",},
277 {0x0, "HOST CLK2",},
278 {0xdead, L3_TARGET_NOT_SUPPORTED,},
279 {0x3400, "SHA2_2",},
280 {0x0900, "BB2D",},
281 {0xdead, L3_TARGET_NOT_SUPPORTED,},
282 {0x2100, "L4_PER1_P3",},
283 {0x1c00, "L4_PER1_P1",},
284 {0x1f00, "L4_PER1_P2",},
285 {0x2300, "L4_PER2_P1",},
286 {0x2400, "L4_PER2_P2",},
287 {0x2600, "L4_PER3_P1",},
288 {0x2700, "L4_PER3_P2",},
289 {0x2f00, "MCASP1",},
290 {0x3000, "MCASP2",},
291 {0x3100, "MCASP3",},
292 {0x2800, "MMU2",},
293 {0x0f00, "OCMC_RAM1",},
294 {0x1700, "OCMC_RAM2",},
295 {0x1900, "OCMC_RAM3",},
296 {0x1e00, "OCMC_ROM",},
297 {0x3900, "QSPI",},
298};
299
300static struct l3_flagmux_data dra_l3_flagmux_clk2 = {
301 .offset = 0x803600,
302 .l3_targ = dra_l3_target_data_clk2,
303 .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2),
304};
305
306static struct l3_target_data dra_l3_target_data_clk3[] = {
307 {0x0100, "L3_INSTR"},
308 {0x0300, "DEBUGSS_CT_TBR"},
309 {0x0, "HOST CLK3"},
310};
311
312static struct l3_flagmux_data dra_l3_flagmux_clk3 = {
313 .offset = 0x200,
314 .l3_targ = dra_l3_target_data_clk3,
315 .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3),
316};
317
318static struct l3_masters_data dra_l3_masters[] = {
319 { 0x0, "MPU" },
320 { 0x4, "CS_DAP" },
321 { 0x5, "IEEE1500_2_OCP" },
322 { 0x8, "DSP1_MDMA" },
323 { 0x9, "DSP1_CFG" },
324 { 0xA, "DSP1_DMA" },
325 { 0xB, "DSP2_MDMA" },
326 { 0xC, "DSP2_CFG" },
327 { 0xD, "DSP2_DMA" },
328 { 0xE, "IVA" },
329 { 0x10, "EVE1_P1" },
330 { 0x11, "EVE2_P1" },
331 { 0x12, "EVE3_P1" },
332 { 0x13, "EVE4_P1" },
333 { 0x14, "PRUSS1 PRU1" },
334 { 0x15, "PRUSS1 PRU2" },
335 { 0x16, "PRUSS2 PRU1" },
336 { 0x17, "PRUSS2 PRU2" },
337 { 0x18, "IPU1" },
338 { 0x19, "IPU2" },
339 { 0x1A, "SDMA" },
340 { 0x1B, "CDMA" },
341 { 0x1C, "TC1_EDMA" },
342 { 0x1D, "TC2_EDMA" },
343 { 0x20, "DSS" },
344 { 0x21, "MMU1" },
345 { 0x22, "PCIE1" },
346 { 0x23, "MMU2" },
347 { 0x24, "VIP1" },
348 { 0x25, "VIP2" },
349 { 0x26, "VIP3" },
350 { 0x27, "VPE" },
351 { 0x28, "GPU_P1" },
352 { 0x29, "BB2D" },
353 { 0x29, "GPU_P2" },
354 { 0x2B, "GMAC_SW" },
355 { 0x2C, "USB3" },
356 { 0x2D, "USB2_SS" },
357 { 0x2E, "USB2_ULPI_SS1" },
358 { 0x2F, "USB2_ULPI_SS2" },
359 { 0x30, "CSI2_1" },
360 { 0x31, "CSI2_2" },
361 { 0x33, "SATA" },
362 { 0x34, "EVE1_P2" },
363 { 0x35, "EVE2_P2" },
364 { 0x36, "EVE3_P2" },
365 { 0x37, "EVE4_P2" }
175}; 366};
176#endif 367
368static struct l3_flagmux_data *dra_l3_flagmux[] = {
369 &dra_l3_flagmux_clk1,
370 &dra_l3_flagmux_clk2,
371 &dra_l3_flagmux_clk3,
372};
373
374static const struct omap_l3 dra_l3_data = {
375 .l3_base = { [1] = L3_BASE_IS_SUBMODULE },
376 .l3_flagmux = dra_l3_flagmux,
377 .num_modules = ARRAY_SIZE(dra_l3_flagmux),
378 .l3_masters = dra_l3_masters,
379 .num_masters = ARRAY_SIZE(dra_l3_masters),
380 /* The 6 MSBs of register field used to distinguish initiator */
381 .mst_addr_mask = 0xFC,
382};
383
384/* AM4372 data */
385static struct l3_target_data am4372_l3_target_data_200f[] = {
386 {0xf00, "EMIF",},
387 {0x1200, "DES",},
388 {0x400, "OCMCRAM",},
389 {0x700, "TPTC0",},
390 {0x800, "TPTC1",},
391 {0x900, "TPTC2"},
392 {0xb00, "TPCC",},
393 {0xd00, "DEBUGSS",},
394 {0xdead, L3_TARGET_NOT_SUPPORTED,},
395 {0x200, "SHA",},
396 {0xc00, "SGX530",},
397 {0x500, "AES0",},
398 {0xa00, "L4_FAST",},
399 {0x300, "MPUSS_L2_RAM",},
400 {0x100, "ICSS",},
401};
402
403static struct l3_flagmux_data am4372_l3_flagmux_200f = {
404 .offset = 0x1000,
405 .l3_targ = am4372_l3_target_data_200f,
406 .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f),
407};
408
409static struct l3_target_data am4372_l3_target_data_100s[] = {
410 {0x100, "L4_PER_0",},
411 {0x200, "L4_PER_1",},
412 {0x300, "L4_PER_2",},
413 {0x400, "L4_PER_3",},
414 {0x800, "McASP0",},
415 {0x900, "McASP1",},
416 {0xC00, "MMCHS2",},
417 {0x700, "GPMC",},
418 {0xD00, "L4_FW",},
419 {0xdead, L3_TARGET_NOT_SUPPORTED,},
420 {0x500, "ADCTSC",},
421 {0xE00, "L4_WKUP",},
422 {0xA00, "MAG_CARD",},
423};
424
425static struct l3_flagmux_data am4372_l3_flagmux_100s = {
426 .offset = 0x600,
427 .l3_targ = am4372_l3_target_data_100s,
428 .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s),
429};
430
431static struct l3_masters_data am4372_l3_masters[] = {
432 { 0x0, "M1 (128-bit)"},
433 { 0x1, "M2 (64-bit)"},
434 { 0x4, "DAP"},
435 { 0x5, "P1500"},
436 { 0xC, "ICSS0"},
437 { 0xD, "ICSS1"},
438 { 0x14, "Wakeup Processor"},
439 { 0x18, "TPTC0 Read"},
440 { 0x19, "TPTC0 Write"},
441 { 0x1A, "TPTC1 Read"},
442 { 0x1B, "TPTC1 Write"},
443 { 0x1C, "TPTC2 Read"},
444 { 0x1D, "TPTC2 Write"},
445 { 0x20, "SGX530"},
446 { 0x21, "OCP WP Traffic Probe"},
447 { 0x22, "OCP WP DMA Profiling"},
448 { 0x23, "OCP WP Event Trace"},
449 { 0x25, "DSS"},
450 { 0x28, "Crypto DMA RD"},
451 { 0x29, "Crypto DMA WR"},
452 { 0x2C, "VPFE0"},
453 { 0x2D, "VPFE1"},
454 { 0x30, "GEMAC"},
455 { 0x34, "USB0 RD"},
456 { 0x35, "USB0 WR"},
457 { 0x36, "USB1 RD"},
458 { 0x37, "USB1 WR"},
459};
460
461static struct l3_flagmux_data *am4372_l3_flagmux[] = {
462 &am4372_l3_flagmux_200f,
463 &am4372_l3_flagmux_100s,
464};
465
466static const struct omap_l3 am4372_l3_data = {
467 .l3_flagmux = am4372_l3_flagmux,
468 .num_modules = ARRAY_SIZE(am4372_l3_flagmux),
469 .l3_masters = am4372_l3_masters,
470 .num_masters = ARRAY_SIZE(am4372_l3_masters),
471 /* All 6 bits of register field used to distinguish initiator */
472 .mst_addr_mask = 0x3F,
473};
474
475#endif /* __OMAP_L3_NOC_H */
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 870e18b9a687..1fad4c5e3f5d 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -24,6 +24,8 @@
24#define APLL_CON0 0x100 24#define APLL_CON0 0x100
25#define SRC_CPU 0x200 25#define SRC_CPU 0x200
26#define DIV_CPU0 0x500 26#define DIV_CPU0 0x500
27#define PWR_CTRL1 0x1020
28#define PWR_CTRL2 0x1024
27#define MPLL_LOCK 0x4000 29#define MPLL_LOCK 0x4000
28#define MPLL_CON0 0x4100 30#define MPLL_CON0 0x4100
29#define SRC_CORE1 0x4204 31#define SRC_CORE1 0x4204
@@ -84,6 +86,23 @@
84#define SRC_CDREX 0x20200 86#define SRC_CDREX 0x20200
85#define PLL_DIV2_SEL 0x20a24 87#define PLL_DIV2_SEL 0x20a24
86 88
89/*Below definitions are used for PWR_CTRL settings*/
90#define PWR_CTRL1_CORE2_DOWN_RATIO (7 << 28)
91#define PWR_CTRL1_CORE1_DOWN_RATIO (7 << 16)
92#define PWR_CTRL1_DIV2_DOWN_EN (1 << 9)
93#define PWR_CTRL1_DIV1_DOWN_EN (1 << 8)
94#define PWR_CTRL1_USE_CORE1_WFE (1 << 5)
95#define PWR_CTRL1_USE_CORE0_WFE (1 << 4)
96#define PWR_CTRL1_USE_CORE1_WFI (1 << 1)
97#define PWR_CTRL1_USE_CORE0_WFI (1 << 0)
98
99#define PWR_CTRL2_DIV2_UP_EN (1 << 25)
100#define PWR_CTRL2_DIV1_UP_EN (1 << 24)
101#define PWR_CTRL2_DUR_STANDBY2_VAL (1 << 16)
102#define PWR_CTRL2_DUR_STANDBY1_VAL (1 << 8)
103#define PWR_CTRL2_CORE2_UP_RATIO (1 << 4)
104#define PWR_CTRL2_CORE1_UP_RATIO (1 << 0)
105
87/* list of PLLs to be registered */ 106/* list of PLLs to be registered */
88enum exynos5250_plls { 107enum exynos5250_plls {
89 apll, mpll, cpll, epll, vpll, gpll, bpll, 108 apll, mpll, cpll, epll, vpll, gpll, bpll,
@@ -102,6 +121,8 @@ static struct samsung_clk_reg_dump *exynos5250_save;
102static unsigned long exynos5250_clk_regs[] __initdata = { 121static unsigned long exynos5250_clk_regs[] __initdata = {
103 SRC_CPU, 122 SRC_CPU,
104 DIV_CPU0, 123 DIV_CPU0,
124 PWR_CTRL1,
125 PWR_CTRL2,
105 SRC_CORE1, 126 SRC_CORE1,
106 SRC_TOP0, 127 SRC_TOP0,
107 SRC_TOP1, 128 SRC_TOP1,
@@ -736,6 +757,7 @@ static struct of_device_id ext_clk_match[] __initdata = {
736static void __init exynos5250_clk_init(struct device_node *np) 757static void __init exynos5250_clk_init(struct device_node *np)
737{ 758{
738 struct samsung_clk_provider *ctx; 759 struct samsung_clk_provider *ctx;
760 unsigned int tmp;
739 761
740 if (np) { 762 if (np) {
741 reg_base = of_iomap(np, 0); 763 reg_base = of_iomap(np, 0);
@@ -776,6 +798,26 @@ static void __init exynos5250_clk_init(struct device_node *np)
776 samsung_clk_register_gate(ctx, exynos5250_gate_clks, 798 samsung_clk_register_gate(ctx, exynos5250_gate_clks,
777 ARRAY_SIZE(exynos5250_gate_clks)); 799 ARRAY_SIZE(exynos5250_gate_clks));
778 800
801 /*
802 * Enable arm clock down (in idle) and set arm divider
803 * ratios in WFI/WFE state.
804 */
805 tmp = (PWR_CTRL1_CORE2_DOWN_RATIO | PWR_CTRL1_CORE1_DOWN_RATIO |
806 PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN |
807 PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE |
808 PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI);
809 __raw_writel(tmp, reg_base + PWR_CTRL1);
810
811 /*
812 * Enable arm clock up (on exiting idle). Set arm divider
813 * ratios when not in idle along with the standby duration
814 * ratios.
815 */
816 tmp = (PWR_CTRL2_DIV2_UP_EN | PWR_CTRL2_DIV1_UP_EN |
817 PWR_CTRL2_DUR_STANDBY2_VAL | PWR_CTRL2_DUR_STANDBY1_VAL |
818 PWR_CTRL2_CORE2_UP_RATIO | PWR_CTRL2_CORE1_UP_RATIO);
819 __raw_writel(tmp, reg_base + PWR_CTRL2);
820
779 exynos5250_clk_sleep_init(); 821 exynos5250_clk_sleep_init();
780 822
781 pr_info("Exynos5250: clock setup completed, armclk=%ld\n", 823 pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index acf5a329d538..8d6420013a04 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -24,6 +24,7 @@
24#include <linux/of_irq.h> 24#include <linux/of_irq.h>
25#include <linux/of_address.h> 25#include <linux/of_address.h>
26#include <linux/clocksource.h> 26#include <linux/clocksource.h>
27#include <linux/sched_clock.h>
27 28
28#define EXYNOS4_MCTREG(x) (x) 29#define EXYNOS4_MCTREG(x) (x)
29#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100) 30#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
@@ -192,12 +193,19 @@ struct clocksource mct_frc = {
192 .resume = exynos4_frc_resume, 193 .resume = exynos4_frc_resume,
193}; 194};
194 195
196static u64 notrace exynos4_read_sched_clock(void)
197{
198 return exynos4_frc_read(&mct_frc);
199}
200
195static void __init exynos4_clocksource_init(void) 201static void __init exynos4_clocksource_init(void)
196{ 202{
197 exynos4_mct_frc_start(0, 0); 203 exynos4_mct_frc_start(0, 0);
198 204
199 if (clocksource_register_hz(&mct_frc, clk_rate)) 205 if (clocksource_register_hz(&mct_frc, clk_rate))
200 panic("%s: can't register clocksource\n", mct_frc.name); 206 panic("%s: can't register clocksource\n", mct_frc.name);
207
208 sched_clock_register(exynos4_read_sched_clock, 64, clk_rate);
201} 209}
202 210
203static void exynos4_mct_comp0_stop(void) 211static void exynos4_mct_comp0_stop(void)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 580503513f0f..d2c7b4b8ffd5 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -30,7 +30,7 @@ config ARM_EXYNOS_CPUFREQ
30 30
31config ARM_EXYNOS4210_CPUFREQ 31config ARM_EXYNOS4210_CPUFREQ
32 bool "SAMSUNG EXYNOS4210" 32 bool "SAMSUNG EXYNOS4210"
33 depends on CPU_EXYNOS4210 && !ARCH_MULTIPLATFORM 33 depends on CPU_EXYNOS4210
34 default y 34 default y
35 select ARM_EXYNOS_CPUFREQ 35 select ARM_EXYNOS_CPUFREQ
36 help 36 help
@@ -41,7 +41,7 @@ config ARM_EXYNOS4210_CPUFREQ
41 41
42config ARM_EXYNOS4X12_CPUFREQ 42config ARM_EXYNOS4X12_CPUFREQ
43 bool "SAMSUNG EXYNOS4x12" 43 bool "SAMSUNG EXYNOS4x12"
44 depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM 44 depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
45 default y 45 default y
46 select ARM_EXYNOS_CPUFREQ 46 select ARM_EXYNOS_CPUFREQ
47 help 47 help
@@ -52,7 +52,7 @@ config ARM_EXYNOS4X12_CPUFREQ
52 52
53config ARM_EXYNOS5250_CPUFREQ 53config ARM_EXYNOS5250_CPUFREQ
54 bool "SAMSUNG EXYNOS5250" 54 bool "SAMSUNG EXYNOS5250"
55 depends on SOC_EXYNOS5250 && !ARCH_MULTIPLATFORM 55 depends on SOC_EXYNOS5250
56 default y 56 default y
57 select ARM_EXYNOS_CPUFREQ 57 select ARM_EXYNOS_CPUFREQ
58 help 58 help
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index e8a4a7ed38c1..348c8bafe436 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -19,8 +19,6 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/of.h> 20#include <linux/of.h>
21 21
22#include <plat/cpu.h>
23
24#include "exynos-cpufreq.h" 22#include "exynos-cpufreq.h"
25 23
26static struct exynos_dvfs_info *exynos_info; 24static struct exynos_dvfs_info *exynos_info;
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
index f189547bb447..51af42e1b7fe 100644
--- a/drivers/cpufreq/exynos-cpufreq.h
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -49,6 +49,7 @@ struct exynos_dvfs_info {
49 struct cpufreq_frequency_table *freq_table; 49 struct cpufreq_frequency_table *freq_table;
50 void (*set_freq)(unsigned int, unsigned int); 50 void (*set_freq)(unsigned int, unsigned int);
51 bool (*need_apll_change)(unsigned int, unsigned int); 51 bool (*need_apll_change)(unsigned int, unsigned int);
52 void __iomem *cmu_regs;
52}; 53};
53 54
54#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ 55#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
@@ -76,24 +77,21 @@ static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
76} 77}
77#endif 78#endif
78 79
79#include <plat/cpu.h> 80#define EXYNOS4_CLKSRC_CPU 0x14200
80#include <mach/map.h> 81#define EXYNOS4_CLKMUX_STATCPU 0x14400
81 82
82#define EXYNOS4_CLKSRC_CPU (S5P_VA_CMU + 0x14200) 83#define EXYNOS4_CLKDIV_CPU 0x14500
83#define EXYNOS4_CLKMUX_STATCPU (S5P_VA_CMU + 0x14400) 84#define EXYNOS4_CLKDIV_CPU1 0x14504
84 85#define EXYNOS4_CLKDIV_STATCPU 0x14600
85#define EXYNOS4_CLKDIV_CPU (S5P_VA_CMU + 0x14500) 86#define EXYNOS4_CLKDIV_STATCPU1 0x14604
86#define EXYNOS4_CLKDIV_CPU1 (S5P_VA_CMU + 0x14504)
87#define EXYNOS4_CLKDIV_STATCPU (S5P_VA_CMU + 0x14600)
88#define EXYNOS4_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x14604)
89 87
90#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16) 88#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16)
91#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT) 89#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
92 90
93#define EXYNOS5_APLL_LOCK (S5P_VA_CMU + 0x00000) 91#define EXYNOS5_APLL_LOCK 0x00000
94#define EXYNOS5_APLL_CON0 (S5P_VA_CMU + 0x00100) 92#define EXYNOS5_APLL_CON0 0x00100
95#define EXYNOS5_CLKMUX_STATCPU (S5P_VA_CMU + 0x00400) 93#define EXYNOS5_CLKMUX_STATCPU 0x00400
96#define EXYNOS5_CLKDIV_CPU0 (S5P_VA_CMU + 0x00500) 94#define EXYNOS5_CLKDIV_CPU0 0x00500
97#define EXYNOS5_CLKDIV_CPU1 (S5P_VA_CMU + 0x00504) 95#define EXYNOS5_CLKDIV_CPU1 0x00504
98#define EXYNOS5_CLKDIV_STATCPU0 (S5P_VA_CMU + 0x00600) 96#define EXYNOS5_CLKDIV_STATCPU0 0x00600
99#define EXYNOS5_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x00604) 97#define EXYNOS5_CLKDIV_STATCPU1 0x00604
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index 6384e5b9a347..61a54310a1b9 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -16,6 +16,8 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/of.h>
20#include <linux/of_address.h>
19 21
20#include "exynos-cpufreq.h" 22#include "exynos-cpufreq.h"
21 23
@@ -23,6 +25,7 @@ static struct clk *cpu_clk;
23static struct clk *moutcore; 25static struct clk *moutcore;
24static struct clk *mout_mpll; 26static struct clk *mout_mpll;
25static struct clk *mout_apll; 27static struct clk *mout_apll;
28static struct exynos_dvfs_info *cpufreq;
26 29
27static unsigned int exynos4210_volt_table[] = { 30static unsigned int exynos4210_volt_table[] = {
28 1250000, 1150000, 1050000, 975000, 950000, 31 1250000, 1150000, 1050000, 975000, 950000,
@@ -60,20 +63,20 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
60 63
61 tmp = apll_freq_4210[div_index].clk_div_cpu0; 64 tmp = apll_freq_4210[div_index].clk_div_cpu0;
62 65
63 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); 66 __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
64 67
65 do { 68 do {
66 tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU); 69 tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU);
67 } while (tmp & 0x1111111); 70 } while (tmp & 0x1111111);
68 71
69 /* Change Divider - CPU1 */ 72 /* Change Divider - CPU1 */
70 73
71 tmp = apll_freq_4210[div_index].clk_div_cpu1; 74 tmp = apll_freq_4210[div_index].clk_div_cpu1;
72 75
73 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); 76 __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
74 77
75 do { 78 do {
76 tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1); 79 tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
77 } while (tmp & 0x11); 80 } while (tmp & 0x11);
78} 81}
79 82
@@ -85,7 +88,7 @@ static void exynos4210_set_apll(unsigned int index)
85 clk_set_parent(moutcore, mout_mpll); 88 clk_set_parent(moutcore, mout_mpll);
86 89
87 do { 90 do {
88 tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) 91 tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
89 >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); 92 >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
90 tmp &= 0x7; 93 tmp &= 0x7;
91 } while (tmp != 0x2); 94 } while (tmp != 0x2);
@@ -96,7 +99,7 @@ static void exynos4210_set_apll(unsigned int index)
96 clk_set_parent(moutcore, mout_apll); 99 clk_set_parent(moutcore, mout_apll);
97 100
98 do { 101 do {
99 tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); 102 tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
100 tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; 103 tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
101 } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); 104 } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
102} 105}
@@ -115,8 +118,30 @@ static void exynos4210_set_frequency(unsigned int old_index,
115 118
116int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) 119int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
117{ 120{
121 struct device_node *np;
118 unsigned long rate; 122 unsigned long rate;
119 123
124 /*
125 * HACK: This is a temporary workaround to get access to clock
126 * controller registers directly and remove static mappings and
127 * dependencies on platform headers. It is necessary to enable
128 * Exynos multi-platform support and will be removed together with
129 * this whole driver as soon as Exynos gets migrated to use
130 * cpufreq-cpu0 driver.
131 */
132 np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock");
133 if (!np) {
134 pr_err("%s: failed to find clock controller DT node\n",
135 __func__);
136 return -ENODEV;
137 }
138
139 info->cmu_regs = of_iomap(np, 0);
140 if (!info->cmu_regs) {
141 pr_err("%s: failed to map CMU registers\n", __func__);
142 return -EFAULT;
143 }
144
120 cpu_clk = clk_get(NULL, "armclk"); 145 cpu_clk = clk_get(NULL, "armclk");
121 if (IS_ERR(cpu_clk)) 146 if (IS_ERR(cpu_clk))
122 return PTR_ERR(cpu_clk); 147 return PTR_ERR(cpu_clk);
@@ -143,6 +168,8 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
143 info->freq_table = exynos4210_freq_table; 168 info->freq_table = exynos4210_freq_table;
144 info->set_freq = exynos4210_set_frequency; 169 info->set_freq = exynos4210_set_frequency;
145 170
171 cpufreq = info;
172
146 return 0; 173 return 0;
147 174
148err_mout_apll: 175err_mout_apll:
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 63a3907ce578..351a2074cfea 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -16,6 +16,8 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/of.h>
20#include <linux/of_address.h>
19 21
20#include "exynos-cpufreq.h" 22#include "exynos-cpufreq.h"
21 23
@@ -23,6 +25,7 @@ static struct clk *cpu_clk;
23static struct clk *moutcore; 25static struct clk *moutcore;
24static struct clk *mout_mpll; 26static struct clk *mout_mpll;
25static struct clk *mout_apll; 27static struct clk *mout_apll;
28static struct exynos_dvfs_info *cpufreq;
26 29
27static unsigned int exynos4x12_volt_table[] = { 30static unsigned int exynos4x12_volt_table[] = {
28 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500, 31 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
@@ -105,19 +108,20 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
105 108
106 tmp = apll_freq_4x12[div_index].clk_div_cpu0; 109 tmp = apll_freq_4x12[div_index].clk_div_cpu0;
107 110
108 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); 111 __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
109 112
110 while (__raw_readl(EXYNOS4_CLKDIV_STATCPU) & 0x11111111) 113 while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU)
114 & 0x11111111)
111 cpu_relax(); 115 cpu_relax();
112 116
113 /* Change Divider - CPU1 */ 117 /* Change Divider - CPU1 */
114 tmp = apll_freq_4x12[div_index].clk_div_cpu1; 118 tmp = apll_freq_4x12[div_index].clk_div_cpu1;
115 119
116 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); 120 __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
117 121
118 do { 122 do {
119 cpu_relax(); 123 cpu_relax();
120 tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1); 124 tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
121 } while (tmp != 0x0); 125 } while (tmp != 0x0);
122} 126}
123 127
@@ -130,7 +134,7 @@ static void exynos4x12_set_apll(unsigned int index)
130 134
131 do { 135 do {
132 cpu_relax(); 136 cpu_relax();
133 tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) 137 tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
134 >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); 138 >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
135 tmp &= 0x7; 139 tmp &= 0x7;
136 } while (tmp != 0x2); 140 } while (tmp != 0x2);
@@ -142,7 +146,7 @@ static void exynos4x12_set_apll(unsigned int index)
142 146
143 do { 147 do {
144 cpu_relax(); 148 cpu_relax();
145 tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); 149 tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
146 tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; 150 tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
147 } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); 151 } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
148} 152}
@@ -161,8 +165,30 @@ static void exynos4x12_set_frequency(unsigned int old_index,
161 165
162int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) 166int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
163{ 167{
168 struct device_node *np;
164 unsigned long rate; 169 unsigned long rate;
165 170
171 /*
172 * HACK: This is a temporary workaround to get access to clock
173 * controller registers directly and remove static mappings and
174 * dependencies on platform headers. It is necessary to enable
175 * Exynos multi-platform support and will be removed together with
176 * this whole driver as soon as Exynos gets migrated to use
177 * cpufreq-cpu0 driver.
178 */
179 np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
180 if (!np) {
181 pr_err("%s: failed to find clock controller DT node\n",
182 __func__);
183 return -ENODEV;
184 }
185
186 info->cmu_regs = of_iomap(np, 0);
187 if (!info->cmu_regs) {
188 pr_err("%s: failed to map CMU registers\n", __func__);
189 return -EFAULT;
190 }
191
166 cpu_clk = clk_get(NULL, "armclk"); 192 cpu_clk = clk_get(NULL, "armclk");
167 if (IS_ERR(cpu_clk)) 193 if (IS_ERR(cpu_clk))
168 return PTR_ERR(cpu_clk); 194 return PTR_ERR(cpu_clk);
@@ -194,6 +220,8 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
194 info->freq_table = exynos4x12_freq_table; 220 info->freq_table = exynos4x12_freq_table;
195 info->set_freq = exynos4x12_set_frequency; 221 info->set_freq = exynos4x12_set_frequency;
196 222
223 cpufreq = info;
224
197 return 0; 225 return 0;
198 226
199err_mout_apll: 227err_mout_apll:
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 363a0b3fe1b1..c91ce69dc631 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -16,8 +16,8 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19 19#include <linux/of.h>
20#include <mach/map.h> 20#include <linux/of_address.h>
21 21
22#include "exynos-cpufreq.h" 22#include "exynos-cpufreq.h"
23 23
@@ -25,6 +25,7 @@ static struct clk *cpu_clk;
25static struct clk *moutcore; 25static struct clk *moutcore;
26static struct clk *mout_mpll; 26static struct clk *mout_mpll;
27static struct clk *mout_apll; 27static struct clk *mout_apll;
28static struct exynos_dvfs_info *cpufreq;
28 29
29static unsigned int exynos5250_volt_table[] = { 30static unsigned int exynos5250_volt_table[] = {
30 1300000, 1250000, 1225000, 1200000, 1150000, 31 1300000, 1250000, 1225000, 1200000, 1150000,
@@ -87,17 +88,18 @@ static void set_clkdiv(unsigned int div_index)
87 88
88 tmp = apll_freq_5250[div_index].clk_div_cpu0; 89 tmp = apll_freq_5250[div_index].clk_div_cpu0;
89 90
90 __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0); 91 __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0);
91 92
92 while (__raw_readl(EXYNOS5_CLKDIV_STATCPU0) & 0x11111111) 93 while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0)
94 & 0x11111111)
93 cpu_relax(); 95 cpu_relax();
94 96
95 /* Change Divider - CPU1 */ 97 /* Change Divider - CPU1 */
96 tmp = apll_freq_5250[div_index].clk_div_cpu1; 98 tmp = apll_freq_5250[div_index].clk_div_cpu1;
97 99
98 __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1); 100 __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1);
99 101
100 while (__raw_readl(EXYNOS5_CLKDIV_STATCPU1) & 0x11) 102 while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11)
101 cpu_relax(); 103 cpu_relax();
102} 104}
103 105
@@ -111,7 +113,8 @@ static void set_apll(unsigned int index)
111 113
112 do { 114 do {
113 cpu_relax(); 115 cpu_relax();
114 tmp = (__raw_readl(EXYNOS5_CLKMUX_STATCPU) >> 16); 116 tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU)
117 >> 16);
115 tmp &= 0x7; 118 tmp &= 0x7;
116 } while (tmp != 0x2); 119 } while (tmp != 0x2);
117 120
@@ -122,7 +125,7 @@ static void set_apll(unsigned int index)
122 125
123 do { 126 do {
124 cpu_relax(); 127 cpu_relax();
125 tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU); 128 tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU);
126 tmp &= (0x7 << 16); 129 tmp &= (0x7 << 16);
127 } while (tmp != (0x1 << 16)); 130 } while (tmp != (0x1 << 16));
128} 131}
@@ -141,8 +144,30 @@ static void exynos5250_set_frequency(unsigned int old_index,
141 144
142int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) 145int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
143{ 146{
147 struct device_node *np;
144 unsigned long rate; 148 unsigned long rate;
145 149
150 /*
151 * HACK: This is a temporary workaround to get access to clock
152 * controller registers directly and remove static mappings and
153 * dependencies on platform headers. It is necessary to enable
154 * Exynos multi-platform support and will be removed together with
155 * this whole driver as soon as Exynos gets migrated to use
156 * cpufreq-cpu0 driver.
157 */
158 np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
159 if (!np) {
160 pr_err("%s: failed to find clock controller DT node\n",
161 __func__);
162 return -ENODEV;
163 }
164
165 info->cmu_regs = of_iomap(np, 0);
166 if (!info->cmu_regs) {
167 pr_err("%s: failed to map CMU registers\n", __func__);
168 return -EFAULT;
169 }
170
146 cpu_clk = clk_get(NULL, "armclk"); 171 cpu_clk = clk_get(NULL, "armclk");
147 if (IS_ERR(cpu_clk)) 172 if (IS_ERR(cpu_clk))
148 return PTR_ERR(cpu_clk); 173 return PTR_ERR(cpu_clk);
@@ -169,6 +194,8 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
169 info->freq_table = exynos5250_freq_table; 194 info->freq_table = exynos5250_freq_table;
170 info->set_freq = exynos5250_set_frequency; 195 info->set_freq = exynos5250_set_frequency;
171 196
197 cpufreq = info;
198
172 return 0; 199 return 0;
173 200
174err_mout_apll: 201err_mout_apll:
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 5bb94780d377..ae1d78ea7df7 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -49,3 +49,9 @@ config ARM_AT91_CPUIDLE
49 depends on ARCH_AT91 49 depends on ARCH_AT91
50 help 50 help
51 Select this to enable cpuidle for AT91 processors 51 Select this to enable cpuidle for AT91 processors
52
53config ARM_EXYNOS_CPUIDLE
54 bool "Cpu Idle Driver for the Exynos processors"
55 depends on ARCH_EXYNOS
56 help
57 Select this to enable cpuidle for Exynos processors
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 9902d052bd87..cd3ab59f8461 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
14obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o 14obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
15obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o 15obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
16obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o 16obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
17obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
17 18
18############################################################################### 19###############################################################################
19# POWERPC drivers 20# POWERPC drivers
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
new file mode 100644
index 000000000000..7c0151263828
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -0,0 +1,99 @@
1/* linux/arch/arm/mach-exynos/cpuidle.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9*/
10
11#include <linux/cpuidle.h>
12#include <linux/cpu_pm.h>
13#include <linux/export.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16
17#include <asm/proc-fns.h>
18#include <asm/suspend.h>
19#include <asm/cpuidle.h>
20
21static void (*exynos_enter_aftr)(void);
22
23static int idle_finisher(unsigned long flags)
24{
25 exynos_enter_aftr();
26 cpu_do_idle();
27
28 return 1;
29}
30
31static int exynos_enter_core0_aftr(struct cpuidle_device *dev,
32 struct cpuidle_driver *drv,
33 int index)
34{
35 cpu_pm_enter();
36 cpu_suspend(0, idle_finisher);
37 cpu_pm_exit();
38
39 return index;
40}
41
42static int exynos_enter_lowpower(struct cpuidle_device *dev,
43 struct cpuidle_driver *drv,
44 int index)
45{
46 int new_index = index;
47
48 /* AFTR can only be entered when cores other than CPU0 are offline */
49 if (num_online_cpus() > 1 || dev->cpu != 0)
50 new_index = drv->safe_state_index;
51
52 if (new_index == 0)
53 return arm_cpuidle_simple_enter(dev, drv, new_index);
54 else
55 return exynos_enter_core0_aftr(dev, drv, new_index);
56}
57
58static struct cpuidle_driver exynos_idle_driver = {
59 .name = "exynos_idle",
60 .owner = THIS_MODULE,
61 .states = {
62 [0] = ARM_CPUIDLE_WFI_STATE,
63 [1] = {
64 .enter = exynos_enter_lowpower,
65 .exit_latency = 300,
66 .target_residency = 100000,
67 .flags = CPUIDLE_FLAG_TIME_VALID,
68 .name = "C1",
69 .desc = "ARM power down",
70 },
71 },
72 .state_count = 2,
73 .safe_state_index = 0,
74};
75
76static int exynos_cpuidle_probe(struct platform_device *pdev)
77{
78 int ret;
79
80 exynos_enter_aftr = (void *)(pdev->dev.platform_data);
81
82 ret = cpuidle_register(&exynos_idle_driver, NULL);
83 if (ret) {
84 dev_err(&pdev->dev, "failed to register cpuidle driver\n");
85 return ret;
86 }
87
88 return 0;
89}
90
91static struct platform_driver exynos_cpuidle_driver = {
92 .probe = exynos_cpuidle_probe,
93 .driver = {
94 .name = "exynos_cpuidle",
95 .owner = THIS_MODULE,
96 },
97};
98
99module_platform_driver(exynos_cpuidle_driver);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 926360c2db6a..d08c4dedef35 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -57,14 +57,48 @@
57#define EDMA_MAX_SLOTS MAX_NR_SG 57#define EDMA_MAX_SLOTS MAX_NR_SG
58#define EDMA_DESCRIPTORS 16 58#define EDMA_DESCRIPTORS 16
59 59
60struct edma_pset {
61 u32 len;
62 dma_addr_t addr;
63 struct edmacc_param param;
64};
65
60struct edma_desc { 66struct edma_desc {
61 struct virt_dma_desc vdesc; 67 struct virt_dma_desc vdesc;
62 struct list_head node; 68 struct list_head node;
69 enum dma_transfer_direction direction;
63 int cyclic; 70 int cyclic;
64 int absync; 71 int absync;
65 int pset_nr; 72 int pset_nr;
73 struct edma_chan *echan;
66 int processed; 74 int processed;
67 struct edmacc_param pset[0]; 75
76 /*
77 * The following 4 elements are used for residue accounting.
78 *
79 * - processed_stat: the number of SG elements we have traversed
80 * so far to cover accounting. This is updated directly to processed
81 * during edma_callback and is always <= processed, because processed
82 * refers to the number of pending transfer (programmed to EDMA
83 * controller), where as processed_stat tracks number of transfers
84 * accounted for so far.
85 *
86 * - residue: The amount of bytes we have left to transfer for this desc
87 *
88 * - residue_stat: The residue in bytes of data we have covered
89 * so far for accounting. This is updated directly to residue
90 * during callbacks to keep it current.
91 *
92 * - sg_len: Tracks the length of the current intermediate transfer,
93 * this is required to update the residue during intermediate transfer
94 * completion callback.
95 */
96 int processed_stat;
97 u32 sg_len;
98 u32 residue;
99 u32 residue_stat;
100
101 struct edma_pset pset[0];
68}; 102};
69 103
70struct edma_cc; 104struct edma_cc;
@@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan)
136 /* Find out how many left */ 170 /* Find out how many left */
137 left = edesc->pset_nr - edesc->processed; 171 left = edesc->pset_nr - edesc->processed;
138 nslots = min(MAX_NR_SG, left); 172 nslots = min(MAX_NR_SG, left);
173 edesc->sg_len = 0;
139 174
140 /* Write descriptor PaRAM set(s) */ 175 /* Write descriptor PaRAM set(s) */
141 for (i = 0; i < nslots; i++) { 176 for (i = 0; i < nslots; i++) {
142 j = i + edesc->processed; 177 j = i + edesc->processed;
143 edma_write_slot(echan->slot[i], &edesc->pset[j]); 178 edma_write_slot(echan->slot[i], &edesc->pset[j].param);
144 dev_dbg(echan->vchan.chan.device->dev, 179 edesc->sg_len += edesc->pset[j].len;
180 dev_vdbg(echan->vchan.chan.device->dev,
145 "\n pset[%d]:\n" 181 "\n pset[%d]:\n"
146 " chnum\t%d\n" 182 " chnum\t%d\n"
147 " slot\t%d\n" 183 " slot\t%d\n"
@@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan)
154 " cidx\t%08x\n" 190 " cidx\t%08x\n"
155 " lkrld\t%08x\n", 191 " lkrld\t%08x\n",
156 j, echan->ch_num, echan->slot[i], 192 j, echan->ch_num, echan->slot[i],
157 edesc->pset[j].opt, 193 edesc->pset[j].param.opt,
158 edesc->pset[j].src, 194 edesc->pset[j].param.src,
159 edesc->pset[j].dst, 195 edesc->pset[j].param.dst,
160 edesc->pset[j].a_b_cnt, 196 edesc->pset[j].param.a_b_cnt,
161 edesc->pset[j].ccnt, 197 edesc->pset[j].param.ccnt,
162 edesc->pset[j].src_dst_bidx, 198 edesc->pset[j].param.src_dst_bidx,
163 edesc->pset[j].src_dst_cidx, 199 edesc->pset[j].param.src_dst_cidx,
164 edesc->pset[j].link_bcntrld); 200 edesc->pset[j].param.link_bcntrld);
165 /* Link to the previous slot if not the last set */ 201 /* Link to the previous slot if not the last set */
166 if (i != (nslots - 1)) 202 if (i != (nslots - 1))
167 edma_link(echan->slot[i], echan->slot[i+1]); 203 edma_link(echan->slot[i], echan->slot[i+1]);
@@ -183,7 +219,8 @@ static void edma_execute(struct edma_chan *echan)
183 } 219 }
184 220
185 if (edesc->processed <= MAX_NR_SG) { 221 if (edesc->processed <= MAX_NR_SG) {
186 dev_dbg(dev, "first transfer starting %d\n", echan->ch_num); 222 dev_dbg(dev, "first transfer starting on channel %d\n",
223 echan->ch_num);
187 edma_start(echan->ch_num); 224 edma_start(echan->ch_num);
188 } else { 225 } else {
189 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", 226 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
@@ -197,7 +234,7 @@ static void edma_execute(struct edma_chan *echan)
197 * MAX_NR_SG 234 * MAX_NR_SG
198 */ 235 */
199 if (echan->missed) { 236 if (echan->missed) {
200 dev_dbg(dev, "missed event in execute detected\n"); 237 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
201 edma_clean_channel(echan->ch_num); 238 edma_clean_channel(echan->ch_num);
202 edma_stop(echan->ch_num); 239 edma_stop(echan->ch_num);
203 edma_start(echan->ch_num); 240 edma_start(echan->ch_num);
@@ -242,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan,
242 return 0; 279 return 0;
243} 280}
244 281
282static int edma_dma_pause(struct edma_chan *echan)
283{
284 /* Pause/Resume only allowed with cyclic mode */
285 if (!echan->edesc->cyclic)
286 return -EINVAL;
287
288 edma_pause(echan->ch_num);
289 return 0;
290}
291
292static int edma_dma_resume(struct edma_chan *echan)
293{
294 /* Pause/Resume only allowed with cyclic mode */
295 if (!echan->edesc->cyclic)
296 return -EINVAL;
297
298 edma_resume(echan->ch_num);
299 return 0;
300}
301
245static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 302static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
246 unsigned long arg) 303 unsigned long arg)
247{ 304{
@@ -257,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
257 config = (struct dma_slave_config *)arg; 314 config = (struct dma_slave_config *)arg;
258 ret = edma_slave_config(echan, config); 315 ret = edma_slave_config(echan, config);
259 break; 316 break;
317 case DMA_PAUSE:
318 ret = edma_dma_pause(echan);
319 break;
320
321 case DMA_RESUME:
322 ret = edma_dma_resume(echan);
323 break;
324
260 default: 325 default:
261 ret = -ENOSYS; 326 ret = -ENOSYS;
262 } 327 }
@@ -275,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
275 * @dma_length: Total length of the DMA transfer 340 * @dma_length: Total length of the DMA transfer
276 * @direction: Direction of the transfer 341 * @direction: Direction of the transfer
277 */ 342 */
278static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, 343static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
279 dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, 344 dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
280 enum dma_slave_buswidth dev_width, unsigned int dma_length, 345 enum dma_slave_buswidth dev_width, unsigned int dma_length,
281 enum dma_transfer_direction direction) 346 enum dma_transfer_direction direction)
282{ 347{
283 struct edma_chan *echan = to_edma_chan(chan); 348 struct edma_chan *echan = to_edma_chan(chan);
284 struct device *dev = chan->device->dev; 349 struct device *dev = chan->device->dev;
350 struct edmacc_param *param = &epset->param;
285 int acnt, bcnt, ccnt, cidx; 351 int acnt, bcnt, ccnt, cidx;
286 int src_bidx, dst_bidx, src_cidx, dst_cidx; 352 int src_bidx, dst_bidx, src_cidx, dst_cidx;
287 int absync; 353 int absync;
288 354
289 acnt = dev_width; 355 acnt = dev_width;
356
357 /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
358 if (!burst)
359 burst = 1;
290 /* 360 /*
291 * If the maxburst is equal to the fifo width, use 361 * If the maxburst is equal to the fifo width, use
292 * A-synced transfers. This allows for large contiguous 362 * A-synced transfers. This allows for large contiguous
@@ -337,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
337 cidx = acnt * bcnt; 407 cidx = acnt * bcnt;
338 } 408 }
339 409
410 epset->len = dma_length;
411
340 if (direction == DMA_MEM_TO_DEV) { 412 if (direction == DMA_MEM_TO_DEV) {
341 src_bidx = acnt; 413 src_bidx = acnt;
342 src_cidx = cidx; 414 src_cidx = cidx;
343 dst_bidx = 0; 415 dst_bidx = 0;
344 dst_cidx = 0; 416 dst_cidx = 0;
417 epset->addr = src_addr;
345 } else if (direction == DMA_DEV_TO_MEM) { 418 } else if (direction == DMA_DEV_TO_MEM) {
346 src_bidx = 0; 419 src_bidx = 0;
347 src_cidx = 0; 420 src_cidx = 0;
348 dst_bidx = acnt; 421 dst_bidx = acnt;
349 dst_cidx = cidx; 422 dst_cidx = cidx;
423 epset->addr = dst_addr;
424 } else if (direction == DMA_MEM_TO_MEM) {
425 src_bidx = acnt;
426 src_cidx = cidx;
427 dst_bidx = acnt;
428 dst_cidx = cidx;
350 } else { 429 } else {
351 dev_err(dev, "%s: direction not implemented yet\n", __func__); 430 dev_err(dev, "%s: direction not implemented yet\n", __func__);
352 return -EINVAL; 431 return -EINVAL;
353 } 432 }
354 433
355 pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); 434 param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
356 /* Configure A or AB synchronized transfers */ 435 /* Configure A or AB synchronized transfers */
357 if (absync) 436 if (absync)
358 pset->opt |= SYNCDIM; 437 param->opt |= SYNCDIM;
359 438
360 pset->src = src_addr; 439 param->src = src_addr;
361 pset->dst = dst_addr; 440 param->dst = dst_addr;
362 441
363 pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; 442 param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
364 pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; 443 param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
365 444
366 pset->a_b_cnt = bcnt << 16 | acnt; 445 param->a_b_cnt = bcnt << 16 | acnt;
367 pset->ccnt = ccnt; 446 param->ccnt = ccnt;
368 /* 447 /*
369 * Only time when (bcntrld) auto reload is required is for 448 * Only time when (bcntrld) auto reload is required is for
370 * A-sync case, and in this case, a requirement of reload value 449 * A-sync case, and in this case, a requirement of reload value
371 * of SZ_64K-1 only is assured. 'link' is initially set to NULL 450 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
372 * and then later will be populated by edma_execute. 451 * and then later will be populated by edma_execute.
373 */ 452 */
374 pset->link_bcntrld = 0xffffffff; 453 param->link_bcntrld = 0xffffffff;
375 return absync; 454 return absync;
376} 455}
377 456
@@ -401,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
401 dev_width = echan->cfg.dst_addr_width; 480 dev_width = echan->cfg.dst_addr_width;
402 burst = echan->cfg.dst_maxburst; 481 burst = echan->cfg.dst_maxburst;
403 } else { 482 } else {
404 dev_err(dev, "%s: bad direction?\n", __func__); 483 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
405 return NULL; 484 return NULL;
406 } 485 }
407 486
408 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { 487 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
409 dev_err(dev, "Undefined slave buswidth\n"); 488 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
410 return NULL; 489 return NULL;
411 } 490 }
412 491
413 edesc = kzalloc(sizeof(*edesc) + sg_len * 492 edesc = kzalloc(sizeof(*edesc) + sg_len *
414 sizeof(edesc->pset[0]), GFP_ATOMIC); 493 sizeof(edesc->pset[0]), GFP_ATOMIC);
415 if (!edesc) { 494 if (!edesc) {
416 dev_dbg(dev, "Failed to allocate a descriptor\n"); 495 dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
417 return NULL; 496 return NULL;
418 } 497 }
419 498
420 edesc->pset_nr = sg_len; 499 edesc->pset_nr = sg_len;
500 edesc->residue = 0;
501 edesc->direction = direction;
502 edesc->echan = echan;
421 503
422 /* Allocate a PaRAM slot, if needed */ 504 /* Allocate a PaRAM slot, if needed */
423 nslots = min_t(unsigned, MAX_NR_SG, sg_len); 505 nslots = min_t(unsigned, MAX_NR_SG, sg_len);
@@ -429,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
429 EDMA_SLOT_ANY); 511 EDMA_SLOT_ANY);
430 if (echan->slot[i] < 0) { 512 if (echan->slot[i] < 0) {
431 kfree(edesc); 513 kfree(edesc);
432 dev_err(dev, "Failed to allocate slot\n"); 514 dev_err(dev, "%s: Failed to allocate slot\n",
515 __func__);
433 return NULL; 516 return NULL;
434 } 517 }
435 } 518 }
@@ -452,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
452 } 535 }
453 536
454 edesc->absync = ret; 537 edesc->absync = ret;
538 edesc->residue += sg_dma_len(sg);
455 539
456 /* If this is the last in a current SG set of transactions, 540 /* If this is the last in a current SG set of transactions,
457 enable interrupts so that next set is processed */ 541 enable interrupts so that next set is processed */
458 if (!((i+1) % MAX_NR_SG)) 542 if (!((i+1) % MAX_NR_SG))
459 edesc->pset[i].opt |= TCINTEN; 543 edesc->pset[i].param.opt |= TCINTEN;
460 544
461 /* If this is the last set, enable completion interrupt flag */ 545 /* If this is the last set, enable completion interrupt flag */
462 if (i == sg_len - 1) 546 if (i == sg_len - 1)
463 edesc->pset[i].opt |= TCINTEN; 547 edesc->pset[i].param.opt |= TCINTEN;
464 } 548 }
549 edesc->residue_stat = edesc->residue;
550
551 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
552}
553
554struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
555 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
556 size_t len, unsigned long tx_flags)
557{
558 int ret;
559 struct edma_desc *edesc;
560 struct device *dev = chan->device->dev;
561 struct edma_chan *echan = to_edma_chan(chan);
562
563 if (unlikely(!echan || !len))
564 return NULL;
565
566 edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
567 if (!edesc) {
568 dev_dbg(dev, "Failed to allocate a descriptor\n");
569 return NULL;
570 }
571
572 edesc->pset_nr = 1;
573
574 ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
575 DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
576 if (ret < 0)
577 return NULL;
578
579 edesc->absync = ret;
580
581 /*
582 * Enable intermediate transfer chaining to re-trigger channel
583 * on completion of every TR, and enable transfer-completion
584 * interrupt on completion of the whole transfer.
585 */
586 edesc->pset[0].param.opt |= ITCCHEN;
587 edesc->pset[0].param.opt |= TCINTEN;
465 588
466 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 589 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
467} 590}
@@ -493,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
493 dev_width = echan->cfg.dst_addr_width; 616 dev_width = echan->cfg.dst_addr_width;
494 burst = echan->cfg.dst_maxburst; 617 burst = echan->cfg.dst_maxburst;
495 } else { 618 } else {
496 dev_err(dev, "%s: bad direction?\n", __func__); 619 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
497 return NULL; 620 return NULL;
498 } 621 }
499 622
500 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { 623 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
501 dev_err(dev, "Undefined slave buswidth\n"); 624 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
502 return NULL; 625 return NULL;
503 } 626 }
504 627
@@ -523,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
523 edesc = kzalloc(sizeof(*edesc) + nslots * 646 edesc = kzalloc(sizeof(*edesc) + nslots *
524 sizeof(edesc->pset[0]), GFP_ATOMIC); 647 sizeof(edesc->pset[0]), GFP_ATOMIC);
525 if (!edesc) { 648 if (!edesc) {
526 dev_dbg(dev, "Failed to allocate a descriptor\n"); 649 dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
527 return NULL; 650 return NULL;
528 } 651 }
529 652
530 edesc->cyclic = 1; 653 edesc->cyclic = 1;
531 edesc->pset_nr = nslots; 654 edesc->pset_nr = nslots;
655 edesc->residue = edesc->residue_stat = buf_len;
656 edesc->direction = direction;
657 edesc->echan = echan;
532 658
533 dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); 659 dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
534 dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len); 660 __func__, echan->ch_num, nslots, period_len, buf_len);
535 dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
536 661
537 for (i = 0; i < nslots; i++) { 662 for (i = 0; i < nslots; i++) {
538 /* Allocate a PaRAM slot, if needed */ 663 /* Allocate a PaRAM slot, if needed */
@@ -542,7 +667,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
542 EDMA_SLOT_ANY); 667 EDMA_SLOT_ANY);
543 if (echan->slot[i] < 0) { 668 if (echan->slot[i] < 0) {
544 kfree(edesc); 669 kfree(edesc);
545 dev_err(dev, "Failed to allocate slot\n"); 670 dev_err(dev, "%s: Failed to allocate slot\n",
671 __func__);
546 return NULL; 672 return NULL;
547 } 673 }
548 } 674 }
@@ -566,8 +692,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
566 else 692 else
567 src_addr += period_len; 693 src_addr += period_len;
568 694
569 dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); 695 dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
570 dev_dbg(dev, 696 dev_vdbg(dev,
571 "\n pset[%d]:\n" 697 "\n pset[%d]:\n"
572 " chnum\t%d\n" 698 " chnum\t%d\n"
573 " slot\t%d\n" 699 " slot\t%d\n"
@@ -580,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
580 " cidx\t%08x\n" 706 " cidx\t%08x\n"
581 " lkrld\t%08x\n", 707 " lkrld\t%08x\n",
582 i, echan->ch_num, echan->slot[i], 708 i, echan->ch_num, echan->slot[i],
583 edesc->pset[i].opt, 709 edesc->pset[i].param.opt,
584 edesc->pset[i].src, 710 edesc->pset[i].param.src,
585 edesc->pset[i].dst, 711 edesc->pset[i].param.dst,
586 edesc->pset[i].a_b_cnt, 712 edesc->pset[i].param.a_b_cnt,
587 edesc->pset[i].ccnt, 713 edesc->pset[i].param.ccnt,
588 edesc->pset[i].src_dst_bidx, 714 edesc->pset[i].param.src_dst_bidx,
589 edesc->pset[i].src_dst_cidx, 715 edesc->pset[i].param.src_dst_cidx,
590 edesc->pset[i].link_bcntrld); 716 edesc->pset[i].param.link_bcntrld);
591 717
592 edesc->absync = ret; 718 edesc->absync = ret;
593 719
@@ -595,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
595 * Enable interrupts for every period because callback 721 * Enable interrupts for every period because callback
596 * has to be called for every period. 722 * has to be called for every period.
597 */ 723 */
598 edesc->pset[i].opt |= TCINTEN; 724 edesc->pset[i].param.opt |= TCINTEN;
599 } 725 }
600 726
601 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 727 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -606,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
606 struct edma_chan *echan = data; 732 struct edma_chan *echan = data;
607 struct device *dev = echan->vchan.chan.device->dev; 733 struct device *dev = echan->vchan.chan.device->dev;
608 struct edma_desc *edesc; 734 struct edma_desc *edesc;
609 unsigned long flags;
610 struct edmacc_param p; 735 struct edmacc_param p;
611 736
612 edesc = echan->edesc; 737 edesc = echan->edesc;
@@ -617,27 +742,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
617 742
618 switch (ch_status) { 743 switch (ch_status) {
619 case EDMA_DMA_COMPLETE: 744 case EDMA_DMA_COMPLETE:
620 spin_lock_irqsave(&echan->vchan.lock, flags); 745 spin_lock(&echan->vchan.lock);
621 746
622 if (edesc) { 747 if (edesc) {
623 if (edesc->cyclic) { 748 if (edesc->cyclic) {
624 vchan_cyclic_callback(&edesc->vdesc); 749 vchan_cyclic_callback(&edesc->vdesc);
625 } else if (edesc->processed == edesc->pset_nr) { 750 } else if (edesc->processed == edesc->pset_nr) {
626 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); 751 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
752 edesc->residue = 0;
627 edma_stop(echan->ch_num); 753 edma_stop(echan->ch_num);
628 vchan_cookie_complete(&edesc->vdesc); 754 vchan_cookie_complete(&edesc->vdesc);
629 edma_execute(echan); 755 edma_execute(echan);
630 } else { 756 } else {
631 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); 757 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
758
759 /* Update statistics for tx_status */
760 edesc->residue -= edesc->sg_len;
761 edesc->residue_stat = edesc->residue;
762 edesc->processed_stat = edesc->processed;
763
632 edma_execute(echan); 764 edma_execute(echan);
633 } 765 }
634 } 766 }
635 767
636 spin_unlock_irqrestore(&echan->vchan.lock, flags); 768 spin_unlock(&echan->vchan.lock);
637 769
638 break; 770 break;
639 case EDMA_DMA_CC_ERROR: 771 case EDMA_DMA_CC_ERROR:
640 spin_lock_irqsave(&echan->vchan.lock, flags); 772 spin_lock(&echan->vchan.lock);
641 773
642 edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); 774 edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
643 775
@@ -668,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
668 edma_trigger_channel(echan->ch_num); 800 edma_trigger_channel(echan->ch_num);
669 } 801 }
670 802
671 spin_unlock_irqrestore(&echan->vchan.lock, flags); 803 spin_unlock(&echan->vchan.lock);
672 804
673 break; 805 break;
674 default: 806 default:
@@ -704,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
704 echan->alloced = true; 836 echan->alloced = true;
705 echan->slot[0] = echan->ch_num; 837 echan->slot[0] = echan->ch_num;
706 838
707 dev_dbg(dev, "allocated channel for %u:%u\n", 839 dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
708 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); 840 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
709 841
710 return 0; 842 return 0;
@@ -756,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan)
756 spin_unlock_irqrestore(&echan->vchan.lock, flags); 888 spin_unlock_irqrestore(&echan->vchan.lock, flags);
757} 889}
758 890
759static size_t edma_desc_size(struct edma_desc *edesc) 891static u32 edma_residue(struct edma_desc *edesc)
760{ 892{
893 bool dst = edesc->direction == DMA_DEV_TO_MEM;
894 struct edma_pset *pset = edesc->pset;
895 dma_addr_t done, pos;
761 int i; 896 int i;
762 size_t size; 897
763 898 /*
764 if (edesc->absync) 899 * We always read the dst/src position from the first RamPar
765 for (size = i = 0; i < edesc->pset_nr; i++) 900 * pset. That's the one which is active now.
766 size += (edesc->pset[i].a_b_cnt & 0xffff) * 901 */
767 (edesc->pset[i].a_b_cnt >> 16) * 902 pos = edma_get_position(edesc->echan->slot[0], dst);
768 edesc->pset[i].ccnt; 903
769 else 904 /*
770 size = (edesc->pset[0].a_b_cnt & 0xffff) * 905 * Cyclic is simple. Just subtract pset[0].addr from pos.
771 (edesc->pset[0].a_b_cnt >> 16) + 906 *
772 (edesc->pset[0].a_b_cnt & 0xffff) * 907 * We never update edesc->residue in the cyclic case, so we
773 (SZ_64K - 1) * edesc->pset[0].ccnt; 908 * can tell the remaining room to the end of the circular
774 909 * buffer.
775 return size; 910 */
911 if (edesc->cyclic) {
912 done = pos - pset->addr;
913 edesc->residue_stat = edesc->residue - done;
914 return edesc->residue_stat;
915 }
916
917 /*
918 * For SG operation we catch up with the last processed
919 * status.
920 */
921 pset += edesc->processed_stat;
922
923 for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
924 /*
925 * If we are inside this pset address range, we know
926 * this is the active one. Get the current delta and
927 * stop walking the psets.
928 */
929 if (pos >= pset->addr && pos < pset->addr + pset->len)
930 return edesc->residue_stat - (pos - pset->addr);
931
932 /* Otherwise mark it done and update residue_stat. */
933 edesc->processed_stat++;
934 edesc->residue_stat -= pset->len;
935 }
936 return edesc->residue_stat;
776} 937}
777 938
778/* Check request completion status */ 939/* Check request completion status */
@@ -790,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
790 return ret; 951 return ret;
791 952
792 spin_lock_irqsave(&echan->vchan.lock, flags); 953 spin_lock_irqsave(&echan->vchan.lock, flags);
793 vdesc = vchan_find_desc(&echan->vchan, cookie); 954 if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
794 if (vdesc) { 955 txstate->residue = edma_residue(echan->edesc);
795 txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); 956 else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
796 } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { 957 txstate->residue = to_edma_desc(&vdesc->tx)->residue;
797 struct edma_desc *edesc = echan->edesc;
798 txstate->residue = edma_desc_size(edesc);
799 }
800 spin_unlock_irqrestore(&echan->vchan.lock, flags); 958 spin_unlock_irqrestore(&echan->vchan.lock, flags);
801 959
802 return ret; 960 return ret;
@@ -822,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc,
822 } 980 }
823} 981}
824 982
983#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
984 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
985 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
986
987static int edma_dma_device_slave_caps(struct dma_chan *dchan,
988 struct dma_slave_caps *caps)
989{
990 caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
991 caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
992 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
993 caps->cmd_pause = true;
994 caps->cmd_terminate = true;
995 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
996
997 return 0;
998}
999
825static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, 1000static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
826 struct device *dev) 1001 struct device *dev)
827{ 1002{
828 dma->device_prep_slave_sg = edma_prep_slave_sg; 1003 dma->device_prep_slave_sg = edma_prep_slave_sg;
829 dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; 1004 dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
1005 dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
830 dma->device_alloc_chan_resources = edma_alloc_chan_resources; 1006 dma->device_alloc_chan_resources = edma_alloc_chan_resources;
831 dma->device_free_chan_resources = edma_free_chan_resources; 1007 dma->device_free_chan_resources = edma_free_chan_resources;
832 dma->device_issue_pending = edma_issue_pending; 1008 dma->device_issue_pending = edma_issue_pending;
833 dma->device_tx_status = edma_tx_status; 1009 dma->device_tx_status = edma_tx_status;
834 dma->device_control = edma_control; 1010 dma->device_control = edma_control;
1011 dma->device_slave_caps = edma_dma_device_slave_caps;
835 dma->dev = dev; 1012 dma->dev = dev;
836 1013
1014 /*
1015 * code using dma memcpy must make sure alignment of
1016 * length is at dma->copy_align boundary.
1017 */
1018 dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
1019
837 INIT_LIST_HEAD(&dma->channels); 1020 INIT_LIST_HEAD(&dma->channels);
838} 1021}
839 1022
@@ -861,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev)
861 1044
862 dma_cap_zero(ecc->dma_slave.cap_mask); 1045 dma_cap_zero(ecc->dma_slave.cap_mask);
863 dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); 1046 dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
1047 dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
1048 dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
864 1049
865 edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); 1050 edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
866 1051
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index b59a17fb7c3e..ff7138fd66d1 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -2,7 +2,7 @@
2 * Marvell EBU SoC Device Bus Controller 2 * Marvell EBU SoC Device Bus Controller
3 * (memory controller for NOR/NAND/SRAM/FPGA devices) 3 * (memory controller for NOR/NAND/SRAM/FPGA devices)
4 * 4 *
5 * Copyright (C) 2013 Marvell 5 * Copyright (C) 2013-2014 Marvell
6 * 6 *
7 * This program is free software: you can redistribute it and/or modify 7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -30,19 +30,47 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31 31
32/* Register definitions */ 32/* Register definitions */
33#define DEV_WIDTH_BIT 30 33#define ARMADA_DEV_WIDTH_SHIFT 30
34#define BADR_SKEW_BIT 28 34#define ARMADA_BADR_SKEW_SHIFT 28
35#define RD_HOLD_BIT 23 35#define ARMADA_RD_HOLD_SHIFT 23
36#define ACC_NEXT_BIT 17 36#define ARMADA_ACC_NEXT_SHIFT 17
37#define RD_SETUP_BIT 12 37#define ARMADA_RD_SETUP_SHIFT 12
38#define ACC_FIRST_BIT 6 38#define ARMADA_ACC_FIRST_SHIFT 6
39 39
40#define SYNC_ENABLE_BIT 24 40#define ARMADA_SYNC_ENABLE_SHIFT 24
41#define WR_HIGH_BIT 16 41#define ARMADA_WR_HIGH_SHIFT 16
42#define WR_LOW_BIT 8 42#define ARMADA_WR_LOW_SHIFT 8
43 43
44#define READ_PARAM_OFFSET 0x0 44#define ARMADA_READ_PARAM_OFFSET 0x0
45#define WRITE_PARAM_OFFSET 0x4 45#define ARMADA_WRITE_PARAM_OFFSET 0x4
46
47#define ORION_RESERVED (0x2 << 30)
48#define ORION_BADR_SKEW_SHIFT 28
49#define ORION_WR_HIGH_EXT_BIT BIT(27)
50#define ORION_WR_HIGH_EXT_MASK 0x8
51#define ORION_WR_LOW_EXT_BIT BIT(26)
52#define ORION_WR_LOW_EXT_MASK 0x8
53#define ORION_ALE_WR_EXT_BIT BIT(25)
54#define ORION_ALE_WR_EXT_MASK 0x8
55#define ORION_ACC_NEXT_EXT_BIT BIT(24)
56#define ORION_ACC_NEXT_EXT_MASK 0x10
57#define ORION_ACC_FIRST_EXT_BIT BIT(23)
58#define ORION_ACC_FIRST_EXT_MASK 0x10
59#define ORION_TURN_OFF_EXT_BIT BIT(22)
60#define ORION_TURN_OFF_EXT_MASK 0x8
61#define ORION_DEV_WIDTH_SHIFT 20
62#define ORION_WR_HIGH_SHIFT 17
63#define ORION_WR_HIGH_MASK 0x7
64#define ORION_WR_LOW_SHIFT 14
65#define ORION_WR_LOW_MASK 0x7
66#define ORION_ALE_WR_SHIFT 11
67#define ORION_ALE_WR_MASK 0x7
68#define ORION_ACC_NEXT_SHIFT 7
69#define ORION_ACC_NEXT_MASK 0xF
70#define ORION_ACC_FIRST_SHIFT 3
71#define ORION_ACC_FIRST_MASK 0xF
72#define ORION_TURN_OFF_SHIFT 0
73#define ORION_TURN_OFF_MASK 0x7
46 74
47struct devbus_read_params { 75struct devbus_read_params {
48 u32 bus_width; 76 u32 bus_width;
@@ -89,19 +117,14 @@ static int get_timing_param_ps(struct devbus *devbus,
89 return 0; 117 return 0;
90} 118}
91 119
92static int devbus_set_timing_params(struct devbus *devbus, 120static int devbus_get_timing_params(struct devbus *devbus,
93 struct device_node *node) 121 struct device_node *node,
122 struct devbus_read_params *r,
123 struct devbus_write_params *w)
94{ 124{
95 struct devbus_read_params r;
96 struct devbus_write_params w;
97 u32 value;
98 int err; 125 int err;
99 126
100 dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n", 127 err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width);
101 devbus->tick_ps);
102
103 /* Get read timings */
104 err = of_property_read_u32(node, "devbus,bus-width", &r.bus_width);
105 if (err < 0) { 128 if (err < 0) {
106 dev_err(devbus->dev, 129 dev_err(devbus->dev,
107 "%s has no 'devbus,bus-width' property\n", 130 "%s has no 'devbus,bus-width' property\n",
@@ -113,104 +136,148 @@ static int devbus_set_timing_params(struct devbus *devbus,
113 * The bus width is encoded into the register as 0 for 8 bits, 136 * The bus width is encoded into the register as 0 for 8 bits,
114 * and 1 for 16 bits, so we do the necessary conversion here. 137 * and 1 for 16 bits, so we do the necessary conversion here.
115 */ 138 */
116 if (r.bus_width == 8) 139 if (r->bus_width == 8)
117 r.bus_width = 0; 140 r->bus_width = 0;
118 else if (r.bus_width == 16) 141 else if (r->bus_width == 16)
119 r.bus_width = 1; 142 r->bus_width = 1;
120 else { 143 else {
121 dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width); 144 dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width);
122 return -EINVAL; 145 return -EINVAL;
123 } 146 }
124 147
125 err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", 148 err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
126 &r.badr_skew); 149 &r->badr_skew);
127 if (err < 0) 150 if (err < 0)
128 return err; 151 return err;
129 152
130 err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps", 153 err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
131 &r.turn_off); 154 &r->turn_off);
132 if (err < 0) 155 if (err < 0)
133 return err; 156 return err;
134 157
135 err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps", 158 err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
136 &r.acc_first); 159 &r->acc_first);
137 if (err < 0) 160 if (err < 0)
138 return err; 161 return err;
139 162
140 err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps", 163 err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
141 &r.acc_next); 164 &r->acc_next);
142 if (err < 0)
143 return err;
144
145 err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
146 &r.rd_setup);
147 if (err < 0) 165 if (err < 0)
148 return err; 166 return err;
149 167
150 err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps", 168 if (of_device_is_compatible(devbus->dev->of_node, "marvell,mvebu-devbus")) {
151 &r.rd_hold); 169 err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
152 if (err < 0) 170 &r->rd_setup);
153 return err; 171 if (err < 0)
154 172 return err;
155 /* Get write timings */ 173
156 err = of_property_read_u32(node, "devbus,sync-enable", 174 err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps",
157 &w.sync_enable); 175 &r->rd_hold);
158 if (err < 0) { 176 if (err < 0)
159 dev_err(devbus->dev, 177 return err;
160 "%s has no 'devbus,sync-enable' property\n", 178
161 node->full_name); 179 err = of_property_read_u32(node, "devbus,sync-enable",
162 return err; 180 &w->sync_enable);
181 if (err < 0) {
182 dev_err(devbus->dev,
183 "%s has no 'devbus,sync-enable' property\n",
184 node->full_name);
185 return err;
186 }
163 } 187 }
164 188
165 err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps", 189 err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
166 &w.ale_wr); 190 &w->ale_wr);
167 if (err < 0) 191 if (err < 0)
168 return err; 192 return err;
169 193
170 err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps", 194 err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
171 &w.wr_low); 195 &w->wr_low);
172 if (err < 0) 196 if (err < 0)
173 return err; 197 return err;
174 198
175 err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps", 199 err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
176 &w.wr_high); 200 &w->wr_high);
177 if (err < 0) 201 if (err < 0)
178 return err; 202 return err;
179 203
204 return 0;
205}
206
207static void devbus_orion_set_timing_params(struct devbus *devbus,
208 struct device_node *node,
209 struct devbus_read_params *r,
210 struct devbus_write_params *w)
211{
212 u32 value;
213
214 /*
215 * The hardware designers found it would be a good idea to
216 * split most of the values in the register into two fields:
217 * one containing all the low-order bits, and another one
218 * containing just the high-order bit. For all of those
219 * fields, we have to split the value into these two parts.
220 */
221 value = (r->turn_off & ORION_TURN_OFF_MASK) << ORION_TURN_OFF_SHIFT |
222 (r->acc_first & ORION_ACC_FIRST_MASK) << ORION_ACC_FIRST_SHIFT |
223 (r->acc_next & ORION_ACC_NEXT_MASK) << ORION_ACC_NEXT_SHIFT |
224 (w->ale_wr & ORION_ALE_WR_MASK) << ORION_ALE_WR_SHIFT |
225 (w->wr_low & ORION_WR_LOW_MASK) << ORION_WR_LOW_SHIFT |
226 (w->wr_high & ORION_WR_HIGH_MASK) << ORION_WR_HIGH_SHIFT |
227 r->bus_width << ORION_DEV_WIDTH_SHIFT |
228 ((r->turn_off & ORION_TURN_OFF_EXT_MASK) ? ORION_TURN_OFF_EXT_BIT : 0) |
229 ((r->acc_first & ORION_ACC_FIRST_EXT_MASK) ? ORION_ACC_FIRST_EXT_BIT : 0) |
230 ((r->acc_next & ORION_ACC_NEXT_EXT_MASK) ? ORION_ACC_NEXT_EXT_BIT : 0) |
231 ((w->ale_wr & ORION_ALE_WR_EXT_MASK) ? ORION_ALE_WR_EXT_BIT : 0) |
232 ((w->wr_low & ORION_WR_LOW_EXT_MASK) ? ORION_WR_LOW_EXT_BIT : 0) |
233 ((w->wr_high & ORION_WR_HIGH_EXT_MASK) ? ORION_WR_HIGH_EXT_BIT : 0) |
234 (r->badr_skew << ORION_BADR_SKEW_SHIFT) |
235 ORION_RESERVED;
236
237 writel(value, devbus->base);
238}
239
240static void devbus_armada_set_timing_params(struct devbus *devbus,
241 struct device_node *node,
242 struct devbus_read_params *r,
243 struct devbus_write_params *w)
244{
245 u32 value;
246
180 /* Set read timings */ 247 /* Set read timings */
181 value = r.bus_width << DEV_WIDTH_BIT | 248 value = r->bus_width << ARMADA_DEV_WIDTH_SHIFT |
182 r.badr_skew << BADR_SKEW_BIT | 249 r->badr_skew << ARMADA_BADR_SKEW_SHIFT |
183 r.rd_hold << RD_HOLD_BIT | 250 r->rd_hold << ARMADA_RD_HOLD_SHIFT |
184 r.acc_next << ACC_NEXT_BIT | 251 r->acc_next << ARMADA_ACC_NEXT_SHIFT |
185 r.rd_setup << RD_SETUP_BIT | 252 r->rd_setup << ARMADA_RD_SETUP_SHIFT |
186 r.acc_first << ACC_FIRST_BIT | 253 r->acc_first << ARMADA_ACC_FIRST_SHIFT |
187 r.turn_off; 254 r->turn_off;
188 255
189 dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n", 256 dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n",
190 devbus->base + READ_PARAM_OFFSET, 257 devbus->base + ARMADA_READ_PARAM_OFFSET,
191 value); 258 value);
192 259
193 writel(value, devbus->base + READ_PARAM_OFFSET); 260 writel(value, devbus->base + ARMADA_READ_PARAM_OFFSET);
194 261
195 /* Set write timings */ 262 /* Set write timings */
196 value = w.sync_enable << SYNC_ENABLE_BIT | 263 value = w->sync_enable << ARMADA_SYNC_ENABLE_SHIFT |
197 w.wr_low << WR_LOW_BIT | 264 w->wr_low << ARMADA_WR_LOW_SHIFT |
198 w.wr_high << WR_HIGH_BIT | 265 w->wr_high << ARMADA_WR_HIGH_SHIFT |
199 w.ale_wr; 266 w->ale_wr;
200 267
201 dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n", 268 dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n",
202 devbus->base + WRITE_PARAM_OFFSET, 269 devbus->base + ARMADA_WRITE_PARAM_OFFSET,
203 value); 270 value);
204 271
205 writel(value, devbus->base + WRITE_PARAM_OFFSET); 272 writel(value, devbus->base + ARMADA_WRITE_PARAM_OFFSET);
206
207 return 0;
208} 273}
209 274
210static int mvebu_devbus_probe(struct platform_device *pdev) 275static int mvebu_devbus_probe(struct platform_device *pdev)
211{ 276{
212 struct device *dev = &pdev->dev; 277 struct device *dev = &pdev->dev;
213 struct device_node *node = pdev->dev.of_node; 278 struct device_node *node = pdev->dev.of_node;
279 struct devbus_read_params r;
280 struct devbus_write_params w;
214 struct devbus *devbus; 281 struct devbus *devbus;
215 struct resource *res; 282 struct resource *res;
216 struct clk *clk; 283 struct clk *clk;
@@ -240,10 +307,21 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
240 rate = clk_get_rate(clk) / 1000; 307 rate = clk_get_rate(clk) / 1000;
241 devbus->tick_ps = 1000000000 / rate; 308 devbus->tick_ps = 1000000000 / rate;
242 309
243 /* Read the device tree node and set the new timing parameters */ 310 dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n",
244 err = devbus_set_timing_params(devbus, node); 311 devbus->tick_ps);
245 if (err < 0) 312
246 return err; 313 if (!of_property_read_bool(node, "devbus,keep-config")) {
314 /* Read the Device Tree node */
315 err = devbus_get_timing_params(devbus, node, &r, &w);
316 if (err < 0)
317 return err;
318
319 /* Set the new timing parameters */
320 if (of_device_is_compatible(node, "marvell,orion-devbus"))
321 devbus_orion_set_timing_params(devbus, node, &r, &w);
322 else
323 devbus_armada_set_timing_params(devbus, node, &r, &w);
324 }
247 325
248 /* 326 /*
249 * We need to create a child device explicitly from here to 327 * We need to create a child device explicitly from here to
@@ -259,6 +337,7 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
259 337
260static const struct of_device_id mvebu_devbus_of_match[] = { 338static const struct of_device_id mvebu_devbus_of_match[] = {
261 { .compatible = "marvell,mvebu-devbus" }, 339 { .compatible = "marvell,mvebu-devbus" },
340 { .compatible = "marvell,orion-devbus" },
262 {}, 341 {},
263}; 342};
264MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match); 343MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match);
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 49b46e6ca959..bdcf5173e377 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -51,6 +51,13 @@ config POWER_RESET_RESTART
51 Instead they restart, and u-boot holds the SoC until the 51 Instead they restart, and u-boot holds the SoC until the
52 user presses a key. u-boot then boots into Linux. 52 user presses a key. u-boot then boots into Linux.
53 53
54config POWER_RESET_SUN6I
55 bool "Allwinner A31 SoC reset driver"
56 depends on ARCH_SUNXI
57 depends on POWER_RESET
58 help
59 Reboot support for the Allwinner A31 SoCs.
60
54config POWER_RESET_VEXPRESS 61config POWER_RESET_VEXPRESS
55 bool "ARM Versatile Express power-off and reset driver" 62 bool "ARM Versatile Express power-off and reset driver"
56 depends on ARM || ARM64 63 depends on ARM || ARM64
@@ -65,3 +72,11 @@ config POWER_RESET_XGENE
65 depends on POWER_RESET 72 depends on POWER_RESET
66 help 73 help
67 Reboot support for the APM SoC X-Gene Eval boards. 74 Reboot support for the APM SoC X-Gene Eval boards.
75
76config POWER_RESET_KEYSTONE
77 bool "Keystone reset driver"
78 depends on ARCH_KEYSTONE
79 select MFD_SYSCON
80 help
81 Reboot support for the KEYSTONE SoCs.
82
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 16c0516e5a19..dde2e8bbac53 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -4,5 +4,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
4obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o 4obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
5obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o 5obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
6obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o 6obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
7obj-$(CONFIG_POWER_RESET_SUN6I) += sun6i-reboot.o
7obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o 8obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
8obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o 9obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
10obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
new file mode 100644
index 000000000000..408a18fd91cb
--- /dev/null
+++ b/drivers/power/reset/keystone-reset.c
@@ -0,0 +1,166 @@
1/*
2 * TI keystone reboot driver
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated. http://www.ti.com/
5 *
6 * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/reboot.h>
16#include <linux/regmap.h>
17#include <asm/system_misc.h>
18#include <linux/mfd/syscon.h>
19#include <linux/of_platform.h>
20
21#define RSTYPE_RG 0x0
22#define RSCTRL_RG 0x4
23#define RSCFG_RG 0x8
24#define RSISO_RG 0xc
25
26#define RSCTRL_KEY_MASK 0x0000ffff
27#define RSCTRL_RESET_MASK BIT(16)
28#define RSCTRL_KEY 0x5a69
29
30#define RSMUX_OMODE_MASK 0xe
31#define RSMUX_OMODE_RESET_ON 0xa
32#define RSMUX_OMODE_RESET_OFF 0x0
33#define RSMUX_LOCK_MASK 0x1
34#define RSMUX_LOCK_SET 0x1
35
36#define RSCFG_RSTYPE_SOFT 0x300f
37#define RSCFG_RSTYPE_HARD 0x0
38
39#define WDT_MUX_NUMBER 0x4
40
41static int rspll_offset;
42static struct regmap *pllctrl_regs;
43
44/**
45 * rsctrl_enable_rspll_write - enable access to RSCTRL, RSCFG
46 * To be able to access to RSCTRL, RSCFG registers
47 * we have to write a key before
48 */
49static inline int rsctrl_enable_rspll_write(void)
50{
51 return regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
52 RSCTRL_KEY_MASK, RSCTRL_KEY);
53}
54
55static void rsctrl_restart(enum reboot_mode mode, const char *cmd)
56{
57 /* enable write access to RSTCTRL */
58 rsctrl_enable_rspll_write();
59
60 /* reset the SOC */
61 regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
62 RSCTRL_RESET_MASK, 0);
63}
64
65static struct of_device_id rsctrl_of_match[] = {
66 {.compatible = "ti,keystone-reset", },
67 {},
68};
69
70static int rsctrl_probe(struct platform_device *pdev)
71{
72 int i;
73 int ret;
74 u32 val;
75 unsigned int rg;
76 u32 rsmux_offset;
77 struct regmap *devctrl_regs;
78 struct device *dev = &pdev->dev;
79 struct device_node *np = dev->of_node;
80
81 if (!np)
82 return -ENODEV;
83
84 /* get regmaps */
85 pllctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll");
86 if (IS_ERR(pllctrl_regs))
87 return PTR_ERR(pllctrl_regs);
88
89 devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
90 if (IS_ERR(devctrl_regs))
91 return PTR_ERR(devctrl_regs);
92
93 ret = of_property_read_u32_index(np, "ti,syscon-pll", 1, &rspll_offset);
94 if (ret) {
95 dev_err(dev, "couldn't read the reset pll offset!\n");
96 return -EINVAL;
97 }
98
99 ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, &rsmux_offset);
100 if (ret) {
101 dev_err(dev, "couldn't read the rsmux offset!\n");
102 return -EINVAL;
103 }
104
105 /* set soft/hard reset */
106 val = of_property_read_bool(np, "ti,soft-reset");
107 val = val ? RSCFG_RSTYPE_SOFT : RSCFG_RSTYPE_HARD;
108
109 ret = rsctrl_enable_rspll_write();
110 if (ret)
111 return ret;
112
113 ret = regmap_write(pllctrl_regs, rspll_offset + RSCFG_RG, val);
114 if (ret)
115 return ret;
116
117 arm_pm_restart = rsctrl_restart;
118
119 /* disable a reset isolation for all module clocks */
120 ret = regmap_write(pllctrl_regs, rspll_offset + RSISO_RG, 0);
121 if (ret)
122 return ret;
123
124 /* enable a reset for watchdogs from wdt-list */
125 for (i = 0; i < WDT_MUX_NUMBER; i++) {
126 ret = of_property_read_u32_index(np, "ti,wdt-list", i, &val);
127 if (ret == -EOVERFLOW && !i) {
128 dev_err(dev, "ti,wdt-list property has to contain at"
129 "least one entry\n");
130 return -EINVAL;
131 } else if (ret) {
132 break;
133 }
134
135 if (val >= WDT_MUX_NUMBER) {
136 dev_err(dev, "ti,wdt-list property can contain"
137 "only numbers < 4\n");
138 return -EINVAL;
139 }
140
141 rg = rsmux_offset + val * 4;
142
143 ret = regmap_update_bits(devctrl_regs, rg, RSMUX_OMODE_MASK,
144 RSMUX_OMODE_RESET_ON |
145 RSMUX_LOCK_SET);
146 if (ret)
147 return ret;
148 }
149
150 return 0;
151}
152
153static struct platform_driver rsctrl_driver = {
154 .probe = rsctrl_probe,
155 .driver = {
156 .owner = THIS_MODULE,
157 .name = KBUILD_MODNAME,
158 .of_match_table = rsctrl_of_match,
159 },
160};
161module_platform_driver(rsctrl_driver);
162
163MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
164MODULE_DESCRIPTION("Texas Instruments keystone reset driver");
165MODULE_LICENSE("GPL v2");
166MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/power/reset/sun6i-reboot.c b/drivers/power/reset/sun6i-reboot.c
new file mode 100644
index 000000000000..af2cd7ff2fe8
--- /dev/null
+++ b/drivers/power/reset/sun6i-reboot.c
@@ -0,0 +1,85 @@
1/*
2 * Allwinner A31 SoCs reset code
3 *
4 * Copyright (C) 2012-2014 Maxime Ripard
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/delay.h>
14#include <linux/io.h>
15#include <linux/module.h>
16#include <linux/of_address.h>
17#include <linux/platform_device.h>
18#include <linux/reboot.h>
19
20#include <asm/system_misc.h>
21
22#define SUN6I_WATCHDOG1_IRQ_REG 0x00
23#define SUN6I_WATCHDOG1_CTRL_REG 0x10
24#define SUN6I_WATCHDOG1_CTRL_RESTART BIT(0)
25#define SUN6I_WATCHDOG1_CONFIG_REG 0x14
26#define SUN6I_WATCHDOG1_CONFIG_RESTART BIT(0)
27#define SUN6I_WATCHDOG1_CONFIG_IRQ BIT(1)
28#define SUN6I_WATCHDOG1_MODE_REG 0x18
29#define SUN6I_WATCHDOG1_MODE_ENABLE BIT(0)
30
31static void __iomem *wdt_base;
32
33static void sun6i_wdt_restart(enum reboot_mode mode, const char *cmd)
34{
35 if (!wdt_base)
36 return;
37
38 /* Disable interrupts */
39 writel(0, wdt_base + SUN6I_WATCHDOG1_IRQ_REG);
40
41 /* We want to disable the IRQ and just reset the whole system */
42 writel(SUN6I_WATCHDOG1_CONFIG_RESTART,
43 wdt_base + SUN6I_WATCHDOG1_CONFIG_REG);
44
45 /* Enable timer. The default and lowest interval value is 0.5s */
46 writel(SUN6I_WATCHDOG1_MODE_ENABLE,
47 wdt_base + SUN6I_WATCHDOG1_MODE_REG);
48
49 /* Restart the watchdog. */
50 writel(SUN6I_WATCHDOG1_CTRL_RESTART,
51 wdt_base + SUN6I_WATCHDOG1_CTRL_REG);
52
53 while (1) {
54 mdelay(5);
55 writel(SUN6I_WATCHDOG1_MODE_ENABLE,
56 wdt_base + SUN6I_WATCHDOG1_MODE_REG);
57 }
58}
59
60static int sun6i_reboot_probe(struct platform_device *pdev)
61{
62 wdt_base = of_iomap(pdev->dev.of_node, 0);
63 if (!wdt_base) {
64 WARN(1, "failed to map watchdog base address");
65 return -ENODEV;
66 }
67
68 arm_pm_restart = sun6i_wdt_restart;
69
70 return 0;
71}
72
73static struct of_device_id sun6i_reboot_of_match[] = {
74 { .compatible = "allwinner,sun6i-a31-wdt" },
75 {}
76};
77
78static struct platform_driver sun6i_reboot_driver = {
79 .probe = sun6i_reboot_probe,
80 .driver = {
81 .name = "sun6i-reboot",
82 .of_match_table = sun6i_reboot_of_match,
83 },
84};
85module_platform_driver(sun6i_reboot_driver);
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 4f60caf750ce..60fed3d7820b 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,3 +1,4 @@
1obj-$(CONFIG_RESET_CONTROLLER) += core.o 1obj-$(CONFIG_RESET_CONTROLLER) += core.o
2obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
2obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o 3obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
3obj-$(CONFIG_ARCH_STI) += sti/ 4obj-$(CONFIG_ARCH_STI) += sti/
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
new file mode 100644
index 000000000000..79c32ca84ef1
--- /dev/null
+++ b/drivers/reset/reset-socfpga.c
@@ -0,0 +1,146 @@
1/*
2 * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
3 *
4 * based on
5 * Allwinner SoCs Reset Controller driver
6 *
7 * Copyright 2013 Maxime Ripard
8 *
9 * Maxime Ripard <maxime.ripard@free-electrons.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/err.h>
18#include <linux/io.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/reset-controller.h>
23#include <linux/spinlock.h>
24#include <linux/types.h>
25
26#define NR_BANKS 4
27#define OFFSET_MODRST 0x10
28
29struct socfpga_reset_data {
30 spinlock_t lock;
31 void __iomem *membase;
32 struct reset_controller_dev rcdev;
33};
34
35static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
36 unsigned long id)
37{
38 struct socfpga_reset_data *data = container_of(rcdev,
39 struct socfpga_reset_data,
40 rcdev);
41 int bank = id / BITS_PER_LONG;
42 int offset = id % BITS_PER_LONG;
43 unsigned long flags;
44 u32 reg;
45
46 spin_lock_irqsave(&data->lock, flags);
47
48 reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
49 writel(reg | BIT(offset), data->membase + OFFSET_MODRST +
50 (bank * NR_BANKS));
51 spin_unlock_irqrestore(&data->lock, flags);
52
53 return 0;
54}
55
56static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
57 unsigned long id)
58{
59 struct socfpga_reset_data *data = container_of(rcdev,
60 struct socfpga_reset_data,
61 rcdev);
62
63 int bank = id / BITS_PER_LONG;
64 int offset = id % BITS_PER_LONG;
65 unsigned long flags;
66 u32 reg;
67
68 spin_lock_irqsave(&data->lock, flags);
69
70 reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
71 writel(reg & ~BIT(offset), data->membase + OFFSET_MODRST +
72 (bank * NR_BANKS));
73
74 spin_unlock_irqrestore(&data->lock, flags);
75
76 return 0;
77}
78
79static struct reset_control_ops socfpga_reset_ops = {
80 .assert = socfpga_reset_assert,
81 .deassert = socfpga_reset_deassert,
82};
83
84static int socfpga_reset_probe(struct platform_device *pdev)
85{
86 struct socfpga_reset_data *data;
87 struct resource *res;
88
89 /*
90 * The binding was mainlined without the required property.
91 * Do not continue, when we encounter an old DT.
92 */
93 if (!of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
94 dev_err(&pdev->dev, "%s missing #reset-cells property\n",
95 pdev->dev.of_node->full_name);
96 return -EINVAL;
97 }
98
99 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
100 if (!data)
101 return -ENOMEM;
102
103 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
104 data->membase = devm_ioremap_resource(&pdev->dev, res);
105 if (IS_ERR(data->membase))
106 return PTR_ERR(data->membase);
107
108 spin_lock_init(&data->lock);
109
110 data->rcdev.owner = THIS_MODULE;
111 data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG;
112 data->rcdev.ops = &socfpga_reset_ops;
113 data->rcdev.of_node = pdev->dev.of_node;
114 reset_controller_register(&data->rcdev);
115
116 return 0;
117}
118
119static int socfpga_reset_remove(struct platform_device *pdev)
120{
121 struct socfpga_reset_data *data = platform_get_drvdata(pdev);
122
123 reset_controller_unregister(&data->rcdev);
124
125 return 0;
126}
127
128static const struct of_device_id socfpga_reset_dt_ids[] = {
129 { .compatible = "altr,rst-mgr", },
130 { /* sentinel */ },
131};
132
133static struct platform_driver socfpga_reset_driver = {
134 .probe = socfpga_reset_probe,
135 .remove = socfpga_reset_remove,
136 .driver = {
137 .name = "socfpga-reset",
138 .owner = THIS_MODULE,
139 .of_match_table = socfpga_reset_dt_ids,
140 },
141};
142module_platform_driver(socfpga_reset_driver);
143
144MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de");
145MODULE_DESCRIPTION("Socfpga Reset Controller Driver");
146MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 695bd3496eba..a94e7a7820b4 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -145,7 +145,24 @@ MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
145 145
146static int sunxi_reset_probe(struct platform_device *pdev) 146static int sunxi_reset_probe(struct platform_device *pdev)
147{ 147{
148 return sunxi_reset_init(pdev->dev.of_node); 148 struct sunxi_reset_data *data;
149 struct resource *res;
150
151 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
152 if (!data)
153 return -ENOMEM;
154
155 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
156 data->membase = devm_ioremap_resource(&pdev->dev, res);
157 if (IS_ERR(data->membase))
158 return PTR_ERR(data->membase);
159
160 data->rcdev.owner = THIS_MODULE;
161 data->rcdev.nr_resets = resource_size(res) * 32;
162 data->rcdev.ops = &sunxi_reset_ops;
163 data->rcdev.of_node = pdev->dev.of_node;
164
165 return reset_controller_register(&data->rcdev);
149} 166}
150 167
151static int sunxi_reset_remove(struct platform_device *pdev) 168static int sunxi_reset_remove(struct platform_device *pdev)
@@ -153,8 +170,6 @@ static int sunxi_reset_remove(struct platform_device *pdev)
153 struct sunxi_reset_data *data = platform_get_drvdata(pdev); 170 struct sunxi_reset_data *data = platform_get_drvdata(pdev);
154 171
155 reset_controller_unregister(&data->rcdev); 172 reset_controller_unregister(&data->rcdev);
156 iounmap(data->membase);
157 kfree(data);
158 173
159 return 0; 174 return 0;
160} 175}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
new file mode 100644
index 000000000000..c8543855aa82
--- /dev/null
+++ b/drivers/soc/Kconfig
@@ -0,0 +1,5 @@
1menu "SOC (System On Chip) specific Drivers"
2
3source "drivers/soc/qcom/Kconfig"
4
5endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
new file mode 100644
index 000000000000..0f7c44793b29
--- /dev/null
+++ b/drivers/soc/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Linux Kernel SOC specific device drivers.
3#
4
5obj-$(CONFIG_ARCH_QCOM) += qcom/
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
new file mode 100644
index 000000000000..7bd2c94f54a4
--- /dev/null
+++ b/drivers/soc/qcom/Kconfig
@@ -0,0 +1,11 @@
1#
2# QCOM Soc drivers
3#
4config QCOM_GSBI
5 tristate "QCOM General Serial Bus Interface"
6 depends on ARCH_QCOM
7 help
8 Say y here to enable GSBI support. The GSBI provides control
9 functions for connecting the underlying serial UART, SPI, and I2C
10 devices to the output pins.
11
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
new file mode 100644
index 000000000000..438901257ac1
--- /dev/null
+++ b/drivers/soc/qcom/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
new file mode 100644
index 000000000000..447458e696a9
--- /dev/null
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -0,0 +1,85 @@
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13
14#include <linux/clk.h>
15#include <linux/err.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/of_platform.h>
20#include <linux/platform_device.h>
21
22#define GSBI_CTRL_REG 0x0000
23#define GSBI_PROTOCOL_SHIFT 4
24
25static int gsbi_probe(struct platform_device *pdev)
26{
27 struct device_node *node = pdev->dev.of_node;
28 struct resource *res;
29 void __iomem *base;
30 struct clk *hclk;
31 u32 mode, crci = 0;
32
33 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
34 base = devm_ioremap_resource(&pdev->dev, res);
35 if (IS_ERR(base))
36 return PTR_ERR(base);
37
38 if (of_property_read_u32(node, "qcom,mode", &mode)) {
39 dev_err(&pdev->dev, "missing mode configuration\n");
40 return -EINVAL;
41 }
42
43 /* not required, so default to 0 if not present */
44 of_property_read_u32(node, "qcom,crci", &crci);
45
46 dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci);
47
48 hclk = devm_clk_get(&pdev->dev, "iface");
49 if (IS_ERR(hclk))
50 return PTR_ERR(hclk);
51
52 clk_prepare_enable(hclk);
53
54 writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci,
55 base + GSBI_CTRL_REG);
56
57 /* make sure the gsbi control write is not reordered */
58 wmb();
59
60 clk_disable_unprepare(hclk);
61
62 return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
63}
64
65static const struct of_device_id gsbi_dt_match[] = {
66 { .compatible = "qcom,gsbi-v1.0.0", },
67 { },
68};
69
70MODULE_DEVICE_TABLE(of, gsbi_dt_match);
71
72static struct platform_driver gsbi_driver = {
73 .driver = {
74 .name = "gsbi",
75 .owner = THIS_MODULE,
76 .of_match_table = gsbi_dt_match,
77 },
78 .probe = gsbi_probe,
79};
80
81module_platform_driver(gsbi_driver);
82
83MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
84MODULE_DESCRIPTION("QCOM GSBI driver");
85MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 053b98eb46c8..778e376f197e 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -52,7 +52,6 @@ struct msm_port {
52 struct clk *clk; 52 struct clk *clk;
53 struct clk *pclk; 53 struct clk *pclk;
54 unsigned int imr; 54 unsigned int imr;
55 void __iomem *gsbi_base;
56 int is_uartdm; 55 int is_uartdm;
57 unsigned int old_snap_state; 56 unsigned int old_snap_state;
58}; 57};
@@ -599,9 +598,7 @@ static const char *msm_type(struct uart_port *port)
599static void msm_release_port(struct uart_port *port) 598static void msm_release_port(struct uart_port *port)
600{ 599{
601 struct platform_device *pdev = to_platform_device(port->dev); 600 struct platform_device *pdev = to_platform_device(port->dev);
602 struct msm_port *msm_port = UART_TO_MSM(port);
603 struct resource *uart_resource; 601 struct resource *uart_resource;
604 struct resource *gsbi_resource;
605 resource_size_t size; 602 resource_size_t size;
606 603
607 uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); 604 uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -612,28 +609,12 @@ static void msm_release_port(struct uart_port *port)
612 release_mem_region(port->mapbase, size); 609 release_mem_region(port->mapbase, size);
613 iounmap(port->membase); 610 iounmap(port->membase);
614 port->membase = NULL; 611 port->membase = NULL;
615
616 if (msm_port->gsbi_base) {
617 writel_relaxed(GSBI_PROTOCOL_IDLE,
618 msm_port->gsbi_base + GSBI_CONTROL);
619
620 gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
621 if (unlikely(!gsbi_resource))
622 return;
623
624 size = resource_size(gsbi_resource);
625 release_mem_region(gsbi_resource->start, size);
626 iounmap(msm_port->gsbi_base);
627 msm_port->gsbi_base = NULL;
628 }
629} 612}
630 613
631static int msm_request_port(struct uart_port *port) 614static int msm_request_port(struct uart_port *port)
632{ 615{
633 struct msm_port *msm_port = UART_TO_MSM(port);
634 struct platform_device *pdev = to_platform_device(port->dev); 616 struct platform_device *pdev = to_platform_device(port->dev);
635 struct resource *uart_resource; 617 struct resource *uart_resource;
636 struct resource *gsbi_resource;
637 resource_size_t size; 618 resource_size_t size;
638 int ret; 619 int ret;
639 620
@@ -652,30 +633,8 @@ static int msm_request_port(struct uart_port *port)
652 goto fail_release_port; 633 goto fail_release_port;
653 } 634 }
654 635
655 gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
656 /* Is this a GSBI-based port? */
657 if (gsbi_resource) {
658 size = resource_size(gsbi_resource);
659
660 if (!request_mem_region(gsbi_resource->start, size,
661 "msm_serial")) {
662 ret = -EBUSY;
663 goto fail_release_port_membase;
664 }
665
666 msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
667 if (!msm_port->gsbi_base) {
668 ret = -EBUSY;
669 goto fail_release_gsbi;
670 }
671 }
672
673 return 0; 636 return 0;
674 637
675fail_release_gsbi:
676 release_mem_region(gsbi_resource->start, size);
677fail_release_port_membase:
678 iounmap(port->membase);
679fail_release_port: 638fail_release_port:
680 release_mem_region(port->mapbase, size); 639 release_mem_region(port->mapbase, size);
681 return ret; 640 return ret;
@@ -683,7 +642,6 @@ fail_release_port:
683 642
684static void msm_config_port(struct uart_port *port, int flags) 643static void msm_config_port(struct uart_port *port, int flags)
685{ 644{
686 struct msm_port *msm_port = UART_TO_MSM(port);
687 int ret; 645 int ret;
688 if (flags & UART_CONFIG_TYPE) { 646 if (flags & UART_CONFIG_TYPE) {
689 port->type = PORT_MSM; 647 port->type = PORT_MSM;
@@ -691,9 +649,6 @@ static void msm_config_port(struct uart_port *port, int flags)
691 if (ret) 649 if (ret)
692 return; 650 return;
693 } 651 }
694 if (msm_port->gsbi_base)
695 writel_relaxed(GSBI_PROTOCOL_UART,
696 msm_port->gsbi_base + GSBI_CONTROL);
697} 652}
698 653
699static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) 654static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
@@ -1110,6 +1065,7 @@ static struct of_device_id msm_match_table[] = {
1110 1065
1111static struct platform_driver msm_platform_driver = { 1066static struct platform_driver msm_platform_driver = {
1112 .remove = msm_serial_remove, 1067 .remove = msm_serial_remove,
1068 .probe = msm_serial_probe,
1113 .driver = { 1069 .driver = {
1114 .name = "msm_serial", 1070 .name = "msm_serial",
1115 .owner = THIS_MODULE, 1071 .owner = THIS_MODULE,
@@ -1125,7 +1081,7 @@ static int __init msm_serial_init(void)
1125 if (unlikely(ret)) 1081 if (unlikely(ret))
1126 return ret; 1082 return ret;
1127 1083
1128 ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe); 1084 ret = platform_driver_register(&msm_platform_driver);
1129 if (unlikely(ret)) 1085 if (unlikely(ret))
1130 uart_unregister_driver(&msm_uart_driver); 1086 uart_unregister_driver(&msm_uart_driver);
1131 1087
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 1e9b68b6f9eb..d98d45efdf86 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -109,11 +109,6 @@
109#define UART_ISR 0x0014 109#define UART_ISR 0x0014
110#define UART_ISR_TX_READY (1 << 7) 110#define UART_ISR_TX_READY (1 << 7)
111 111
112#define GSBI_CONTROL 0x0
113#define GSBI_PROTOCOL_CODE 0x30
114#define GSBI_PROTOCOL_UART 0x40
115#define GSBI_PROTOCOL_IDLE 0x0
116
117#define UARTDM_RXFS 0x50 112#define UARTDM_RXFS 0x50
118#define UARTDM_RXFS_BUF_SHIFT 0x7 113#define UARTDM_RXFS_BUF_SHIFT 0x7
119#define UARTDM_RXFS_BUF_MASK 0x7 114#define UARTDM_RXFS_BUF_MASK 0x7