author	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-26 15:22:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-26 15:22:27 -0400
commit	ce53044c68cf4fb6c50a2a0d88786be65fae7235 (patch)
tree	19c21da7d261412192e189ef3fd1a9ff4e7ba5c2 /drivers
parent	0877aa3908aaeeae8fc2850691668c4315d3db56 (diff)
parent	046fae440d32cc6dec8148c7e06a8b4b987f8a2f (diff)
Merge tag 'drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull arm-soc driver specific updates from Olof Johansson:
 "These changes are specific to some driver that may be used by
  multiple boards or socs. The most significant change in here is the
  move of the samsung iommu code from a platform specific in-kernel
  interface to the generic iommu subsystem."

Fix up trivial conflicts in arch/arm/mach-exynos/Kconfig

* tag 'drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (28 commits)
  mmc: dt: Consolidate DT bindings
  iommu/exynos: Add iommu driver for EXYNOS Platforms
  ARM: davinci: optimize the DMA ISR
  ARM: davinci: implement DEBUG_LL port choice
  ARM: tegra: Add SMMU enabler in AHB
  ARM: tegra: Add Tegra AHB driver
  Input: pxa27x_keypad add choice to set direct_key_mask
  Input: pxa27x_keypad direct key may be low active
  Input: pxa27x_keypad bug fix for direct_key_mask
  Input: pxa27x_keypad keep clock on as wakeup source
  ARM: dt: tegra: pinmux changes for USB ULPI
  ARM: tegra: add USB ULPI PHY reset GPIO to device tree
  ARM: tegra: don't hard-code USB ULPI PHY reset_gpio
  ARM: tegra: change pll_p_out4's rate to 24MHz
  ARM: tegra: fix pclk rate
  ARM: tegra: reparent sclk to pll_c_out1
  ARM: tegra: Add pllc clock init table
  ARM: dt: tegra cardhu: basic audio support
  ARM: dt: tegra30.dtsi: Add audio-related nodes
  ARM: tegra: add AUXDATA required for audio
  ...
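The samsung iommu move highlighted above means Exynos System MMU clients are now driven through the generic IOMMU API that exynos-iommu registers for the platform bus, rather than through platform-private calls. A minimal consumer sketch under that assumption; the client device, IOVA, and physical address below are hypothetical and not taken from this merge:

#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Hedged sketch of a generic-IOMMU consumer as of this kernel series;
 * "client_dev" is a hypothetical platform device behind a System MMU. */
static int example_iommu_setup(struct device *client_dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, client_dev);
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}

	/* Map 64KB of contiguous physical memory at a device virtual
	 * address; legal sizes come from the driver's pgsize_bitmap. */
	ret = iommu_map(domain, 0x20000000, 0x40000000, 0x10000,
			IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		iommu_detach_device(domain, client_dev);
		iommu_domain_free(domain);
	}
	return ret;
}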
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/Makefile	2
-rw-r--r--	drivers/amba/Makefile	4
-rw-r--r--	drivers/amba/tegra-ahb.c	293
-rw-r--r--	drivers/dma/ep93xx_dma.c	117
-rw-r--r--	drivers/input/keyboard/pxa27x_keypad.c	52
-rw-r--r--	drivers/iommu/Kconfig	21
-rw-r--r--	drivers/iommu/Makefile	1
-rw-r--r--	drivers/iommu/exynos-iommu.c	1076
-rw-r--r--	drivers/mmc/host/omap_hsmmc.c	2
-rw-r--r--	drivers/mmc/host/sdhci-esdhc-imx.c	2
-rw-r--r--	drivers/mmc/host/sdhci-pltfm.c	8
-rw-r--r--	drivers/usb/host/ehci-tegra.c	5
12 files changed, 1536 insertions, 47 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 0ee98d50f975..2ba29ffef2cb 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_SFI) += sfi/
 # PnP must come after ACPI since it will eventually need to check if acpi
 # was used and do nothing if so
 obj-$(CONFIG_PNP)		+= pnp/
-obj-$(CONFIG_ARM_AMBA)		+= amba/
+obj-y				+= amba/
 # Many drivers will want to use DMA so this has to be made available
 # really early.
 obj-$(CONFIG_DMA_ENGINE)	+= dma/
diff --git a/drivers/amba/Makefile b/drivers/amba/Makefile
index 40fe74097be2..66e81c2f1e3c 100644
--- a/drivers/amba/Makefile
+++ b/drivers/amba/Makefile
@@ -1,2 +1,2 @@
-obj-y	+= bus.o
-
+obj-$(CONFIG_ARM_AMBA)	+= bus.o
+obj-$(CONFIG_TEGRA_AHB) += tegra-ahb.o
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
new file mode 100644
index 000000000000..aa0b1f160528
--- /dev/null
+++ b/drivers/amba/tegra-ahb.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Jay Cheng <jacheng@nvidia.com>
+ *	James Wylder <james.wylder@motorola.com>
+ *	Benoit Goby <benoit@android.com>
+ *	Colin Cross <ccross@android.com>
+ *	Hiroshi DOYU <hdoyu@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_NAME "tegra-ahb"
+
+#define AHB_ARBITRATION_DISABLE		0x00
+#define AHB_ARBITRATION_PRIORITY_CTRL	0x04
+#define  AHB_PRIORITY_WEIGHT(x)		(((x) & 0x7) << 29)
+#define  PRIORITY_SELECT_USB		BIT(6)
+#define  PRIORITY_SELECT_USB2		BIT(18)
+#define  PRIORITY_SELECT_USB3		BIT(17)
+
+#define AHB_GIZMO_AHB_MEM		0x0c
+#define  ENB_FAST_REARBITRATE		BIT(2)
+#define  DONT_SPLIT_AHB_WR		BIT(7)
+
+#define AHB_GIZMO_APB_DMA		0x10
+#define AHB_GIZMO_IDE			0x18
+#define AHB_GIZMO_USB			0x1c
+#define AHB_GIZMO_AHB_XBAR_BRIDGE	0x20
+#define AHB_GIZMO_CPU_AHB_BRIDGE	0x24
+#define AHB_GIZMO_COP_AHB_BRIDGE	0x28
+#define AHB_GIZMO_XBAR_APB_CTLR		0x2c
+#define AHB_GIZMO_VCP_AHB_BRIDGE	0x30
+#define AHB_GIZMO_NAND			0x3c
+#define AHB_GIZMO_SDMMC4		0x44
+#define AHB_GIZMO_XIO			0x48
+#define AHB_GIZMO_BSEV			0x60
+#define AHB_GIZMO_BSEA			0x70
+#define AHB_GIZMO_NOR			0x74
+#define AHB_GIZMO_USB2			0x78
+#define AHB_GIZMO_USB3			0x7c
+#define  IMMEDIATE			BIT(18)
+
+#define AHB_GIZMO_SDMMC1		0x80
+#define AHB_GIZMO_SDMMC2		0x84
+#define AHB_GIZMO_SDMMC3		0x88
+#define AHB_MEM_PREFETCH_CFG_X		0xd8
+#define AHB_ARBITRATION_XBAR_CTRL	0xdc
+#define AHB_MEM_PREFETCH_CFG3		0xe0
+#define AHB_MEM_PREFETCH_CFG4		0xe4
+#define AHB_MEM_PREFETCH_CFG1		0xec
+#define AHB_MEM_PREFETCH_CFG2		0xf0
+#define  PREFETCH_ENB			BIT(31)
+#define  MST_ID(x)			(((x) & 0x1f) << 26)
+#define  AHBDMA_MST_ID			MST_ID(5)
+#define  USB_MST_ID			MST_ID(6)
+#define  USB2_MST_ID			MST_ID(18)
+#define  USB3_MST_ID			MST_ID(17)
+#define  ADDR_BNDRY(x)			(((x) & 0xf) << 21)
+#define  INACTIVITY_TIMEOUT(x)		(((x) & 0xffff) << 0)
+
+#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID	0xf8
+
+#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
+
+static struct platform_driver tegra_ahb_driver;
+
+static const u32 tegra_ahb_gizmo[] = {
+	AHB_ARBITRATION_DISABLE,
+	AHB_ARBITRATION_PRIORITY_CTRL,
+	AHB_GIZMO_AHB_MEM,
+	AHB_GIZMO_APB_DMA,
+	AHB_GIZMO_IDE,
+	AHB_GIZMO_USB,
+	AHB_GIZMO_AHB_XBAR_BRIDGE,
+	AHB_GIZMO_CPU_AHB_BRIDGE,
+	AHB_GIZMO_COP_AHB_BRIDGE,
+	AHB_GIZMO_XBAR_APB_CTLR,
+	AHB_GIZMO_VCP_AHB_BRIDGE,
+	AHB_GIZMO_NAND,
+	AHB_GIZMO_SDMMC4,
+	AHB_GIZMO_XIO,
+	AHB_GIZMO_BSEV,
+	AHB_GIZMO_BSEA,
+	AHB_GIZMO_NOR,
+	AHB_GIZMO_USB2,
+	AHB_GIZMO_USB3,
+	AHB_GIZMO_SDMMC1,
+	AHB_GIZMO_SDMMC2,
+	AHB_GIZMO_SDMMC3,
+	AHB_MEM_PREFETCH_CFG_X,
+	AHB_ARBITRATION_XBAR_CTRL,
+	AHB_MEM_PREFETCH_CFG3,
+	AHB_MEM_PREFETCH_CFG4,
+	AHB_MEM_PREFETCH_CFG1,
+	AHB_MEM_PREFETCH_CFG2,
+	AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID,
+};
+
+struct tegra_ahb {
+	void __iomem	*regs;
+	struct device	*dev;
+	u32		ctx[0];
+};
+
+static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
+{
+	return readl(ahb->regs + offset);
+}
+
+static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
+{
+	writel(value, ahb->regs + offset);
+}
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
+{
+	struct tegra_ahb *ahb = dev_get_drvdata(dev);
+	struct device_node *dn = data;
+
+	return (ahb->dev->of_node == dn) ? 1 : 0;
+}
+
+int tegra_ahb_enable_smmu(struct device_node *dn)
+{
+	struct device *dev;
+	u32 val;
+	struct tegra_ahb *ahb;
+
+	dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn,
+				 tegra_ahb_match_by_smmu);
+	if (!dev)
+		return -EPROBE_DEFER;
+	ahb = dev_get_drvdata(dev);
+	val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
+	val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
+	gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ahb_enable_smmu);
+#endif
+
+static int tegra_ahb_suspend(struct device *dev)
+{
+	int i;
+	struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+	for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+		ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]);
+	return 0;
+}
+
+static int tegra_ahb_resume(struct device *dev)
+{
+	int i;
+	struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+	for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+		gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
+	return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
+			    tegra_ahb_suspend,
+			    tegra_ahb_resume, NULL);
+
+static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
+{
+	u32 val;
+
+	val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM);
+	val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
+	gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM);
+
+	val = gizmo_readl(ahb, AHB_GIZMO_USB);
+	val |= IMMEDIATE;
+	gizmo_writel(ahb, val, AHB_GIZMO_USB);
+
+	val = gizmo_readl(ahb, AHB_GIZMO_USB2);
+	val |= IMMEDIATE;
+	gizmo_writel(ahb, val, AHB_GIZMO_USB2);
+
+	val = gizmo_readl(ahb, AHB_GIZMO_USB3);
+	val |= IMMEDIATE;
+	gizmo_writel(ahb, val, AHB_GIZMO_USB3);
+
+	val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL);
+	val |= PRIORITY_SELECT_USB |
+		PRIORITY_SELECT_USB2 |
+		PRIORITY_SELECT_USB3 |
+		AHB_PRIORITY_WEIGHT(7);
+	gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL);
+
+	val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1);
+	val &= ~MST_ID(~0);
+	val |= PREFETCH_ENB |
+		AHBDMA_MST_ID |
+		ADDR_BNDRY(0xc) |
+		INACTIVITY_TIMEOUT(0x1000);
+	gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1);
+
+	val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2);
+	val &= ~MST_ID(~0);
+	val |= PREFETCH_ENB |
+		USB_MST_ID |
+		ADDR_BNDRY(0xc) |
+		INACTIVITY_TIMEOUT(0x1000);
+	gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2);
+
+	val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3);
+	val &= ~MST_ID(~0);
+	val |= PREFETCH_ENB |
+		USB3_MST_ID |
+		ADDR_BNDRY(0xc) |
+		INACTIVITY_TIMEOUT(0x1000);
+	gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3);
+
+	val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4);
+	val &= ~MST_ID(~0);
+	val |= PREFETCH_ENB |
+		USB2_MST_ID |
+		ADDR_BNDRY(0xc) |
+		INACTIVITY_TIMEOUT(0x1000);
+	gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
+}
+
+static int __devinit tegra_ahb_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct tegra_ahb *ahb;
+	size_t bytes;
+
+	bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo);
+	ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
+	if (!ahb)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
+	if (!ahb->regs)
+		return -EBUSY;
+
+	ahb->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ahb);
+	tegra_ahb_gizmo_init(ahb);
+	return 0;
+}
+
+static int __devexit tegra_ahb_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
+	{ .compatible = "nvidia,tegra30-ahb", },
+	{ .compatible = "nvidia,tegra20-ahb", },
+	{},
+};
+
+static struct platform_driver tegra_ahb_driver = {
+	.probe = tegra_ahb_probe,
+	.remove = __devexit_p(tegra_ahb_remove),
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = tegra_ahb_of_match,
+		.pm = &tegra_ahb_pm,
+	},
+};
+module_platform_driver(tegra_ahb_driver);
+
+MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_DESCRIPTION("Tegra AHB driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
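Note that tegra_ahb_enable_smmu() above returns -EPROBE_DEFER while no tegra-ahb device has bound, so a caller such as the Tegra SMMU enabler mentioned in the shortlog is expected to propagate that error from its own probe and let the driver core retry. A hedged sketch of such a caller; the "nvidia,ahb" phandle name is an assumption for illustration, not defined by this patch:

#include <linux/of.h>
#include <linux/platform_device.h>

extern int tegra_ahb_enable_smmu(struct device_node *dn);

static int example_smmu_probe(struct platform_device *pdev)
{
	struct device_node *ahb;
	int err;

	/* Look up the AHB node this SMMU sits behind; the property
	 * name here is illustrative only. */
	ahb = of_parse_phandle(pdev->dev.of_node, "nvidia,ahb", 0);
	if (!ahb)
		return -ENODEV;

	err = tegra_ahb_enable_smmu(ahb);
	of_node_put(ahb);
	if (err)
		return err; /* -EPROBE_DEFER makes the core retry later */

	/* ... normal SMMU setup would continue here ... */
	return 0;
}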
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f6e9b572b998..c64917ec313d 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -71,6 +71,7 @@
 #define M2M_CONTROL_TM_SHIFT		13
 #define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
 #define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_NFBINT		BIT(21)
 #define M2M_CONTROL_RSS_SHIFT		22
 #define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
 #define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
@@ -79,7 +80,22 @@
 #define M2M_CONTROL_PWSC_SHIFT		25
 
 #define M2M_INTERRUPT			0x0004
-#define M2M_INTERRUPT_DONEINT		BIT(1)
+#define M2M_INTERRUPT_MASK		6
+
+#define M2M_STATUS			0x000c
+#define M2M_STATUS_CTL_SHIFT		1
+#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_BUF_SHIFT		4
+#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_DONE			BIT(6)
 
 #define M2M_BCR0			0x0010
 #define M2M_BCR1			0x0014
@@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
 
 /*
  * M2M DMA implementation
- *
- * For the M2M transfers we don't use NFB at all. This is because it simply
- * doesn't work well with memcpy transfers. When you submit both buffers it is
- * extremely unlikely that you get an NFB interrupt, but it instead reports
- * DONE interrupt and both buffers are already transferred which means that we
- * weren't able to update the next buffer.
- *
- * So for now we "simulate" NFB by just submitting buffer after buffer
- * without double buffering.
  */
 
 static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
@@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
 	m2m_fill_desc(edmac);
 	control |= M2M_CONTROL_DONEINT;
 
+	if (ep93xx_dma_advance_active(edmac)) {
+		m2m_fill_desc(edmac);
+		control |= M2M_CONTROL_NFBINT;
+	}
+
 	/*
 	 * Now we can finally enable the channel. For M2M channel this must be
 	 * done _after_ the BCRx registers are programmed.
@@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
 	}
 }
 
+/*
+ * According to EP93xx User's Guide, we should receive DONE interrupt when all
+ * M2M DMA controller transactions complete normally. This is not always the
+ * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
+ * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
+ * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
+ * In effect, disabling the channel when only DONE bit is set could stop
+ * currently running DMA transfer. To avoid this, we use Buffer FSM and
+ * Control FSM to check current state of DMA channel.
+ */
 static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
 {
+	u32 status = readl(edmac->regs + M2M_STATUS);
+	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
+	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
+	bool done = status & M2M_STATUS_DONE;
+	bool last_done;
 	u32 control;
+	struct ep93xx_dma_desc *desc;
 
-	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+	/* Accept only DONE and NFB interrupts */
+	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
 		return INTERRUPT_UNKNOWN;
 
-	/* Clear the DONE bit */
-	writel(0, edmac->regs + M2M_INTERRUPT);
+	if (done) {
+		/* Clear the DONE bit */
+		writel(0, edmac->regs + M2M_INTERRUPT);
+	}
 
-	/* Disable interrupts and the channel */
-	control = readl(edmac->regs + M2M_CONTROL);
-	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
-	writel(control, edmac->regs + M2M_CONTROL);
+	/*
+	 * Check whether we are done with descriptors or not. This, together
+	 * with DMA channel state, determines action to take in interrupt.
+	 */
+	desc = ep93xx_dma_get_active(edmac);
+	last_done = !desc || desc->txd.cookie;
 
 	/*
-	 * Since we only get DONE interrupt we have to find out ourselves
-	 * whether there still is something to process. So we try to advance
-	 * the chain an see whether it succeeds.
+	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
+	 * DMA channel. Using DONE and NFB bits from channel status register
+	 * or bits from channel interrupt register is not reliable.
 	 */
-	if (ep93xx_dma_advance_active(edmac)) {
-		edmac->edma->hw_submit(edmac);
-		return INTERRUPT_NEXT_BUFFER;
+	if (!last_done &&
+	    (buf_fsm == M2M_STATUS_BUF_NO ||
+	     buf_fsm == M2M_STATUS_BUF_ON)) {
+		/*
+		 * Two buffers are ready for update when Buffer FSM is in
+		 * DMA_NO_BUF state. Only one buffer can be prepared without
+		 * disabling the channel or polling the DONE bit.
+		 * To simplify things, always prepare only one buffer.
+		 */
+		if (ep93xx_dma_advance_active(edmac)) {
+			m2m_fill_desc(edmac);
+			if (done && !edmac->chan.private) {
+				/* Software trigger for memcpy channel */
+				control = readl(edmac->regs + M2M_CONTROL);
+				control |= M2M_CONTROL_START;
+				writel(control, edmac->regs + M2M_CONTROL);
+			}
+			return INTERRUPT_NEXT_BUFFER;
+		} else {
+			last_done = true;
+		}
+	}
+
+	/*
+	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
+	 * and Control FSM is in DMA_STALL state.
+	 */
+	if (last_done &&
+	    buf_fsm == M2M_STATUS_BUF_NO &&
+	    ctl_fsm == M2M_STATUS_CTL_STALL) {
+		/* Disable interrupts and the channel */
+		control = readl(edmac->regs + M2M_CONTROL);
+		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
+			     | M2M_CONTROL_ENABLE);
+		writel(control, edmac->regs + M2M_CONTROL);
+		return INTERRUPT_DONE;
 	}
 
-	return INTERRUPT_DONE;
+	/*
+	 * Nothing to do this time.
+	 */
+	return INTERRUPT_NEXT_BUFFER;
 }
 
 /*
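The reworked m2m_hw_interrupt() above changes only how the channel is kept running between buffers; clients still submit work through the generic dmaengine API. For context, a minimal memcpy submission sketch, assuming a channel was already obtained with dma_request_channel():

#include <linux/dmaengine.h>

static int example_m2m_memcpy(struct dma_chan *chan, dma_addr_t dst,
			      dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;

	/* Prepare a single memcpy descriptor on the M2M channel. */
	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	/* Queue it and kick the engine; completion is driven by the
	 * DONE/NFB handling shown in the hunk above. */
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}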
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 29fe1b2be1c1..7f7b72464a37 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -311,7 +311,15 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
 	if (pdata->enable_rotary0 || pdata->enable_rotary1)
 		pxa27x_keypad_scan_rotary(keypad);
 
-	new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+	/*
+	 * The KPDR_DK only output the key pin level, so it relates to board,
+	 * and low level may be active.
+	 */
+	if (pdata->direct_key_low_active)
+		new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
+	else
+		new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+
 	bits_changed = keypad->direct_key_state ^ new_state;
 
 	if (bits_changed == 0)
@@ -383,7 +391,14 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
 	if (pdata->direct_key_num > direct_key_num)
 		direct_key_num = pdata->direct_key_num;
 
-	keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
+	/*
+	 * Direct keys usage may not start from KP_DKIN0, check the platfrom
+	 * mask data to config the specific.
+	 */
+	if (pdata->direct_key_mask)
+		keypad->direct_key_mask = pdata->direct_key_mask;
+	else
+		keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
 
 	/* enable direct key */
 	if (direct_key_num)
@@ -399,7 +414,7 @@ static int pxa27x_keypad_open(struct input_dev *dev)
 	struct pxa27x_keypad *keypad = input_get_drvdata(dev);
 
 	/* Enable unit clock */
-	clk_enable(keypad->clk);
+	clk_prepare_enable(keypad->clk);
 	pxa27x_keypad_config(keypad);
 
 	return 0;
@@ -410,7 +425,7 @@ static void pxa27x_keypad_close(struct input_dev *dev)
 	struct pxa27x_keypad *keypad = input_get_drvdata(dev);
 
 	/* Disable clock unit */
-	clk_disable(keypad->clk);
+	clk_disable_unprepare(keypad->clk);
 }
 
 #ifdef CONFIG_PM
@@ -419,10 +434,14 @@ static int pxa27x_keypad_suspend(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
 
-	clk_disable(keypad->clk);
-
+	/*
+	 * If the keypad is used a wake up source, clock can not be disabled.
+	 * Or it can not detect the key pressing.
+	 */
 	if (device_may_wakeup(&pdev->dev))
 		enable_irq_wake(keypad->irq);
+	else
+		clk_disable_unprepare(keypad->clk);
 
 	return 0;
 }
@@ -433,19 +452,24 @@ static int pxa27x_keypad_resume(struct device *dev)
 	struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
 	struct input_dev *input_dev = keypad->input_dev;
 
-	if (device_may_wakeup(&pdev->dev))
+	/*
+	 * If the keypad is used as wake up source, the clock is not turned
+	 * off. So do not need configure it again.
+	 */
+	if (device_may_wakeup(&pdev->dev)) {
 		disable_irq_wake(keypad->irq);
+	} else {
+		mutex_lock(&input_dev->mutex);
 
-	mutex_lock(&input_dev->mutex);
+		if (input_dev->users) {
+			/* Enable unit clock */
+			clk_prepare_enable(keypad->clk);
+			pxa27x_keypad_config(keypad);
+		}
 
-	if (input_dev->users) {
-		/* Enable unit clock */
-		clk_enable(keypad->clk);
-		pxa27x_keypad_config(keypad);
+		mutex_unlock(&input_dev->mutex);
 	}
 
-	mutex_unlock(&input_dev->mutex);
-
 	return 0;
 }
 
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c69843742bb0..340893727538 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -162,4 +162,25 @@ config TEGRA_IOMMU_SMMU
 	  space through the SMMU (System Memory Management Unit)
 	  hardware included on Tegra SoCs.
 
+config EXYNOS_IOMMU
+	bool "Exynos IOMMU Support"
+	depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+	select IOMMU_API
+	help
+	  Support for the IOMMU(System MMU) of Samsung Exynos application
+	  processor family. This enables H/W multimedia accellerators to see
+	  non-linear physical memory chunks as a linear memory in their
+	  address spaces
+
+	  If unsure, say N here.
+
+config EXYNOS_IOMMU_DEBUG
+	bool "Debugging log for Exynos IOMMU"
+	depends on EXYNOS_IOMMU
+	help
+	  Select this to see the detailed log message that shows what
+	  happens in the IOMMU driver
+
+	  Say N unless you need kernel log message for IOMMU debugging
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 3e5e82ae9f0d..76e54ef796de 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
+obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
new file mode 100644
index 000000000000..9a114b9ff170
--- /dev/null
+++ b/drivers/iommu/exynos-iommu.c
@@ -0,0 +1,1076 @@
+/* linux/drivers/iommu/exynos_iommu.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
+#include <mach/sysmmu.h>
+
+/* We does not consider super section mapping (16MB) */
+#define SECT_ORDER 20
+#define LPAGE_ORDER 16
+#define SPAGE_ORDER 12
+
+#define SECT_SIZE (1 << SECT_ORDER)
+#define LPAGE_SIZE (1 << LPAGE_ORDER)
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+#define SECT_MASK (~(SECT_SIZE - 1))
+#define LPAGE_MASK (~(LPAGE_SIZE - 1))
+#define SPAGE_MASK (~(SPAGE_SIZE - 1))
+
+#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_section(sent) ((*(sent) & 3) == 2)
+
+#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
+#define lv2ent_small(pent) ((*(pent) & 2) == 2)
+#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+
+#define section_phys(sent) (*(sent) & SECT_MASK)
+#define section_offs(iova) ((iova) & 0xFFFFF)
+#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
+#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define spage_phys(pent) (*(pent) & SPAGE_MASK)
+#define spage_offs(iova) ((iova) & 0xFFF)
+
+#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
+#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+
+#define NUM_LV1ENTRIES 4096
+#define NUM_LV2ENTRIES 256
+
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+
+#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
+
+#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
+
+#define mk_lv1ent_sect(pa) ((pa) | 2)
+#define mk_lv1ent_page(pa) ((pa) | 1)
+#define mk_lv2ent_lpage(pa) ((pa) | 1)
+#define mk_lv2ent_spage(pa) ((pa) | 2)
+
+#define CTRL_ENABLE	0x5
+#define CTRL_BLOCK	0x7
+#define CTRL_DISABLE	0x0
+
+#define REG_MMU_CTRL		0x000
+#define REG_MMU_CFG		0x004
+#define REG_MMU_STATUS		0x008
+#define REG_MMU_FLUSH		0x00C
+#define REG_MMU_FLUSH_ENTRY	0x010
+#define REG_PT_BASE_ADDR	0x014
+#define REG_INT_STATUS		0x018
+#define REG_INT_CLEAR		0x01C
+
+#define REG_PAGE_FAULT_ADDR	0x024
+#define REG_AW_FAULT_ADDR	0x028
+#define REG_AR_FAULT_ADDR	0x02C
+#define REG_DEFAULT_SLAVE_ADDR	0x030
+
+#define REG_MMU_VERSION		0x034
+
+#define REG_PB0_SADDR		0x04C
+#define REG_PB0_EADDR		0x050
+#define REG_PB1_SADDR		0x054
+#define REG_PB1_EADDR		0x058
+
+static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+{
+	return pgtable + lv1ent_offset(iova);
+}
+
+static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+{
+	return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+}
+
+enum exynos_sysmmu_inttype {
+	SYSMMU_PAGEFAULT,
+	SYSMMU_AR_MULTIHIT,
+	SYSMMU_AW_MULTIHIT,
+	SYSMMU_BUSERROR,
+	SYSMMU_AR_SECURITY,
+	SYSMMU_AR_ACCESS,
+	SYSMMU_AW_SECURITY,
+	SYSMMU_AW_PROTECTION, /* 7 */
+	SYSMMU_FAULT_UNKNOWN,
+	SYSMMU_FAULTS_NUM
+};
+
+/*
+ * @itype: type of fault.
+ * @pgtable_base: the physical address of page table base. This is 0 if @itype
+ *		  is SYSMMU_BUSERROR.
+ * @fault_addr: the device (virtual) address that the System MMU tried to
+ *		translated. This is 0 if @itype is SYSMMU_BUSERROR.
+ */
+typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
+			unsigned long pgtable_base, unsigned long fault_addr);
+
+static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
+	REG_PAGE_FAULT_ADDR,
+	REG_AR_FAULT_ADDR,
+	REG_AW_FAULT_ADDR,
+	REG_DEFAULT_SLAVE_ADDR,
+	REG_AR_FAULT_ADDR,
+	REG_AR_FAULT_ADDR,
+	REG_AW_FAULT_ADDR,
+	REG_AW_FAULT_ADDR
+};
+
+static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
+	"PAGE FAULT",
+	"AR MULTI-HIT FAULT",
+	"AW MULTI-HIT FAULT",
+	"BUS ERROR",
+	"AR SECURITY PROTECTION FAULT",
+	"AR ACCESS PROTECTION FAULT",
+	"AW SECURITY PROTECTION FAULT",
+	"AW ACCESS PROTECTION FAULT",
+	"UNKNOWN FAULT"
+};
+
+struct exynos_iommu_domain {
+	struct list_head clients; /* list of sysmmu_drvdata.node */
+	unsigned long *pgtable; /* lv1 page table, 16KB */
+	short *lv2entcnt; /* free lv2 entry counter for each section */
+	spinlock_t lock; /* lock for this structure */
+	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+};
+
+struct sysmmu_drvdata {
+	struct list_head node; /* entry of exynos_iommu_domain.clients */
+	struct device *sysmmu;	/* System MMU's device descriptor */
+	struct device *dev;	/* Owner of system MMU */
+	char *dbgname;
+	int nsfrs;
+	void __iomem **sfrbases;
+	struct clk *clk[2];
+	int activations;
+	rwlock_t lock;
+	struct iommu_domain *domain;
+	sysmmu_fault_handler_t fault_handler;
+	unsigned long pgtable;
+};
+
+static bool set_sysmmu_active(struct sysmmu_drvdata *data)
+{
+	/* return true if the System MMU was not active previously
+	   and it needs to be initialized */
+	return ++data->activations == 1;
+}
+
+static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
+{
+	/* return true if the System MMU is needed to be disabled */
+	BUG_ON(data->activations < 1);
+	return --data->activations == 0;
+}
+
+static bool is_sysmmu_active(struct sysmmu_drvdata *data)
+{
+	return data->activations > 0;
+}
+
+static void sysmmu_unblock(void __iomem *sfrbase)
+{
+	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+}
+
+static bool sysmmu_block(void __iomem *sfrbase)
+{
+	int i = 120;
+
+	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
+	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+		--i;
+
+	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
+		sysmmu_unblock(sfrbase);
+		return false;
+	}
+
+	return true;
+}
+
+static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
+{
+	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
+}
+
+static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
+						unsigned long iova)
+{
+	__raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void __sysmmu_set_ptbase(void __iomem *sfrbase,
+				       unsigned long pgd)
+{
+	__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
+	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
+
+	__sysmmu_tlb_invalidate(sfrbase);
+}
+
+static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
+						unsigned long size, int idx)
+{
+	__raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
+	__raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
+}
+
+void exynos_sysmmu_set_prefbuf(struct device *dev,
+				unsigned long base0, unsigned long size0,
+				unsigned long base1, unsigned long size1)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	unsigned long flags;
+	int i;
+
+	BUG_ON((base0 + size0) <= base0);
+	BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
+
+	read_lock_irqsave(&data->lock, flags);
+	if (!is_sysmmu_active(data))
+		goto finish;
+
+	for (i = 0; i < data->nsfrs; i++) {
+		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+			if (!sysmmu_block(data->sfrbases[i]))
+				continue;
+
+			if (size1 == 0) {
+				if (size0 <= SZ_128K) {
+					base1 = base0;
+					size1 = size0;
+				} else {
+					size1 = size0 -
+						ALIGN(size0 / 2, SZ_64K);
+					size0 = size0 - size1;
+					base1 = base0 + size0;
+				}
+			}
+
+			__sysmmu_set_prefbuf(
+					data->sfrbases[i], base0, size0, 0);
+			__sysmmu_set_prefbuf(
+					data->sfrbases[i], base1, size1, 1);
+
+			sysmmu_unblock(data->sfrbases[i]);
+		}
+	}
+finish:
+	read_unlock_irqrestore(&data->lock, flags);
+}
+
+static void __set_fault_handler(struct sysmmu_drvdata *data,
+					sysmmu_fault_handler_t handler)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&data->lock, flags);
+	data->fault_handler = handler;
+	write_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_set_fault_handler(struct device *dev,
+					sysmmu_fault_handler_t handler)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+	__set_fault_handler(data, handler);
+}
+
+static int default_fault_handler(enum exynos_sysmmu_inttype itype,
+		     unsigned long pgtable_base, unsigned long fault_addr)
+{
+	unsigned long *ent;
+
+	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
+		itype = SYSMMU_FAULT_UNKNOWN;
+
+	pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",
+			sysmmu_fault_name[itype], fault_addr, pgtable_base);
+
+	ent = section_entry(__va(pgtable_base), fault_addr);
+	pr_err("\tLv1 entry: 0x%lx\n", *ent);
+
+	if (lv1ent_page(ent)) {
+		ent = page_entry(ent, fault_addr);
+		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+	}
+
+	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
+
+	BUG();
+
+	return 0;
+}
+
+static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
+{
+	/* SYSMMU is in blocked when interrupt occurred. */
+	struct sysmmu_drvdata *data = dev_id;
+	struct resource *irqres;
+	struct platform_device *pdev;
+	enum exynos_sysmmu_inttype itype;
+	unsigned long addr = -1;
+
+	int i, ret = -ENOSYS;
+
+	read_lock(&data->lock);
+
+	WARN_ON(!is_sysmmu_active(data));
+
+	pdev = to_platform_device(data->sysmmu);
+	for (i = 0; i < (pdev->num_resources / 2); i++) {
+		irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		if (irqres && ((int)irqres->start == irq))
+			break;
+	}
+
+	if (i == pdev->num_resources) {
+		itype = SYSMMU_FAULT_UNKNOWN;
+	} else {
+		itype = (enum exynos_sysmmu_inttype)
+			__ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
+		if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
+			itype = SYSMMU_FAULT_UNKNOWN;
+		else
+			addr = __raw_readl(
+				data->sfrbases[i] + fault_reg_offset[itype]);
+	}
+
+	if (data->domain)
+		ret = report_iommu_fault(data->domain, data->dev,
+				addr, itype);
+
+	if ((ret == -ENOSYS) && data->fault_handler) {
+		unsigned long base = data->pgtable;
+		if (itype != SYSMMU_FAULT_UNKNOWN)
+			base = __raw_readl(
+					data->sfrbases[i] + REG_PT_BASE_ADDR);
+		ret = data->fault_handler(itype, base, addr);
+	}
+
+	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
+		__raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
+	else
+		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
+				data->dbgname, sysmmu_fault_name[itype]);
+
+	if (itype != SYSMMU_FAULT_UNKNOWN)
+		sysmmu_unblock(data->sfrbases[i]);
+
+	read_unlock(&data->lock);
+
+	return IRQ_HANDLED;
+}
+
+static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+{
+	unsigned long flags;
+	bool disabled = false;
+	int i;
+
+	write_lock_irqsave(&data->lock, flags);
+
+	if (!set_sysmmu_inactive(data))
+		goto finish;
+
+	for (i = 0; i < data->nsfrs; i++)
+		__raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+
+	if (data->clk[1])
+		clk_disable(data->clk[1]);
+	if (data->clk[0])
+		clk_disable(data->clk[0]);
+
+	disabled = true;
+	data->pgtable = 0;
+	data->domain = NULL;
+finish:
+	write_unlock_irqrestore(&data->lock, flags);
+
+	if (disabled)
+		dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
+	else
+		dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
+					data->dbgname, data->activations);
+
+	return disabled;
+}
+
+/* __exynos_sysmmu_enable: Enables System MMU
+ *
+ * returns -error if an error occurred and System MMU is not enabled,
+ * 0 if the System MMU has been just enabled and 1 if System MMU was already
+ * enabled before.
+ */
+static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
+			unsigned long pgtable, struct iommu_domain *domain)
+{
+	int i, ret = 0;
+	unsigned long flags;
+
+	write_lock_irqsave(&data->lock, flags);
+
+	if (!set_sysmmu_active(data)) {
+		if (WARN_ON(pgtable != data->pgtable)) {
+			ret = -EBUSY;
+			set_sysmmu_inactive(data);
+		} else {
+			ret = 1;
+		}
+
+		dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
+		goto finish;
+	}
+
+	if (data->clk[0])
+		clk_enable(data->clk[0]);
+	if (data->clk[1])
+		clk_enable(data->clk[1]);
+
+	data->pgtable = pgtable;
+
+	for (i = 0; i < data->nsfrs; i++) {
+		__sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+
+		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+			/* System MMU version is 3.x */
+			__raw_writel((1 << 12) | (2 << 28),
+					data->sfrbases[i] + REG_MMU_CFG);
+			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
+			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
+		}
+
+		__raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+	}
+
+	data->domain = domain;
+
+	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
+finish:
+	write_unlock_irqrestore(&data->lock, flags);
+
+	return ret;
+}
+
+int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	int ret;
+
+	BUG_ON(!memblock_is_memory(pgtable));
+
+	ret = pm_runtime_get_sync(data->sysmmu);
+	if (ret < 0) {
+		dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
+		return ret;
+	}
+
+	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
+	if (WARN_ON(ret < 0)) {
+		pm_runtime_put(data->sysmmu);
+		dev_err(data->sysmmu,
+			"(%s) Already enabled with page table %#lx\n",
+			data->dbgname, data->pgtable);
+	} else {
+		data->dev = dev;
+	}
+
+	return ret;
+}
+
+bool exynos_sysmmu_disable(struct device *dev)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	bool disabled;
+
+	disabled = __exynos_sysmmu_disable(data);
+	pm_runtime_put(data->sysmmu);
+
+	return disabled;
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+{
+	unsigned long flags;
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+	read_lock_irqsave(&data->lock, flags);
+
+	if (is_sysmmu_active(data)) {
+		int i;
+		for (i = 0; i < data->nsfrs; i++) {
+			if (sysmmu_block(data->sfrbases[i])) {
+				__sysmmu_tlb_invalidate_entry(
+						data->sfrbases[i], iova);
+				sysmmu_unblock(data->sfrbases[i]);
+			}
+		}
+	} else {
+		dev_dbg(data->sysmmu,
+			"(%s) Disabled. Skipping invalidating TLB.\n",
+			data->dbgname);
+	}
+
+	read_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_tlb_invalidate(struct device *dev)
+{
+	unsigned long flags;
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+	read_lock_irqsave(&data->lock, flags);
+
+	if (is_sysmmu_active(data)) {
+		int i;
+		for (i = 0; i < data->nsfrs; i++) {
+			if (sysmmu_block(data->sfrbases[i])) {
+				__sysmmu_tlb_invalidate(data->sfrbases[i]);
+				sysmmu_unblock(data->sfrbases[i]);
+			}
+		}
+	} else {
+		dev_dbg(data->sysmmu,
+			"(%s) Disabled. Skipping invalidating TLB.\n",
+			data->dbgname);
+	}
+
+	read_unlock_irqrestore(&data->lock, flags);
+}
+
+static int exynos_sysmmu_probe(struct platform_device *pdev)
+{
+	int i, ret;
+	struct device *dev;
+	struct sysmmu_drvdata *data;
+
+	dev = &pdev->dev;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		dev_dbg(dev, "Not enough memory\n");
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	ret = dev_set_drvdata(dev, data);
+	if (ret) {
+		dev_dbg(dev, "Unabled to initialize driver data\n");
+		goto err_init;
+	}
+
+	data->nsfrs = pdev->num_resources / 2;
+	data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
+								GFP_KERNEL);
+	if (data->sfrbases == NULL) {
+		dev_dbg(dev, "Not enough memory\n");
+		ret = -ENOMEM;
+		goto err_init;
+	}
+
+	for (i = 0; i < data->nsfrs; i++) {
+		struct resource *res;
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res) {
+			dev_dbg(dev, "Unable to find IOMEM region\n");
+			ret = -ENOENT;
+			goto err_res;
+		}
+
+		data->sfrbases[i] = ioremap(res->start, resource_size(res));
+		if (!data->sfrbases[i]) {
+			dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
+							res->start);
+			ret = -ENOENT;
+			goto err_res;
+		}
+	}
+
+	for (i = 0; i < data->nsfrs; i++) {
+		ret = platform_get_irq(pdev, i);
+		if (ret <= 0) {
+			dev_dbg(dev, "Unable to find IRQ resource\n");
+			goto err_irq;
+		}
+
+		ret = request_irq(ret, exynos_sysmmu_irq, 0,
+					dev_name(dev), data);
+		if (ret) {
+			dev_dbg(dev, "Unabled to register interrupt handler\n");
+			goto err_irq;
+		}
+	}
+
+	if (dev_get_platdata(dev)) {
+		char *deli, *beg;
+		struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
+
+		beg = platdata->clockname;
+
+		for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
+			/* NOTHING */;
+
+		if (*deli == '\0')
+			deli = NULL;
+		else
+			*deli = '\0';
+
+		data->clk[0] = clk_get(dev, beg);
+		if (IS_ERR(data->clk[0])) {
+			data->clk[0] = NULL;
+			dev_dbg(dev, "No clock descriptor registered\n");
+		}
+
+		if (data->clk[0] && deli) {
+			*deli = ',';
+			data->clk[1] = clk_get(dev, deli + 1);
+			if (IS_ERR(data->clk[1]))
+				data->clk[1] = NULL;
+		}
+
+		data->dbgname = platdata->dbgname;
+	}
+
+	data->sysmmu = dev;
+	rwlock_init(&data->lock);
+	INIT_LIST_HEAD(&data->node);
+
+	__set_fault_handler(data, &default_fault_handler);
+
+	if (dev->parent)
+		pm_runtime_enable(dev);
+
+	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
+	return 0;
+err_irq:
+	while (i-- > 0) {
+		int irq;
+
+		irq = platform_get_irq(pdev, i);
+		free_irq(irq, data);
+	}
+err_res:
+	while (data->nsfrs-- > 0)
+		iounmap(data->sfrbases[data->nsfrs]);
+	kfree(data->sfrbases);
+err_init:
+	kfree(data);
+err_alloc:
+	dev_err(dev, "Failed to initialize\n");
+	return ret;
+}
+
+static struct platform_driver exynos_sysmmu_driver = {
+	.probe		= exynos_sysmmu_probe,
+	.driver		= {
+		.owner		= THIS_MODULE,
+		.name		= "exynos-sysmmu",
+	}
+};
+
+static inline void pgtable_flush(void *vastart, void *vaend)
+{
+	dmac_flush_range(vastart, vaend);
+	outer_flush_range(virt_to_phys(vastart),
+				virt_to_phys(vaend));
+}
+
+static int exynos_iommu_domain_init(struct iommu_domain *domain)
+{
+	struct exynos_iommu_domain *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->pgtable = (unsigned long *)__get_free_pages(
+						GFP_KERNEL | __GFP_ZERO, 2);
+	if (!priv->pgtable)
+		goto err_pgtable;
+
+	priv->lv2entcnt = (short *)__get_free_pages(
+						GFP_KERNEL | __GFP_ZERO, 1);
+	if (!priv->lv2entcnt)
+		goto err_counter;
+
+	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+
+	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->pgtablelock);
+	INIT_LIST_HEAD(&priv->clients);
+
+	domain->priv = priv;
+	return 0;
+
+err_counter:
+	free_pages((unsigned long)priv->pgtable, 2);
+err_pgtable:
+	kfree(priv);
+	return -ENOMEM;
+}
+
+static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct exynos_iommu_domain *priv = domain->priv;
+	struct sysmmu_drvdata *data;
+	unsigned long flags;
+	int i;
+
+	WARN_ON(!list_empty(&priv->clients));
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	list_for_each_entry(data, &priv->clients, node) {
+		while (!exynos_sysmmu_disable(data->dev))
+			; /* until System MMU is actually disabled */
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	for (i = 0; i < NUM_LV1ENTRIES; i++)
+		if (lv1ent_page(priv->pgtable + i))
+			kfree(__va(lv2table_base(priv->pgtable + i)));
+
+	free_pages((unsigned long)priv->pgtable, 2);
+	free_pages((unsigned long)priv->lv2entcnt, 1);
+	kfree(domain->priv);
+	domain->priv = NULL;
+}
+
+static int exynos_iommu_attach_device(struct iommu_domain *domain,
+				   struct device *dev)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	struct exynos_iommu_domain *priv = domain->priv;
+	unsigned long flags;
+	int ret;
+
+	ret = pm_runtime_get_sync(data->sysmmu);
+	if (ret < 0)
+		return ret;
+
+	ret = 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
+
+	if (ret == 0) {
+		/* 'data->node' must not be appeared in priv->clients */
+		BUG_ON(!list_empty(&data->node));
+		data->dev = dev;
+		list_add_tail(&data->node, &priv->clients);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (ret < 0) {
+		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
+				__func__, __pa(priv->pgtable));
+		pm_runtime_put(data->sysmmu);
+	} else if (ret > 0) {
+		dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
+					__func__, __pa(priv->pgtable));
+	} else {
+		dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
+					__func__, __pa(priv->pgtable));
+	}
+
+	return ret;
+}
+
+static void exynos_iommu_detach_device(struct iommu_domain *domain,
+				    struct device *dev)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+	struct exynos_iommu_domain *priv = domain->priv;
+	struct list_head *pos;
+	unsigned long flags;
+	bool found = false;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	list_for_each(pos, &priv->clients) {
+		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		goto finish;
+
+	if (__exynos_sysmmu_disable(data)) {
+		dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
+					__func__, __pa(priv->pgtable));
+		list_del(&data->node);
+		INIT_LIST_HEAD(&data->node);
+
+	} else {
+		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
+					__func__, __pa(priv->pgtable));
+	}
+
+finish:
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (found)
+		pm_runtime_put(data->sysmmu);
+}
+
+static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
+					short *pgcounter)
+{
+	if (lv1ent_fault(sent)) {
+		unsigned long *pent;
+
+		pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
+		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+		if (!pent)
+			return NULL;
+
+		*sent = mk_lv1ent_page(__pa(pent));
+		*pgcounter = NUM_LV2ENTRIES;
+		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
+		pgtable_flush(sent, sent + 1);
+	}
+
+	return page_entry(sent, iova);
+}
+
+static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+{
+	if (lv1ent_section(sent))
+		return -EADDRINUSE;
+
+	if (lv1ent_page(sent)) {
+		if (*pgcnt != NUM_LV2ENTRIES)
+			return -EADDRINUSE;
+
+		kfree(page_entry(sent, 0));
+
+		*pgcnt = 0;
+	}
+
+	*sent = mk_lv1ent_sect(paddr);
+
+	pgtable_flush(sent, sent + 1);
+
+	return 0;
+}
+
+static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+								short *pgcnt)
+{
+	if (size == SPAGE_SIZE) {
+		if (!lv2ent_fault(pent))
+			return -EADDRINUSE;
+
+		*pent = mk_lv2ent_spage(paddr);
+		pgtable_flush(pent, pent + 1);
+		*pgcnt -= 1;
+	} else { /* size == LPAGE_SIZE */
+		int i;
+		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
+			if (!lv2ent_fault(pent)) {
+				memset(pent, 0, sizeof(*pent) * i);
+				return -EADDRINUSE;
+			}
+
+			*pent = mk_lv2ent_lpage(paddr);
+		}
+		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+		*pgcnt -= SPAGES_PER_LPAGE;
+	}
+
+	return 0;
+}
+
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+			 phys_addr_t paddr, size_t size, int prot)
+{
+	struct exynos_iommu_domain *priv = domain->priv;
+	unsigned long *entry;
+	unsigned long flags;
+	int ret = -ENOMEM;
+
+	BUG_ON(priv->pgtable == NULL);
+
+	spin_lock_irqsave(&priv->pgtablelock, flags);
+
+	entry = section_entry(priv->pgtable, iova);
+
+	if (size == SECT_SIZE) {
+		ret = lv1set_section(entry, paddr,
+					&priv->lv2entcnt[lv1ent_offset(iova)]);
+	} else {
+		unsigned long *pent;
+
+		pent = alloc_lv2entry(entry, iova,
+					&priv->lv2entcnt[lv1ent_offset(iova)]);
+
+		if (!pent)
+			ret = -ENOMEM;
+		else
+			ret = lv2set_page(pent, paddr, size,
+					&priv->lv2entcnt[lv1ent_offset(iova)]);
+	}
+
+	if (ret) {
+		pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
+							__func__, iova, size);
+	}
+
+	spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+	return ret;
+}
+
+static size_t exynos_iommu_unmap(struct iommu_domain *domain,
+					       unsigned long iova, size_t size)
+{
+	struct exynos_iommu_domain *priv = domain->priv;
+	struct sysmmu_drvdata *data;
+	unsigned long flags;
+	unsigned long *ent;
+
+	BUG_ON(priv->pgtable == NULL);
+
+	spin_lock_irqsave(&priv->pgtablelock, flags);
+
+	ent = section_entry(priv->pgtable, iova);
+
+	if (lv1ent_section(ent)) {
+		BUG_ON(size < SECT_SIZE);
+
+		*ent = 0;
+		pgtable_flush(ent, ent + 1);
+		size = SECT_SIZE;
+		goto done;
+	}
+
+	if (unlikely(lv1ent_fault(ent))) {
+		if (size > SECT_SIZE)
+			size = SECT_SIZE;
+		goto done;
+	}
+
+	/* lv1ent_page(sent) == true here */
+
+	ent = page_entry(ent, iova);
+
+	if (unlikely(lv2ent_fault(ent))) {
+		size = SPAGE_SIZE;
+		goto done;
+	}
+
+	if (lv2ent_small(ent)) {
+		*ent = 0;
+		size = SPAGE_SIZE;
+		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
+		goto done;
+	}
+
+	/* lv1ent_large(ent) == true here */
+	BUG_ON(size < LPAGE_SIZE);
+
+	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+
+	size = LPAGE_SIZE;
+	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
+done:
+	spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_for_each_entry(data, &priv->clients, node)
+		sysmmu_tlb_invalidate_entry(data->dev, iova);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+
+	return size;
+}
+
+static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
+					  unsigned long iova)
+{
+	struct exynos_iommu_domain *priv = domain->priv;
+	unsigned long *entry;
+	unsigned long flags;
+	phys_addr_t phys = 0;
+
+	spin_lock_irqsave(&priv->pgtablelock, flags);
+
+	entry = section_entry(priv->pgtable, iova);
+
+	if (lv1ent_section(entry)) {
+		phys = section_phys(entry) + section_offs(iova);
+	} else if (lv1ent_page(entry)) {
+		entry = page_entry(entry, iova);
+
+		if (lv2ent_large(entry))
+			phys = lpage_phys(entry) + lpage_offs(iova);
+		else if (lv2ent_small(entry))
+			phys = spage_phys(entry) + spage_offs(iova);
+	}
+
+	spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+	return phys;
+}
+
+static struct iommu_ops exynos_iommu_ops = {
+	.domain_init = &exynos_iommu_domain_init,
+	.domain_destroy = &exynos_iommu_domain_destroy,
+	.attach_dev = &exynos_iommu_attach_device,
+	.detach_dev = &exynos_iommu_detach_device,
+	.map = &exynos_iommu_map,
+	.unmap = &exynos_iommu_unmap,
+	.iova_to_phys = &exynos_iommu_iova_to_phys,
+	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
+};
+
+static int __init exynos_iommu_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&exynos_sysmmu_driver);
+
+	if (ret == 0)
+		bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+
+	return ret;
+}
+subsys_initcall(exynos_iommu_init);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index a9fc714fb38d..9a7a60aeb19e 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1781,7 +1781,7 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
 			pdata->slots[0].nonremovable = true;
 			pdata->slots[0].no_regulator_off_init = true;
 		}
-	of_property_read_u32(np, "ti,bus-width", &bus_width);
+	of_property_read_u32(np, "bus-width", &bus_width);
 	if (bus_width == 4)
 		pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
 	else if (bus_width == 8)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d190d04636a7..365b16c230f8 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -404,7 +404,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
 	if (!np)
 		return -ENODEV;
 
-	if (of_get_property(np, "fsl,card-wired", NULL))
+	if (of_get_property(np, "non-removable", NULL))
 		boarddata->cd_type = ESDHC_CD_PERMANENT;
 
 	if (of_get_property(np, "fsl,cd-controller", NULL))
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index c5c2a48bdd94..d9a4ef4f1ed0 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -42,7 +42,8 @@ static struct sdhci_ops sdhci_pltfm_ops = {
 #ifdef CONFIG_OF
 static bool sdhci_of_wp_inverted(struct device_node *np)
 {
-	if (of_get_property(np, "sdhci,wp-inverted", NULL))
+	if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
+	    of_get_property(np, "wp-inverted", NULL))
 		return true;
 
 	/* Old device trees don't have the wp-inverted property. */
@@ -59,13 +60,16 @@ void sdhci_get_of_property(struct platform_device *pdev)
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	const __be32 *clk;
+	u32 bus_width;
 	int size;
 
 	if (of_device_is_available(np)) {
 		if (of_get_property(np, "sdhci,auto-cmd12", NULL))
 			host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
 
-		if (of_get_property(np, "sdhci,1-bit-only", NULL))
+		if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
+		    (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+		     bus_width == 1))
 			host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
 
 		if (sdhci_of_wp_inverted(np))
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4a44bf833611..68548236ec42 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -722,8 +722,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
 		}
 	}
 
-	tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
-					TEGRA_USB_PHY_MODE_HOST);
+	tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs,
+					pdata->phy_config,
+					TEGRA_USB_PHY_MODE_HOST);
 	if (IS_ERR(tegra->phy)) {
 		dev_err(&pdev->dev, "Failed to open USB phy\n");
 		err = -ENXIO;