author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-09 19:57:56 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-09 19:57:56 -0500
commit		fe78c54b4788b69bb2a8f157b524c933ea0c66d5 (patch)
tree		2970629a2aa722a72765d10c67895f3748428dab /drivers/memory
parent		6da314122ddc11936c6f054753bbb956a499d020 (diff)
parent		1306b20daa38c1429dabacc9ec8b437cb585e427 (diff)
Merge tag 'omap-gpmc-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC/OMAP GPMC driver cleanup and move from Arnd Bergmann:
 "The GPMC driver has traditionally been considered a part of the OMAP
  platform code and tightly interweaved with some of the boards.  With
  this cleanup, it has finally come to the point where it makes sense to
  move it out of arch/arm into drivers/memory, where we already have
  other drivers for similar hardware.

  The cleanups are still ongoing, with the goal of eventually having a
  standalone driver that does not require an interface to architecture
  code.

  This is a separate branch because of dependencies on multiple other
  branches, and to keep the drivers changes separate from the normal
  cleanups"

* tag 'omap-gpmc-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
  memory: gpmc: Move omap gpmc code to live under drivers
  ARM: OMAP2+: Move GPMC initcall to devices.c
  ARM: OMAP2+: Prepare to move GPMC to drivers by platform data header
  ARM: OMAP2+: Remove unnecesary include in GPMC driver
  ARM: OMAP2+: Drop board file for 3430sdp
  ARM: OMAP2+: Drop board file for ti8168evm
  ARM: OMAP2+: Drop legacy code for gpmc-smc91x.c
  ARM: OMAP2+: Require proper GPMC timings for devices
  ARM: OMAP2+: Show bootloader GPMC timings to allow configuring the .dts file
  ARM: OMAP2+: Fix support for multiple devices on a GPMC chip select
  ARM: OMAP2+: gpmc: Sanity check GPMC fck on probe
  ARM: OMAP2+: gpmc: Keep Chip Select disabled while configuring it
  ARM: OMAP2+: gpmc: Always enable A26-A11 for non NAND devices
  ARM: OMAP2+: gpmc: Error out if timings fail in gpmc_probe_generic_child()
  ARM: OMAP2+: gpmc: Print error message in set_gpmc_timing_reg()
Diffstat (limited to 'drivers/memory')
-rw-r--r--	drivers/memory/Kconfig		|    8
-rw-r--r--	drivers/memory/Makefile		|    1
-rw-r--r--	drivers/memory/omap-gpmc.c	| 2092
3 files changed, 2101 insertions, 0 deletions
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 08bd4cfca2a4..191383d8c94d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -41,6 +41,14 @@ config TI_EMIF
 	  parameters and other settings during frequency, voltage and
 	  temperature changes
 
+config OMAP_GPMC
+	bool
+	help
+	  This driver is for the General Purpose Memory Controller (GPMC)
+	  present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows
+	  interfacing to a variety of asynchronous as well as synchronous
+	  memory drives like NOR, NAND, OneNAND, SRAM.
+
 config MVEBU_DEVBUS
 	bool "Marvell EBU Device Bus Controller"
 	default y
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index ad98bb232623..6b6548124473 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -8,6 +8,7 @@ endif
 obj-$(CONFIG_ATMEL_SDRAMC)	+= atmel-sdramc.o
 obj-$(CONFIG_TI_AEMIF)		+= ti-aemif.o
 obj-$(CONFIG_TI_EMIF)		+= emif.o
+obj-$(CONFIG_OMAP_GPMC)	+= omap-gpmc.o
 obj-$(CONFIG_FSL_CORENET_CF)	+= fsl-corenet-cf.o
 obj-$(CONFIG_FSL_IFC)		+= fsl_ifc.o
 obj-$(CONFIG_MVEBU_DEVBUS)	+= mvebu-devbus.o
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
new file mode 100644
index 000000000000..ffc5e60c0664
--- /dev/null
+++ b/drivers/memory/omap-gpmc.c
@@ -0,0 +1,2092 @@
1/*
2 * GPMC support functions
3 *
4 * Copyright (C) 2005-2006 Nokia Corporation
5 *
6 * Author: Juha Yrjola
7 *
8 * Copyright (C) 2009 Texas Instruments
9 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#undef DEBUG
16
17#include <linux/irq.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/ioport.h>
23#include <linux/spinlock.h>
24#include <linux/io.h>
25#include <linux/module.h>
26#include <linux/interrupt.h>
27#include <linux/platform_device.h>
28#include <linux/of.h>
29#include <linux/of_address.h>
30#include <linux/of_mtd.h>
31#include <linux/of_device.h>
32#include <linux/omap-gpmc.h>
33#include <linux/mtd/nand.h>
34#include <linux/pm_runtime.h>
35
36#include <linux/platform_data/mtd-nand-omap2.h>
37#include <linux/platform_data/mtd-onenand-omap2.h>
38
39#include <asm/mach-types.h>
40
41#define DEVICE_NAME "omap-gpmc"
42
43/* GPMC register offsets */
44#define GPMC_REVISION 0x00
45#define GPMC_SYSCONFIG 0x10
46#define GPMC_SYSSTATUS 0x14
47#define GPMC_IRQSTATUS 0x18
48#define GPMC_IRQENABLE 0x1c
49#define GPMC_TIMEOUT_CONTROL 0x40
50#define GPMC_ERR_ADDRESS 0x44
51#define GPMC_ERR_TYPE 0x48
52#define GPMC_CONFIG 0x50
53#define GPMC_STATUS 0x54
54#define GPMC_PREFETCH_CONFIG1 0x1e0
55#define GPMC_PREFETCH_CONFIG2 0x1e4
56#define GPMC_PREFETCH_CONTROL 0x1ec
57#define GPMC_PREFETCH_STATUS 0x1f0
58#define GPMC_ECC_CONFIG 0x1f4
59#define GPMC_ECC_CONTROL 0x1f8
60#define GPMC_ECC_SIZE_CONFIG 0x1fc
61#define GPMC_ECC1_RESULT 0x200
62#define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
63#define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
64#define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
65#define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
66#define GPMC_ECC_BCH_RESULT_4 0x300 /* not available on OMAP2 */
67#define GPMC_ECC_BCH_RESULT_5 0x304 /* not available on OMAP2 */
68#define GPMC_ECC_BCH_RESULT_6 0x308 /* not available on OMAP2 */
69
70/* GPMC ECC control settings */
71#define GPMC_ECC_CTRL_ECCCLEAR 0x100
72#define GPMC_ECC_CTRL_ECCDISABLE 0x000
73#define GPMC_ECC_CTRL_ECCREG1 0x001
74#define GPMC_ECC_CTRL_ECCREG2 0x002
75#define GPMC_ECC_CTRL_ECCREG3 0x003
76#define GPMC_ECC_CTRL_ECCREG4 0x004
77#define GPMC_ECC_CTRL_ECCREG5 0x005
78#define GPMC_ECC_CTRL_ECCREG6 0x006
79#define GPMC_ECC_CTRL_ECCREG7 0x007
80#define GPMC_ECC_CTRL_ECCREG8 0x008
81#define GPMC_ECC_CTRL_ECCREG9 0x009
82
83#define GPMC_CONFIG_LIMITEDADDRESS BIT(1)
84
85#define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
86#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
87#define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
88#define GPMC_CONFIG4_WEEXTRADELAY BIT(23)
89#define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6)
90#define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7)
91
92#define GPMC_CS0_OFFSET 0x60
93#define GPMC_CS_SIZE 0x30
94#define GPMC_BCH_SIZE 0x10
95
96#define GPMC_MEM_END 0x3FFFFFFF
97
98#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
99#define GPMC_SECTION_SHIFT 28 /* 128 MB */
100
101#define CS_NUM_SHIFT 24
102#define ENABLE_PREFETCH (0x1 << 7)
103#define DMA_MPU_MODE 2
104
105#define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
106#define GPMC_REVISION_MINOR(l) (l & 0xf)
107
108#define GPMC_HAS_WR_ACCESS 0x1
109#define GPMC_HAS_WR_DATA_MUX_BUS 0x2
110#define GPMC_HAS_MUX_AAD 0x4
111
112#define GPMC_NR_WAITPINS 4
113
114#define GPMC_CS_CONFIG1 0x00
115#define GPMC_CS_CONFIG2 0x04
116#define GPMC_CS_CONFIG3 0x08
117#define GPMC_CS_CONFIG4 0x0c
118#define GPMC_CS_CONFIG5 0x10
119#define GPMC_CS_CONFIG6 0x14
120#define GPMC_CS_CONFIG7 0x18
121#define GPMC_CS_NAND_COMMAND 0x1c
122#define GPMC_CS_NAND_ADDRESS 0x20
123#define GPMC_CS_NAND_DATA 0x24
124
125/* Control Commands */
126#define GPMC_CONFIG_RDY_BSY 0x00000001
127#define GPMC_CONFIG_DEV_SIZE 0x00000002
128#define GPMC_CONFIG_DEV_TYPE 0x00000003
129#define GPMC_SET_IRQ_STATUS 0x00000004
130
131#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31)
132#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30)
133#define GPMC_CONFIG1_READTYPE_ASYNC (0 << 29)
134#define GPMC_CONFIG1_READTYPE_SYNC (1 << 29)
135#define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28)
136#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27)
137#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27)
138#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25)
139#define GPMC_CONFIG1_PAGE_LEN(val) ((val & 3) << 23)
140#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22)
141#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21)
142#define GPMC_CONFIG1_WAIT_MON_IIME(val) ((val & 3) << 18)
143#define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16)
144#define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12)
145#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1)
146#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10)
147#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0)
148#define GPMC_CONFIG1_MUXTYPE(val) ((val & 3) << 8)
149#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4)
150#define GPMC_CONFIG1_FCLK_DIV(val) (val & 3)
151#define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1))
152#define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2))
153#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3))
154#define GPMC_CONFIG7_CSVALID (1 << 6)
155
156#define GPMC_DEVICETYPE_NOR 0
157#define GPMC_DEVICETYPE_NAND 2
158#define GPMC_CONFIG_WRITEPROTECT 0x00000010
159#define WR_RD_PIN_MONITORING 0x00600000
160
161#define GPMC_ENABLE_IRQ 0x0000000d
162
163/* ECC commands */
164#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */
165#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */
166#define GPMC_ECC_READSYN 2 /* Reset before syndrome is read back */
167
168/* XXX: Only NAND irq has been considered, currently these are the only ones used
169 */
170#define GPMC_NR_IRQ 2
171
172struct gpmc_cs_data {
173 const char *name;
174
175#define GPMC_CS_RESERVED (1 << 0)
176 u32 flags;
177
178 struct resource mem;
179};
180
181struct gpmc_client_irq {
182 unsigned irq;
183 u32 bitmask;
184};
185
186/* Structure to save gpmc cs context */
187struct gpmc_cs_config {
188 u32 config1;
189 u32 config2;
190 u32 config3;
191 u32 config4;
192 u32 config5;
193 u32 config6;
194 u32 config7;
195 int is_valid;
196};
197
198/*
199 * Structure to save/restore gpmc context
200 * to support core off on OMAP3
201 */
202struct omap3_gpmc_regs {
203 u32 sysconfig;
204 u32 irqenable;
205 u32 timeout_ctrl;
206 u32 config;
207 u32 prefetch_config1;
208 u32 prefetch_config2;
209 u32 prefetch_control;
210 struct gpmc_cs_config cs_context[GPMC_CS_NUM];
211};
212
213static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
214static struct irq_chip gpmc_irq_chip;
215static int gpmc_irq_start;
216
217static struct resource gpmc_mem_root;
218static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
219static DEFINE_SPINLOCK(gpmc_mem_lock);
220/* Define chip-selects as reserved by default until probe completes */
221static unsigned int gpmc_cs_num = GPMC_CS_NUM;
222static unsigned int gpmc_nr_waitpins;
223static struct device *gpmc_dev;
224static int gpmc_irq;
225static resource_size_t phys_base, mem_size;
226static unsigned gpmc_capability;
227static void __iomem *gpmc_base;
228
229static struct clk *gpmc_l3_clk;
230
231static irqreturn_t gpmc_handle_irq(int irq, void *dev);
232
233static void gpmc_write_reg(int idx, u32 val)
234{
235 writel_relaxed(val, gpmc_base + idx);
236}
237
238static u32 gpmc_read_reg(int idx)
239{
240 return readl_relaxed(gpmc_base + idx);
241}
242
243void gpmc_cs_write_reg(int cs, int idx, u32 val)
244{
245 void __iomem *reg_addr;
246
247 reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
248 writel_relaxed(val, reg_addr);
249}
250
251static u32 gpmc_cs_read_reg(int cs, int idx)
252{
253 void __iomem *reg_addr;
254
255 reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
256 return readl_relaxed(reg_addr);
257}
258
259/* TODO: Add support for gpmc_fck to clock framework and use it */
260static unsigned long gpmc_get_fclk_period(void)
261{
262 unsigned long rate = clk_get_rate(gpmc_l3_clk);
263
264 rate /= 1000;
265 rate = 1000000000 / rate; /* In picoseconds */
266
267 return rate;
268}
269
270static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
271{
272 unsigned long tick_ps;
273
274 /* Calculate in picosecs to yield more exact results */
275 tick_ps = gpmc_get_fclk_period();
276
277 return (time_ns * 1000 + tick_ps - 1) / tick_ps;
278}
279
280static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
281{
282 unsigned long tick_ps;
283
284 /* Calculate in picosecs to yield more exact results */
285 tick_ps = gpmc_get_fclk_period();
286
287 return (time_ps + tick_ps - 1) / tick_ps;
288}
289
290unsigned int gpmc_ticks_to_ns(unsigned int ticks)
291{
292 return ticks * gpmc_get_fclk_period() / 1000;
293}
294
295static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
296{
297 return ticks * gpmc_get_fclk_period();
298}
299
300static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
301{
302 unsigned long ticks = gpmc_ps_to_ticks(time_ps);
303
304 return ticks * gpmc_get_fclk_period();
305}
306
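/*
 * Worked example (illustrative numbers, not taken from this file): with the
 * L3 clock feeding the GPMC at 100 MHz, gpmc_get_fclk_period() returns
 * 10000 ps.  A 35 ns device timing then converts as:
 *
 *	gpmc_ns_to_ticks(35)  = (35 * 1000 + 9999) / 10000 = 4 ticks
 *	gpmc_ticks_to_ns(4)   = 4 * 10000 / 1000           = 40 ns
 *
 * i.e. requested times are always rounded up to whole fclk cycles.
 */
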
307static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
308{
309 u32 l;
310
311 l = gpmc_cs_read_reg(cs, reg);
312 if (value)
313 l |= mask;
314 else
315 l &= ~mask;
316 gpmc_cs_write_reg(cs, reg, l);
317}
318
319static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
320{
321 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
322 GPMC_CONFIG1_TIME_PARA_GRAN,
323 p->time_para_granularity);
324 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
325 GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
326 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
327 GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
328 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
329 GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
330 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
331 GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
332 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
333 GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
334 p->cycle2cyclesamecsen);
335 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
336 GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
337 p->cycle2cyclediffcsen);
338}
339
340#ifdef DEBUG
341static int get_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
342 bool raw, bool noval, int shift,
343 const char *name)
344{
345 u32 l;
346 int nr_bits, max_value, mask;
347
348 l = gpmc_cs_read_reg(cs, reg);
349 nr_bits = end_bit - st_bit + 1;
350 max_value = (1 << nr_bits) - 1;
351 mask = max_value << st_bit;
352 l = (l & mask) >> st_bit;
353 if (shift)
354 l = (shift << l);
355 if (noval && (l == 0))
356 return 0;
357 if (!raw) {
358 unsigned int time_ns_min, time_ns, time_ns_max;
359
360 time_ns_min = gpmc_ticks_to_ns(l ? l - 1 : 0);
361 time_ns = gpmc_ticks_to_ns(l);
362 time_ns_max = gpmc_ticks_to_ns(l + 1 > max_value ?
363 max_value : l + 1);
364 pr_info("gpmc,%s = <%u> (%u - %u ns, %i ticks)\n",
365 name, time_ns, time_ns_min, time_ns_max, l);
366 } else {
367 pr_info("gpmc,%s = <%u>\n", name, l);
368 }
369
370 return l;
371}
372
373#define GPMC_PRINT_CONFIG(cs, config) \
374 pr_info("cs%i %s: 0x%08x\n", cs, #config, \
375 gpmc_cs_read_reg(cs, config))
376#define GPMC_GET_RAW(reg, st, end, field) \
377 get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 0, 0, field)
378#define GPMC_GET_RAW_BOOL(reg, st, end, field) \
379 get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, 0, field)
380#define GPMC_GET_RAW_SHIFT(reg, st, end, shift, field) \
381 get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, (shift), field)
382#define GPMC_GET_TICKS(reg, st, end, field) \
383 get_gpmc_timing_reg(cs, (reg), (st), (end), 0, 0, 0, field)
384
385static void gpmc_show_regs(int cs, const char *desc)
386{
387 pr_info("gpmc cs%i %s:\n", cs, desc);
388 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1);
389 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2);
390 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3);
391 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4);
392 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5);
393 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6);
394}
395
396/*
397 * Note that gpmc,wait-pin handing wrongly assumes bit 8 is available,
398 * see commit c9fb809.
399 */
400static void gpmc_cs_show_timings(int cs, const char *desc)
401{
402 gpmc_show_regs(cs, desc);
403
404 pr_info("gpmc cs%i access configuration:\n", cs);
405 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
406 GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
407 GPMC_GET_RAW(GPMC_CS_CONFIG1, 12, 13, "device-width");
408 GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
409 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
410 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
411 GPMC_GET_RAW_SHIFT(GPMC_CS_CONFIG1, 23, 24, 4, "burst-length");
412 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write");
413 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write");
414 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "gpmc,sync-read");
415 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 30, 30, "burst-read");
416 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 31, 31, "burst-wrap");
417
418 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG2, 7, 7, "cs-extra-delay");
419
420 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG3, 7, 7, "adv-extra-delay");
421
422 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 23, 23, "we-extra-delay");
423 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 7, 7, "oe-extra-delay");
424
425 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 7, 7, "cycle2cycle-samecsen");
426 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 6, 6, "cycle2cycle-diffcsen");
427
428 pr_info("gpmc cs%i timings configuration:\n", cs);
429 GPMC_GET_TICKS(GPMC_CS_CONFIG2, 0, 3, "cs-on-ns");
430 GPMC_GET_TICKS(GPMC_CS_CONFIG2, 8, 12, "cs-rd-off-ns");
431 GPMC_GET_TICKS(GPMC_CS_CONFIG2, 16, 20, "cs-wr-off-ns");
432
433 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns");
434 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns");
435 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns");
436
437 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns");
438 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns");
439 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns");
440 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns");
441
442 GPMC_GET_TICKS(GPMC_CS_CONFIG5, 0, 4, "rd-cycle-ns");
443 GPMC_GET_TICKS(GPMC_CS_CONFIG5, 8, 12, "wr-cycle-ns");
444 GPMC_GET_TICKS(GPMC_CS_CONFIG5, 16, 20, "access-ns");
445
446 GPMC_GET_TICKS(GPMC_CS_CONFIG5, 24, 27, "page-burst-access-ns");
447
448 GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns");
449 GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns");
450
451 GPMC_GET_TICKS(GPMC_CS_CONFIG1, 18, 19, "wait-monitoring-ns");
452 GPMC_GET_TICKS(GPMC_CS_CONFIG1, 25, 26, "clk-activation-ns");
453
454 GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns");
455 GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns");
456}
457#else
458static inline void gpmc_cs_show_timings(int cs, const char *desc)
459{
460}
461#endif
462
463static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
464 int time, const char *name)
465{
466 u32 l;
467 int ticks, mask, nr_bits;
468
469 if (time == 0)
470 ticks = 0;
471 else
472 ticks = gpmc_ns_to_ticks(time);
473 nr_bits = end_bit - st_bit + 1;
474 mask = (1 << nr_bits) - 1;
475
476 if (ticks > mask) {
477 pr_err("%s: GPMC error! CS%d: %s: %d ns, %d ticks > %d\n",
478 __func__, cs, name, time, ticks, mask);
479
480 return -1;
481 }
482
483 l = gpmc_cs_read_reg(cs, reg);
484#ifdef DEBUG
485 printk(KERN_INFO
486 "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
487 cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
488 (l >> st_bit) & mask, time);
489#endif
490 l &= ~(mask << st_bit);
491 l |= ticks << st_bit;
492 gpmc_cs_write_reg(cs, reg, l);
493
494 return 0;
495}
496
497#define GPMC_SET_ONE(reg, st, end, field) \
498 if (set_gpmc_timing_reg(cs, (reg), (st), (end), \
499 t->field, #field) < 0) \
500 return -1
501
502int gpmc_calc_divider(unsigned int sync_clk)
503{
504 int div;
505 u32 l;
506
507 l = sync_clk + (gpmc_get_fclk_period() - 1);
508 div = l / gpmc_get_fclk_period();
509 if (div > 4)
510 return -1;
511 if (div <= 0)
512 div = 1;
513
514 return div;
515}
516
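/*
 * Example with assumed numbers: for a 10000 ps fclk period, a requested
 * sync_clk of 25000 ps gives l = 25000 + 9999 = 34999 and div = 3, so the
 * GPMC clock runs at fclk/3.  Anything that would need more than a divide
 * by 4 makes this helper return -1.
 */
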
517int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
518{
519 int div;
520 u32 l;
521
522 gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings");
523 div = gpmc_calc_divider(t->sync_clk);
524 if (div < 0)
525 return div;
526
527 GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
528 GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off);
529 GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);
530
531 GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
532 GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
533 GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);
534
535 GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
536 GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
537 GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
538 GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);
539
540 GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle);
541 GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle);
542 GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);
543
544 GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);
545
546 GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
547 GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);
548
549 GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring);
550 GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation);
551
552 if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
553 GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
554 if (gpmc_capability & GPMC_HAS_WR_ACCESS)
555 GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
556
557 /* caller is expected to have initialized CONFIG1 to cover
558 * at least sync vs async
559 */
560 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
561 if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
562#ifdef DEBUG
563 printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
564 cs, (div * gpmc_get_fclk_period()) / 1000, div);
565#endif
566 l &= ~0x03;
567 l |= (div - 1);
568 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
569 }
570
571 gpmc_cs_bool_timings(cs, &t->bool_timings);
572 gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings");
573
574 return 0;
575}
576
577static int gpmc_cs_set_memconf(int cs, u32 base, u32 size)
578{
579 u32 l;
580 u32 mask;
581
582 /*
583 * Ensure that base address is aligned on a
584 * boundary equal to or greater than size.
585 */
586 if (base & (size - 1))
587 return -EINVAL;
588
589 mask = (1 << GPMC_SECTION_SHIFT) - size;
590 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
591 l &= ~0x3f;
592 l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
593 l &= ~(0x0f << 8);
594 l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
595 l |= GPMC_CONFIG7_CSVALID;
596 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
597
598 return 0;
599}
600
601static void gpmc_cs_enable_mem(int cs)
602{
603 u32 l;
604
605 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
606 l |= GPMC_CONFIG7_CSVALID;
607 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
608}
609
610static void gpmc_cs_disable_mem(int cs)
611{
612 u32 l;
613
614 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
615 l &= ~GPMC_CONFIG7_CSVALID;
616 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
617}
618
619static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
620{
621 u32 l;
622 u32 mask;
623
624 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
625 *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
626 mask = (l >> 8) & 0x0f;
627 *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
628}
629
630static int gpmc_cs_mem_enabled(int cs)
631{
632 u32 l;
633
634 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
635 return l & GPMC_CONFIG7_CSVALID;
636}
637
638static void gpmc_cs_set_reserved(int cs, int reserved)
639{
640 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
641
642 gpmc->flags |= GPMC_CS_RESERVED;
643}
644
645static bool gpmc_cs_reserved(int cs)
646{
647 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
648
649 return gpmc->flags & GPMC_CS_RESERVED;
650}
651
652static void gpmc_cs_set_name(int cs, const char *name)
653{
654 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
655
656 gpmc->name = name;
657}
658
659const char *gpmc_cs_get_name(int cs)
660{
661 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
662
663 return gpmc->name;
664}
665
666static unsigned long gpmc_mem_align(unsigned long size)
667{
668 int order;
669
670 size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
671 order = GPMC_CHUNK_SHIFT - 1;
672 do {
673 size >>= 1;
674 order++;
675 } while (size);
676 size = 1 << order;
677 return size;
678}
679
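/*
 * Example: gpmc_mem_align() rounds a request up to the next power of two of
 * at least one GPMC chunk (16 MB).  A hypothetical 20 MB request becomes
 * (20 MB - 1) >> 23 = 2, two loop iterations raise the order from 23 to 25,
 * and the aligned size is 1 << 25 = 32 MB.
 */
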
680static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
681{
682 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
683 struct resource *res = &gpmc->mem;
684 int r;
685
686 size = gpmc_mem_align(size);
687 spin_lock(&gpmc_mem_lock);
688 res->start = base;
689 res->end = base + size - 1;
690 r = request_resource(&gpmc_mem_root, res);
691 spin_unlock(&gpmc_mem_lock);
692
693 return r;
694}
695
696static int gpmc_cs_delete_mem(int cs)
697{
698 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
699 struct resource *res = &gpmc->mem;
700 int r;
701
702 spin_lock(&gpmc_mem_lock);
703 r = release_resource(res);
704 res->start = 0;
705 res->end = 0;
706 spin_unlock(&gpmc_mem_lock);
707
708 return r;
709}
710
711/**
712 * gpmc_cs_remap - remaps a chip-select physical base address
713 * @cs: chip-select to remap
714 * @base: physical base address to re-map chip-select to
715 *
716 * Re-maps a chip-select to a new physical base address specified by
717 * "base". Returns 0 on success and appropriate negative error code
718 * on failure.
719 */
720static int gpmc_cs_remap(int cs, u32 base)
721{
722 int ret;
723 u32 old_base, size;
724
725 if (cs > gpmc_cs_num) {
726 pr_err("%s: requested chip-select is disabled\n", __func__);
727 return -ENODEV;
728 }
729
730 /*
731 * Make sure we ignore any device offsets from the GPMC partition
732 * allocated for the chip select and that the new base conforms
733 * to the GPMC 16MB minimum granularity.
734 */
735 base &= ~(SZ_16M - 1);
736
737 gpmc_cs_get_memconf(cs, &old_base, &size);
738 if (base == old_base)
739 return 0;
740
741 ret = gpmc_cs_delete_mem(cs);
742 if (ret < 0)
743 return ret;
744
745 ret = gpmc_cs_insert_mem(cs, base, size);
746 if (ret < 0)
747 return ret;
748
749 ret = gpmc_cs_set_memconf(cs, base, size);
750
751 return ret;
752}
753
754int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
755{
756 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
757 struct resource *res = &gpmc->mem;
758 int r = -1;
759
760 if (cs > gpmc_cs_num) {
761 pr_err("%s: requested chip-select is disabled\n", __func__);
762 return -ENODEV;
763 }
764 size = gpmc_mem_align(size);
765 if (size > (1 << GPMC_SECTION_SHIFT))
766 return -ENOMEM;
767
768 spin_lock(&gpmc_mem_lock);
769 if (gpmc_cs_reserved(cs)) {
770 r = -EBUSY;
771 goto out;
772 }
773 if (gpmc_cs_mem_enabled(cs))
774 r = adjust_resource(res, res->start & ~(size - 1), size);
775 if (r < 0)
776 r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
777 size, NULL, NULL);
778 if (r < 0)
779 goto out;
780
781 /* Disable CS while changing base address and size mask */
782 gpmc_cs_disable_mem(cs);
783
784 r = gpmc_cs_set_memconf(cs, res->start, resource_size(res));
785 if (r < 0) {
786 release_resource(res);
787 goto out;
788 }
789
790 /* Enable CS */
791 gpmc_cs_enable_mem(cs);
792 *base = res->start;
793 gpmc_cs_set_reserved(cs, 1);
794out:
795 spin_unlock(&gpmc_mem_lock);
796 return r;
797}
798EXPORT_SYMBOL(gpmc_cs_request);
799
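/*
 * Usage sketch (hypothetical caller, not part of this file): board code or
 * a child-device driver reserves an address window on a chip-select before
 * programming its settings and timings, roughly:
 *
 *	unsigned long base;
 *	int ret;
 *
 *	ret = gpmc_cs_request(1, SZ_16M, &base);	// CS1, 16 MB window
 *	if (ret < 0)
 *		return ret;
 *	// base now holds the start of the window; gpmc_cs_free(1) undoes it
 */
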
800void gpmc_cs_free(int cs)
801{
802 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
803 struct resource *res = &gpmc->mem;
804
805 spin_lock(&gpmc_mem_lock);
806 if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
807 printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
808 BUG();
809 spin_unlock(&gpmc_mem_lock);
810 return;
811 }
812 gpmc_cs_disable_mem(cs);
813 if (res->flags)
814 release_resource(res);
815 gpmc_cs_set_reserved(cs, 0);
816 spin_unlock(&gpmc_mem_lock);
817}
818EXPORT_SYMBOL(gpmc_cs_free);
819
820/**
821 * gpmc_configure - write request to configure gpmc
822 * @cmd: command type
823 * @wval: value to write
824 * @return status of the operation
825 */
826int gpmc_configure(int cmd, int wval)
827{
828 u32 regval;
829
830 switch (cmd) {
831 case GPMC_ENABLE_IRQ:
832 gpmc_write_reg(GPMC_IRQENABLE, wval);
833 break;
834
835 case GPMC_SET_IRQ_STATUS:
836 gpmc_write_reg(GPMC_IRQSTATUS, wval);
837 break;
838
839 case GPMC_CONFIG_WP:
840 regval = gpmc_read_reg(GPMC_CONFIG);
841 if (wval)
842 regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
843 else
844 regval |= GPMC_CONFIG_WRITEPROTECT; /* WP is OFF */
845 gpmc_write_reg(GPMC_CONFIG, regval);
846 break;
847
848 default:
849 pr_err("%s: command not supported\n", __func__);
850 return -EINVAL;
851 }
852
853 return 0;
854}
855EXPORT_SYMBOL(gpmc_configure);
856
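/*
 * Usage sketch (illustrative only): callers such as the OMAP NAND code use
 * this to poke individual controls, e.g.:
 *
 *	gpmc_configure(GPMC_CONFIG_WP, 0);		// update write-protect
 *	gpmc_configure(GPMC_SET_IRQ_STATUS, status);	// ack pending events
 *
 * where "status" is whatever GPMC_IRQSTATUS bits the caller wants cleared.
 */
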
857void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
858{
859 int i;
860
861 reg->gpmc_status = gpmc_base + GPMC_STATUS;
862 reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
863 GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
864 reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
865 GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
866 reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
867 GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
868 reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
869 reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
870 reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
871 reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
872 reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
873 reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
874 reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
875 reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
876
877 for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) {
878 reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 +
879 GPMC_BCH_SIZE * i;
880 reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 +
881 GPMC_BCH_SIZE * i;
882 reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 +
883 GPMC_BCH_SIZE * i;
884 reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 +
885 GPMC_BCH_SIZE * i;
886 reg->gpmc_bch_result4[i] = gpmc_base + GPMC_ECC_BCH_RESULT_4 +
887 i * GPMC_BCH_SIZE;
888 reg->gpmc_bch_result5[i] = gpmc_base + GPMC_ECC_BCH_RESULT_5 +
889 i * GPMC_BCH_SIZE;
890 reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 +
891 i * GPMC_BCH_SIZE;
892 }
893}
894
895int gpmc_get_client_irq(unsigned irq_config)
896{
897 int i;
898
899 if (hweight32(irq_config) > 1)
900 return 0;
901
902 for (i = 0; i < GPMC_NR_IRQ; i++)
903 if (gpmc_client_irq[i].bitmask & irq_config)
904 return gpmc_client_irq[i].irq;
905
906 return 0;
907}
908
909static int gpmc_irq_endis(unsigned irq, bool endis)
910{
911 int i;
912 u32 regval;
913
914 for (i = 0; i < GPMC_NR_IRQ; i++)
915 if (irq == gpmc_client_irq[i].irq) {
916 regval = gpmc_read_reg(GPMC_IRQENABLE);
917 if (endis)
918 regval |= gpmc_client_irq[i].bitmask;
919 else
920 regval &= ~gpmc_client_irq[i].bitmask;
921 gpmc_write_reg(GPMC_IRQENABLE, regval);
922 break;
923 }
924
925 return 0;
926}
927
928static void gpmc_irq_disable(struct irq_data *p)
929{
930 gpmc_irq_endis(p->irq, false);
931}
932
933static void gpmc_irq_enable(struct irq_data *p)
934{
935 gpmc_irq_endis(p->irq, true);
936}
937
938static void gpmc_irq_noop(struct irq_data *data) { }
939
940static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
941
942static int gpmc_setup_irq(void)
943{
944 int i;
945 u32 regval;
946
947 if (!gpmc_irq)
948 return -EINVAL;
949
950 gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
951 if (gpmc_irq_start < 0) {
952 pr_err("irq_alloc_descs failed\n");
953 return gpmc_irq_start;
954 }
955
956 gpmc_irq_chip.name = "gpmc";
957 gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
958 gpmc_irq_chip.irq_enable = gpmc_irq_enable;
959 gpmc_irq_chip.irq_disable = gpmc_irq_disable;
960 gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
961 gpmc_irq_chip.irq_ack = gpmc_irq_noop;
962 gpmc_irq_chip.irq_mask = gpmc_irq_noop;
963 gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
964
965 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
966 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
967
968 for (i = 0; i < GPMC_NR_IRQ; i++) {
969 gpmc_client_irq[i].irq = gpmc_irq_start + i;
970 irq_set_chip_and_handler(gpmc_client_irq[i].irq,
971 &gpmc_irq_chip, handle_simple_irq);
972 set_irq_flags(gpmc_client_irq[i].irq,
973 IRQF_VALID | IRQF_NOAUTOEN);
974 }
975
976 /* Disable interrupts */
977 gpmc_write_reg(GPMC_IRQENABLE, 0);
978
979 /* clear interrupts */
980 regval = gpmc_read_reg(GPMC_IRQSTATUS);
981 gpmc_write_reg(GPMC_IRQSTATUS, regval);
982
983 return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
984}
985
986static int gpmc_free_irq(void)
987{
988 int i;
989
990 if (gpmc_irq)
991 free_irq(gpmc_irq, NULL);
992
993 for (i = 0; i < GPMC_NR_IRQ; i++) {
994 irq_set_handler(gpmc_client_irq[i].irq, NULL);
995 irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
996 irq_modify_status(gpmc_client_irq[i].irq, 0, 0);
997 }
998
999 irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);
1000
1001 return 0;
1002}
1003
1004static void gpmc_mem_exit(void)
1005{
1006 int cs;
1007
1008 for (cs = 0; cs < gpmc_cs_num; cs++) {
1009 if (!gpmc_cs_mem_enabled(cs))
1010 continue;
1011 gpmc_cs_delete_mem(cs);
1012 }
1013
1014}
1015
1016static void gpmc_mem_init(void)
1017{
1018 int cs;
1019
1020 /*
1021 * The first 1MB of GPMC address space is typically mapped to
1022 * the internal ROM. Never allocate the first page, to
1023 * facilitate bug detection; even if we didn't boot from ROM.
1024 */
1025 gpmc_mem_root.start = SZ_1M;
1026 gpmc_mem_root.end = GPMC_MEM_END;
1027
1028 /* Reserve all regions that have been set up by the bootloader */
1029 for (cs = 0; cs < gpmc_cs_num; cs++) {
1030 u32 base, size;
1031
1032 if (!gpmc_cs_mem_enabled(cs))
1033 continue;
1034 gpmc_cs_get_memconf(cs, &base, &size);
1035 if (gpmc_cs_insert_mem(cs, base, size)) {
1036 pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n",
1037 __func__, cs, base, base + size);
1038 gpmc_cs_disable_mem(cs);
1039 }
1040 }
1041}
1042
1043static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
1044{
1045 u32 temp;
1046 int div;
1047
1048 div = gpmc_calc_divider(sync_clk);
1049 temp = gpmc_ps_to_ticks(time_ps);
1050 temp = (temp + div - 1) / div;
1051 return gpmc_ticks_to_ps(temp * div);
1052}
1053
1054/* XXX: can the cycles be avoided ? */
1055static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
1056 struct gpmc_device_timings *dev_t,
1057 bool mux)
1058{
1059 u32 temp;
1060
1061 /* adv_rd_off */
1062 temp = dev_t->t_avdp_r;
1063 /* XXX: mux check required ? */
1064 if (mux) {
1065 /* XXX: t_avdp not to be required for sync, only added for tusb
1066 * this indirectly necessitates requirement of t_avdp_r and
1067 * t_avdp_w instead of having a single t_avdp
1068 */
1069 temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
1070 temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
1071 }
1072 gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
1073
1074 /* oe_on */
1075 temp = dev_t->t_oeasu; /* XXX: remove this ? */
1076 if (mux) {
1077 temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
1078 temp = max_t(u32, temp, gpmc_t->adv_rd_off +
1079 gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
1080 }
1081 gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
1082
1083 /* access */
1084 /* XXX: any scope for improvement ?, by combining oe_on
1085 * and clk_activation, need to check whether
1086 * access = clk_activation + round to sync clk ?
1087 */
1088 temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
1089 temp += gpmc_t->clk_activation;
1090 if (dev_t->cyc_oe)
1091 temp = max_t(u32, temp, gpmc_t->oe_on +
1092 gpmc_ticks_to_ps(dev_t->cyc_oe));
1093 gpmc_t->access = gpmc_round_ps_to_ticks(temp);
1094
1095 gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
1096 gpmc_t->cs_rd_off = gpmc_t->oe_off;
1097
1098 /* rd_cycle */
1099 temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
1100 temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
1101 gpmc_t->access;
1102 /* XXX: barter t_ce_rdyz with t_cez_r ? */
1103 if (dev_t->t_ce_rdyz)
1104 temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
1105 gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
1106
1107 return 0;
1108}
1109
1110static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
1111 struct gpmc_device_timings *dev_t,
1112 bool mux)
1113{
1114 u32 temp;
1115
1116 /* adv_wr_off */
1117 temp = dev_t->t_avdp_w;
1118 if (mux) {
1119 temp = max_t(u32, temp,
1120 gpmc_t->clk_activation + dev_t->t_avdh);
1121 temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
1122 }
1123 gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
1124
1125 /* wr_data_mux_bus */
1126 temp = max_t(u32, dev_t->t_weasu,
1127 gpmc_t->clk_activation + dev_t->t_rdyo);
1128 /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
1129 * and in that case remember to handle we_on properly
1130 */
1131 if (mux) {
1132 temp = max_t(u32, temp,
1133 gpmc_t->adv_wr_off + dev_t->t_aavdh);
1134 temp = max_t(u32, temp, gpmc_t->adv_wr_off +
1135 gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
1136 }
1137 gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
1138
1139 /* we_on */
1140 if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
1141 gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
1142 else
1143 gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
1144
1145 /* wr_access */
1146 /* XXX: gpmc_capability check reqd ? , even if not, will not harm */
1147 gpmc_t->wr_access = gpmc_t->access;
1148
1149 /* we_off */
1150 temp = gpmc_t->we_on + dev_t->t_wpl;
1151 temp = max_t(u32, temp,
1152 gpmc_t->wr_access + gpmc_ticks_to_ps(1));
1153 temp = max_t(u32, temp,
1154 gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
1155 gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
1156
1157 gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
1158 dev_t->t_wph);
1159
1160 /* wr_cycle */
1161 temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
1162 temp += gpmc_t->wr_access;
1163 /* XXX: barter t_ce_rdyz with t_cez_w ? */
1164 if (dev_t->t_ce_rdyz)
1165 temp = max_t(u32, temp,
1166 gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
1167 gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
1168
1169 return 0;
1170}
1171
1172static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
1173 struct gpmc_device_timings *dev_t,
1174 bool mux)
1175{
1176 u32 temp;
1177
1178 /* adv_rd_off */
1179 temp = dev_t->t_avdp_r;
1180 if (mux)
1181 temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
1182 gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
1183
1184 /* oe_on */
1185 temp = dev_t->t_oeasu;
1186 if (mux)
1187 temp = max_t(u32, temp,
1188 gpmc_t->adv_rd_off + dev_t->t_aavdh);
1189 gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
1190
1191 /* access */
1192 temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
1193 gpmc_t->oe_on + dev_t->t_oe);
1194 temp = max_t(u32, temp,
1195 gpmc_t->cs_on + dev_t->t_ce);
1196 temp = max_t(u32, temp,
1197 gpmc_t->adv_on + dev_t->t_aa);
1198 gpmc_t->access = gpmc_round_ps_to_ticks(temp);
1199
1200 gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
1201 gpmc_t->cs_rd_off = gpmc_t->oe_off;
1202
1203 /* rd_cycle */
1204 temp = max_t(u32, dev_t->t_rd_cycle,
1205 gpmc_t->cs_rd_off + dev_t->t_cez_r);
1206 temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
1207 gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
1208
1209 return 0;
1210}
1211
1212static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
1213 struct gpmc_device_timings *dev_t,
1214 bool mux)
1215{
1216 u32 temp;
1217
1218 /* adv_wr_off */
1219 temp = dev_t->t_avdp_w;
1220 if (mux)
1221 temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
1222 gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
1223
1224 /* wr_data_mux_bus */
1225 temp = dev_t->t_weasu;
1226 if (mux) {
1227 temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
1228 temp = max_t(u32, temp, gpmc_t->adv_wr_off +
1229 gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
1230 }
1231 gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
1232
1233 /* we_on */
1234 if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
1235 gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
1236 else
1237 gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
1238
1239 /* we_off */
1240 temp = gpmc_t->we_on + dev_t->t_wpl;
1241 gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
1242
1243 gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
1244 dev_t->t_wph);
1245
1246 /* wr_cycle */
1247 temp = max_t(u32, dev_t->t_wr_cycle,
1248 gpmc_t->cs_wr_off + dev_t->t_cez_w);
1249 gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
1250
1251 return 0;
1252}
1253
1254static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
1255 struct gpmc_device_timings *dev_t)
1256{
1257 u32 temp;
1258
1259 gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
1260 gpmc_get_fclk_period();
1261
1262 gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
1263 dev_t->t_bacc,
1264 gpmc_t->sync_clk);
1265
1266 temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
1267 gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);
1268
1269 if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
1270 return 0;
1271
1272 if (dev_t->ce_xdelay)
1273 gpmc_t->bool_timings.cs_extra_delay = true;
1274 if (dev_t->avd_xdelay)
1275 gpmc_t->bool_timings.adv_extra_delay = true;
1276 if (dev_t->oe_xdelay)
1277 gpmc_t->bool_timings.oe_extra_delay = true;
1278 if (dev_t->we_xdelay)
1279 gpmc_t->bool_timings.we_extra_delay = true;
1280
1281 return 0;
1282}
1283
1284static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
1285 struct gpmc_device_timings *dev_t,
1286 bool sync)
1287{
1288 u32 temp;
1289
1290 /* cs_on */
1291 gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);
1292
1293 /* adv_on */
1294 temp = dev_t->t_avdasu;
1295 if (dev_t->t_ce_avd)
1296 temp = max_t(u32, temp,
1297 gpmc_t->cs_on + dev_t->t_ce_avd);
1298 gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);
1299
1300 if (sync)
1301 gpmc_calc_sync_common_timings(gpmc_t, dev_t);
1302
1303 return 0;
1304}
1305
1306/* TODO: remove this function once all peripherals are confirmed to
1307 * work with generic timing. Simultaneously gpmc_cs_set_timings()
1308 * has to be modified to handle timings in ps instead of ns
1309*/
1310static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
1311{
1312 t->cs_on /= 1000;
1313 t->cs_rd_off /= 1000;
1314 t->cs_wr_off /= 1000;
1315 t->adv_on /= 1000;
1316 t->adv_rd_off /= 1000;
1317 t->adv_wr_off /= 1000;
1318 t->we_on /= 1000;
1319 t->we_off /= 1000;
1320 t->oe_on /= 1000;
1321 t->oe_off /= 1000;
1322 t->page_burst_access /= 1000;
1323 t->access /= 1000;
1324 t->rd_cycle /= 1000;
1325 t->wr_cycle /= 1000;
1326 t->bus_turnaround /= 1000;
1327 t->cycle2cycle_delay /= 1000;
1328 t->wait_monitoring /= 1000;
1329 t->clk_activation /= 1000;
1330 t->wr_access /= 1000;
1331 t->wr_data_mux_bus /= 1000;
1332}
1333
1334int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
1335 struct gpmc_settings *gpmc_s,
1336 struct gpmc_device_timings *dev_t)
1337{
1338 bool mux = false, sync = false;
1339
1340 if (gpmc_s) {
1341 mux = gpmc_s->mux_add_data ? true : false;
1342 sync = (gpmc_s->sync_read || gpmc_s->sync_write);
1343 }
1344
1345 memset(gpmc_t, 0, sizeof(*gpmc_t));
1346
1347 gpmc_calc_common_timings(gpmc_t, dev_t, sync);
1348
1349 if (gpmc_s && gpmc_s->sync_read)
1350 gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux);
1351 else
1352 gpmc_calc_async_read_timings(gpmc_t, dev_t, mux);
1353
1354 if (gpmc_s && gpmc_s->sync_write)
1355 gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux);
1356 else
1357 gpmc_calc_async_write_timings(gpmc_t, dev_t, mux);
1358
1359 /* TODO: remove, see function definition */
1360 gpmc_convert_ps_to_ns(gpmc_t);
1361
1362 return 0;
1363}
1364
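/*
 * Usage sketch (hypothetical caller): a driver that only knows datasheet
 * numbers fills a struct gpmc_device_timings (times in picoseconds) and
 * lets this helper derive the register-level timings; the values below are
 * made up:
 *
 *	struct gpmc_device_timings dev_t = { .t_oe = 60000, .t_ce = 70000 };
 *	struct gpmc_settings s = { .mux_add_data = GPMC_MUX_AD };
 *	struct gpmc_timings t;
 *
 *	gpmc_calc_timings(&t, &s, &dev_t);
 *	gpmc_cs_set_timings(cs, &t);
 */
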
1365/**
1366 * gpmc_cs_program_settings - programs non-timing related settings
1367 * @cs: GPMC chip-select to program
1368 * @p: pointer to GPMC settings structure
1369 *
1370 * Programs non-timing related settings for a GPMC chip-select, such as
1371 * bus-width, burst configuration, etc. Function should be called once
1372 * for each chip-select that is being used and must be called before
1373 * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1
1374 * register will be initialised to zero by this function. Returns 0 on
1375 * success and appropriate negative error code on failure.
1376 */
1377int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
1378{
1379 u32 config1;
1380
1381 if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) {
1382 pr_err("%s: invalid width %d!", __func__, p->device_width);
1383 return -EINVAL;
1384 }
1385
1386 /* Address-data multiplexing not supported for NAND devices */
1387 if (p->device_nand && p->mux_add_data) {
1388 pr_err("%s: invalid configuration!\n", __func__);
1389 return -EINVAL;
1390 }
1391
1392 if ((p->mux_add_data > GPMC_MUX_AD) ||
1393 ((p->mux_add_data == GPMC_MUX_AAD) &&
1394 !(gpmc_capability & GPMC_HAS_MUX_AAD))) {
1395 pr_err("%s: invalid multiplex configuration!\n", __func__);
1396 return -EINVAL;
1397 }
1398
1399 /* Page/burst mode supports lengths of 4, 8 and 16 bytes */
1400 if (p->burst_read || p->burst_write) {
1401 switch (p->burst_len) {
1402 case GPMC_BURST_4:
1403 case GPMC_BURST_8:
1404 case GPMC_BURST_16:
1405 break;
1406 default:
1407 pr_err("%s: invalid page/burst-length (%d)\n",
1408 __func__, p->burst_len);
1409 return -EINVAL;
1410 }
1411 }
1412
1413 if (p->wait_pin > gpmc_nr_waitpins) {
1414 pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
1415 return -EINVAL;
1416 }
1417
1418 config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1));
1419
1420 if (p->sync_read)
1421 config1 |= GPMC_CONFIG1_READTYPE_SYNC;
1422 if (p->sync_write)
1423 config1 |= GPMC_CONFIG1_WRITETYPE_SYNC;
1424 if (p->wait_on_read)
1425 config1 |= GPMC_CONFIG1_WAIT_READ_MON;
1426 if (p->wait_on_write)
1427 config1 |= GPMC_CONFIG1_WAIT_WRITE_MON;
1428 if (p->wait_on_read || p->wait_on_write)
1429 config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin);
1430 if (p->device_nand)
1431 config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND);
1432 if (p->mux_add_data)
1433 config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data);
1434 if (p->burst_read)
1435 config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP;
1436 if (p->burst_write)
1437 config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP;
1438 if (p->burst_read || p->burst_write) {
1439 config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3);
1440 config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0;
1441 }
1442
1443 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1);
1444
1445 return 0;
1446}
1447
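/*
 * Call-order sketch (hypothetical): because this function re-initialises
 * CONFIG1, settings have to be programmed before timings, roughly:
 *
 *	gpmc_read_settings_dt(np, &s);		// or fill the struct by hand
 *	ret = gpmc_cs_program_settings(cs, &s);
 *	if (!ret)
 *		ret = gpmc_cs_set_timings(cs, &t);
 */
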
1448#ifdef CONFIG_OF
1449static const struct of_device_id gpmc_dt_ids[] = {
1450 { .compatible = "ti,omap2420-gpmc" },
1451 { .compatible = "ti,omap2430-gpmc" },
1452 { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
1453 { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
1454 { .compatible = "ti,am3352-gpmc" }, /* am335x devices */
1455 { }
1456};
1457MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
1458
1459/**
1460 * gpmc_read_settings_dt - read gpmc settings from device-tree
1461 * @np: pointer to device-tree node for a gpmc child device
1462 * @p: pointer to gpmc settings structure
1463 *
1464 * Reads the GPMC settings for a GPMC child device from device-tree and
1465 * stores them in the GPMC settings structure passed. The GPMC settings
1466 * structure is initialised to zero by this function and so any
1467 * previously stored settings will be cleared.
1468 */
1469void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
1470{
1471 memset(p, 0, sizeof(struct gpmc_settings));
1472
1473 p->sync_read = of_property_read_bool(np, "gpmc,sync-read");
1474 p->sync_write = of_property_read_bool(np, "gpmc,sync-write");
1475 of_property_read_u32(np, "gpmc,device-width", &p->device_width);
1476 of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data);
1477
1478 if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) {
1479 p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap");
1480 p->burst_read = of_property_read_bool(np, "gpmc,burst-read");
1481 p->burst_write = of_property_read_bool(np, "gpmc,burst-write");
1482 if (!p->burst_read && !p->burst_write)
1483 pr_warn("%s: page/burst-length set but not used!\n",
1484 __func__);
1485 }
1486
1487 if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) {
1488 p->wait_on_read = of_property_read_bool(np,
1489 "gpmc,wait-on-read");
1490 p->wait_on_write = of_property_read_bool(np,
1491 "gpmc,wait-on-write");
1492 if (!p->wait_on_read && !p->wait_on_write)
1493 pr_debug("%s: rd/wr wait monitoring not enabled!\n",
1494 __func__);
1495 }
1496}
1497
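/*
 * Usage sketch (hypothetical child node): for a GPMC child described in the
 * device tree, the probe code below does roughly:
 *
 *	struct gpmc_settings s;
 *
 *	gpmc_read_settings_dt(child, &s);
 *	// s.sync_read, s.device_width etc. now mirror the gpmc,* properties
 */
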
1498static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
1499 struct gpmc_timings *gpmc_t)
1500{
1501 struct gpmc_bool_timings *p;
1502
1503 if (!np || !gpmc_t)
1504 return;
1505
1506 memset(gpmc_t, 0, sizeof(*gpmc_t));
1507
1508 /* minimum clock period for synchronous mode */
1509 of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk);
1510
1511 /* chip select timings */
1512 of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on);
1513 of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off);
1514 of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off);
1515
1516 /* ADV signal timings */
1517 of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on);
1518 of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off);
1519 of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off);
1520
1521 /* WE signal timings */
1522 of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on);
1523 of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off);
1524
1525 /* OE signal timings */
1526 of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on);
1527 of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off);
1528
1529 /* access and cycle timings */
1530 of_property_read_u32(np, "gpmc,page-burst-access-ns",
1531 &gpmc_t->page_burst_access);
1532 of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access);
1533 of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle);
1534 of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle);
1535 of_property_read_u32(np, "gpmc,bus-turnaround-ns",
1536 &gpmc_t->bus_turnaround);
1537 of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns",
1538 &gpmc_t->cycle2cycle_delay);
1539 of_property_read_u32(np, "gpmc,wait-monitoring-ns",
1540 &gpmc_t->wait_monitoring);
1541 of_property_read_u32(np, "gpmc,clk-activation-ns",
1542 &gpmc_t->clk_activation);
1543
1544 /* only applicable to OMAP3+ */
1545 of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access);
1546 of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns",
1547 &gpmc_t->wr_data_mux_bus);
1548
1549 /* bool timing parameters */
1550 p = &gpmc_t->bool_timings;
1551
1552 p->cycle2cyclediffcsen =
1553 of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen");
1554 p->cycle2cyclesamecsen =
1555 of_property_read_bool(np, "gpmc,cycle2cycle-samecsen");
1556 p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay");
1557 p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay");
1558 p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay");
1559 p->cs_extra_delay = of_property_read_bool(np, "gpmc,cs-extra-delay");
1560 p->time_para_granularity =
1561 of_property_read_bool(np, "gpmc,time-para-granularity");
1562}
1563
1564#if IS_ENABLED(CONFIG_MTD_NAND)
1565
1566static const char * const nand_xfer_types[] = {
1567 [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
1568 [NAND_OMAP_POLLED] = "polled",
1569 [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
1570 [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
1571};
1572
1573static int gpmc_probe_nand_child(struct platform_device *pdev,
1574 struct device_node *child)
1575{
1576 u32 val;
1577 const char *s;
1578 struct gpmc_timings gpmc_t;
1579 struct omap_nand_platform_data *gpmc_nand_data;
1580
1581 if (of_property_read_u32(child, "reg", &val) < 0) {
1582 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1583 child->full_name);
1584 return -ENODEV;
1585 }
1586
1587 gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
1588 GFP_KERNEL);
1589 if (!gpmc_nand_data)
1590 return -ENOMEM;
1591
1592 gpmc_nand_data->cs = val;
1593 gpmc_nand_data->of_node = child;
1594
1595 /* Detect availability of ELM module */
1596 gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
1597 if (gpmc_nand_data->elm_of_node == NULL)
1598 gpmc_nand_data->elm_of_node =
1599 of_parse_phandle(child, "elm_id", 0);
1600
1601 /* select ecc-scheme for NAND */
1602 if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
1603 pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
1604 return -ENODEV;
1605 }
1606
1607 if (!strcmp(s, "sw"))
1608 gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
1609 else if (!strcmp(s, "ham1") ||
1610 !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
1611 gpmc_nand_data->ecc_opt =
1612 OMAP_ECC_HAM1_CODE_HW;
1613 else if (!strcmp(s, "bch4"))
1614 if (gpmc_nand_data->elm_of_node)
1615 gpmc_nand_data->ecc_opt =
1616 OMAP_ECC_BCH4_CODE_HW;
1617 else
1618 gpmc_nand_data->ecc_opt =
1619 OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
1620 else if (!strcmp(s, "bch8"))
1621 if (gpmc_nand_data->elm_of_node)
1622 gpmc_nand_data->ecc_opt =
1623 OMAP_ECC_BCH8_CODE_HW;
1624 else
1625 gpmc_nand_data->ecc_opt =
1626 OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
1627 else if (!strcmp(s, "bch16"))
1628 if (gpmc_nand_data->elm_of_node)
1629 gpmc_nand_data->ecc_opt =
1630 OMAP_ECC_BCH16_CODE_HW;
1631 else
1632 pr_err("%s: BCH16 requires ELM support\n", __func__);
1633 else
1634 pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
1635
1636 /* select data transfer mode for NAND controller */
1637 if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
1638 for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
1639 if (!strcasecmp(s, nand_xfer_types[val])) {
1640 gpmc_nand_data->xfer_type = val;
1641 break;
1642 }
1643
1644 gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child);
1645
1646 val = of_get_nand_bus_width(child);
1647 if (val == 16)
1648 gpmc_nand_data->devsize = NAND_BUSWIDTH_16;
1649
1650 gpmc_read_timings_dt(child, &gpmc_t);
1651 gpmc_nand_init(gpmc_nand_data, &gpmc_t);
1652
1653 return 0;
1654}
1655#else
1656static int gpmc_probe_nand_child(struct platform_device *pdev,
1657 struct device_node *child)
1658{
1659 return 0;
1660}
1661#endif
1662
1663#if IS_ENABLED(CONFIG_MTD_ONENAND)
1664static int gpmc_probe_onenand_child(struct platform_device *pdev,
1665 struct device_node *child)
1666{
1667 u32 val;
1668 struct omap_onenand_platform_data *gpmc_onenand_data;
1669
1670 if (of_property_read_u32(child, "reg", &val) < 0) {
1671 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1672 child->full_name);
1673 return -ENODEV;
1674 }
1675
1676 gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
1677 GFP_KERNEL);
1678 if (!gpmc_onenand_data)
1679 return -ENOMEM;
1680
1681 gpmc_onenand_data->cs = val;
1682 gpmc_onenand_data->of_node = child;
1683 gpmc_onenand_data->dma_channel = -1;
1684
1685 if (!of_property_read_u32(child, "dma-channel", &val))
1686 gpmc_onenand_data->dma_channel = val;
1687
1688 gpmc_onenand_init(gpmc_onenand_data);
1689
1690 return 0;
1691}
1692#else
1693static int gpmc_probe_onenand_child(struct platform_device *pdev,
1694 struct device_node *child)
1695{
1696 return 0;
1697}
1698#endif
1699
1700/**
1701 * gpmc_probe_generic_child - configures the gpmc for a child device
1702 * @pdev: pointer to gpmc platform device
1703 * @child: pointer to device-tree node for child device
1704 *
1705 * Allocates and configures a GPMC chip-select for a child device.
1706 * Returns 0 on success and appropriate negative error code on failure.
1707 */
1708static int gpmc_probe_generic_child(struct platform_device *pdev,
1709 struct device_node *child)
1710{
1711 struct gpmc_settings gpmc_s;
1712 struct gpmc_timings gpmc_t;
1713 struct resource res;
1714 unsigned long base;
1715 const char *name;
1716 int ret, cs;
1717 u32 val;
1718
1719 if (of_property_read_u32(child, "reg", &cs) < 0) {
1720 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1721 child->full_name);
1722 return -ENODEV;
1723 }
1724
1725 if (of_address_to_resource(child, 0, &res) < 0) {
1726 dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
1727 child->full_name);
1728 return -ENODEV;
1729 }
1730
1731 /*
1732 * Check if we have multiple instances of the same device
1733 * on a single chip select. If so, use the already initialized
1734 * timings.
1735 */
1736 name = gpmc_cs_get_name(cs);
1737 if (name && child->name && of_node_cmp(child->name, name) == 0)
1738 goto no_timings;
1739
1740 ret = gpmc_cs_request(cs, resource_size(&res), &base);
1741 if (ret < 0) {
1742 dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
1743 return ret;
1744 }
1745 gpmc_cs_set_name(cs, child->name);
1746
1747 gpmc_read_settings_dt(child, &gpmc_s);
1748 gpmc_read_timings_dt(child, &gpmc_t);
1749
1750 /*
1751 * For some GPMC devices we still need to rely on the bootloader
1752 * timings because the devices can be connected via FPGA.
1753 * REVISIT: Add timing support from slls644g.pdf.
1754 */
1755 if (!gpmc_t.cs_rd_off) {
1756 WARN(1, "enable GPMC debug to configure .dts timings for CS%i\n",
1757 cs);
1758 gpmc_cs_show_timings(cs,
1759 "please add GPMC bootloader timings to .dts");
1760 goto no_timings;
1761 }
1762
1763 /* CS must be disabled while making changes to gpmc configuration */
1764 gpmc_cs_disable_mem(cs);
1765
1766 /*
1767	 * FIXME: gpmc_cs_request() will map the CS to an arbitrary
1768 * location in the gpmc address space. When booting with
1769 * device-tree we want the NOR flash to be mapped to the
1770 * location specified in the device-tree blob. So remap the
1771	 * CS to this location. Once DT migration is complete, we should
1772	 * just make gpmc_cs_request() map a specific address.
1773 */
1774 ret = gpmc_cs_remap(cs, res.start);
1775 if (ret < 0) {
1776 dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
1777 cs, &res.start);
1778 goto err;
1779 }
1780
1781 ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width);
1782 if (ret < 0)
1783 goto err;
1784
1785 ret = gpmc_cs_program_settings(cs, &gpmc_s);
1786 if (ret < 0)
1787 goto err;
1788
1789 ret = gpmc_cs_set_timings(cs, &gpmc_t);
1790 if (ret) {
1791 dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
1792 child->name);
1793 goto err;
1794 }
1795
1796 /* Clear limited address i.e. enable A26-A11 */
1797 val = gpmc_read_reg(GPMC_CONFIG);
1798 val &= ~GPMC_CONFIG_LIMITEDADDRESS;
1799 gpmc_write_reg(GPMC_CONFIG, val);
1800
1801 /* Enable CS region */
1802 gpmc_cs_enable_mem(cs);
1803
1804no_timings:
1805 if (of_platform_device_create(child, NULL, &pdev->dev))
1806 return 0;
1807
1808 dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
1809 ret = -ENODEV;
1810
1811err:
1812 gpmc_cs_free(cs);
1813
1814 return ret;
1815}
1816
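/**
 * gpmc_probe_dt - probe GPMC children described in the device tree
 * @pdev: pointer to gpmc platform device
 *
 * Reads the controller-wide "gpmc,num-cs" and "gpmc,num-waitpins"
 * properties and then walks the available child nodes, dispatching
 * them to the NAND, OneNAND or generic (ethernet/nor/uart) probe
 * helpers above.  A missing or invalid chip-select/wait-pin count is
 * an error; failures while probing individual children only trigger
 * a warning.
 */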
1817static int gpmc_probe_dt(struct platform_device *pdev)
1818{
1819 int ret;
1820 struct device_node *child;
1821 const struct of_device_id *of_id =
1822 of_match_device(gpmc_dt_ids, &pdev->dev);
1823
1824 if (!of_id)
1825 return 0;
1826
1827 ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-cs",
1828 &gpmc_cs_num);
1829 if (ret < 0) {
1830 pr_err("%s: number of chip-selects not defined\n", __func__);
1831 return ret;
1832 } else if (gpmc_cs_num < 1) {
1833 pr_err("%s: all chip-selects are disabled\n", __func__);
1834 return -EINVAL;
1835 } else if (gpmc_cs_num > GPMC_CS_NUM) {
1836 pr_err("%s: number of supported chip-selects cannot be > %d\n",
1837 __func__, GPMC_CS_NUM);
1838 return -EINVAL;
1839 }
1840
1841 ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins",
1842 &gpmc_nr_waitpins);
1843 if (ret < 0) {
1844 pr_err("%s: number of wait pins not found!\n", __func__);
1845 return ret;
1846 }
1847
1848 for_each_available_child_of_node(pdev->dev.of_node, child) {
1849
1850 if (!child->name)
1851 continue;
1852
1853 if (of_node_cmp(child->name, "nand") == 0)
1854 ret = gpmc_probe_nand_child(pdev, child);
1855 else if (of_node_cmp(child->name, "onenand") == 0)
1856 ret = gpmc_probe_onenand_child(pdev, child);
1857 else if (of_node_cmp(child->name, "ethernet") == 0 ||
1858 of_node_cmp(child->name, "nor") == 0 ||
1859 of_node_cmp(child->name, "uart") == 0)
1860 ret = gpmc_probe_generic_child(pdev, child);
1861
1862 if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
1863 __func__, child->full_name))
1864 of_node_put(child);
1865 }
1866
1867 return 0;
1868}
1869#else
1870static int gpmc_probe_dt(struct platform_device *pdev)
1871{
1872 return 0;
1873}
1874#endif
1875
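/*
 * Main probe: map the GPMC register space, look up the interrupt and
 * the functional clock, enable runtime PM, derive the capability flags
 * from the IP revision, then initialize the GPMC address space, the
 * interrupt infrastructure and any children described in the device
 * tree.
 */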
1876static int gpmc_probe(struct platform_device *pdev)
1877{
1878 int rc;
1879 u32 l;
1880 struct resource *res;
1881
1882 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1883 if (res == NULL)
1884 return -ENOENT;
1885
1886 phys_base = res->start;
1887 mem_size = resource_size(res);
1888
1889 gpmc_base = devm_ioremap_resource(&pdev->dev, res);
1890 if (IS_ERR(gpmc_base))
1891 return PTR_ERR(gpmc_base);
1892
1893 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1894 if (res == NULL)
1895 dev_warn(&pdev->dev, "Failed to get resource: irq\n");
1896 else
1897 gpmc_irq = res->start;
1898
1899 gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
1900 if (IS_ERR(gpmc_l3_clk)) {
1901 dev_err(&pdev->dev, "Failed to get GPMC fck\n");
1902 gpmc_irq = 0;
1903 return PTR_ERR(gpmc_l3_clk);
1904 }
1905
1906 if (!clk_get_rate(gpmc_l3_clk)) {
1907 dev_err(&pdev->dev, "Invalid GPMC fck clock rate\n");
1908 return -EINVAL;
1909 }
1910
1911 pm_runtime_enable(&pdev->dev);
1912 pm_runtime_get_sync(&pdev->dev);
1913
1914 gpmc_dev = &pdev->dev;
1915
1916 l = gpmc_read_reg(GPMC_REVISION);
1917
1918 /*
1919 * FIXME: Once device-tree migration is complete the below flags
1920 * should be populated based upon the device-tree compatible
1921 * string. For now just use the IP revision. OMAP3+ devices have
1922 * the wr_access and wr_data_mux_bus register fields. OMAP4+
1923 * devices support the addr-addr-data multiplex protocol.
1924 *
1925 * GPMC IP revisions:
1926 * - OMAP24xx = 2.0
1927 * - OMAP3xxx = 5.0
1928 * - OMAP44xx/54xx/AM335x = 6.0
1929 */
1930 if (GPMC_REVISION_MAJOR(l) > 0x4)
1931 gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
1932 if (GPMC_REVISION_MAJOR(l) > 0x5)
1933 gpmc_capability |= GPMC_HAS_MUX_AAD;
1934 dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
1935 GPMC_REVISION_MINOR(l));
1936
1937 gpmc_mem_init();
1938
1939 if (gpmc_setup_irq() < 0)
1940 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
1941
1942 if (!pdev->dev.of_node) {
1943 gpmc_cs_num = GPMC_CS_NUM;
1944 gpmc_nr_waitpins = GPMC_NR_WAITPINS;
1945 }
1946
1947 rc = gpmc_probe_dt(pdev);
1948 if (rc < 0) {
1949 pm_runtime_put_sync(&pdev->dev);
1950 dev_err(gpmc_dev, "failed to probe DT parameters\n");
1951 return rc;
1952 }
1953
1954 return 0;
1955}
1956
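/* Undo probe in reverse order: interrupts, reserved memory, runtime PM */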
1957static int gpmc_remove(struct platform_device *pdev)
1958{
1959 gpmc_free_irq();
1960 gpmc_mem_exit();
1961 pm_runtime_put_sync(&pdev->dev);
1962 pm_runtime_disable(&pdev->dev);
1963 gpmc_dev = NULL;
1964 return 0;
1965}
1966
1967#ifdef CONFIG_PM_SLEEP
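/*
 * The GPMC register context may be lost while the system is suspended,
 * so save it on the way down and restore it on resume.
 */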
1968static int gpmc_suspend(struct device *dev)
1969{
1970 omap3_gpmc_save_context();
1971 pm_runtime_put_sync(dev);
1972 return 0;
1973}
1974
1975static int gpmc_resume(struct device *dev)
1976{
1977 pm_runtime_get_sync(dev);
1978 omap3_gpmc_restore_context();
1979 return 0;
1980}
1981#endif
1982
1983static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume);
1984
1985static struct platform_driver gpmc_driver = {
1986 .probe = gpmc_probe,
1987 .remove = gpmc_remove,
1988 .driver = {
1989 .name = DEVICE_NAME,
1990 .owner = THIS_MODULE,
1991 .of_match_table = of_match_ptr(gpmc_dt_ids),
1992 .pm = &gpmc_pm_ops,
1993 },
1994};
1995
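/*
 * Registered at postcore_initcall() time (see below) so that the GPMC
 * is ready before the devices connected to it are probed.
 */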
1996static __init int gpmc_init(void)
1997{
1998 return platform_driver_register(&gpmc_driver);
1999}
2000
2001static __exit void gpmc_exit(void)
2002{
2003 platform_driver_unregister(&gpmc_driver);
2004
2005}
2006
2007postcore_initcall(gpmc_init);
2008module_exit(gpmc_exit);
2009
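/*
 * Demultiplex the GPMC interrupt: each bit set in GPMC_IRQSTATUS is
 * forwarded to the matching client interrupt via generic_handle_irq(),
 * after which the handled status bits are acknowledged.
 */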
2010static irqreturn_t gpmc_handle_irq(int irq, void *dev)
2011{
2012 int i;
2013 u32 regval;
2014
2015 regval = gpmc_read_reg(GPMC_IRQSTATUS);
2016
2017 if (!regval)
2018 return IRQ_NONE;
2019
2020 for (i = 0; i < GPMC_NR_IRQ; i++)
2021 if (regval & gpmc_client_irq[i].bitmask)
2022 generic_handle_irq(gpmc_client_irq[i].irq);
2023
2024 gpmc_write_reg(GPMC_IRQSTATUS, regval);
2025
2026 return IRQ_HANDLED;
2027}
2028
2029static struct omap3_gpmc_regs gpmc_context;
2030
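/*
 * Save the global GPMC configuration registers and, for each enabled
 * chip-select, its CONFIG1-CONFIG7 registers so that they can be
 * reprogrammed after a context loss (e.g. across suspend).
 */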
2031void omap3_gpmc_save_context(void)
2032{
2033 int i;
2034
2035 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
2036 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
2037 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
2038 gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
2039 gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
2040 gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
2041 gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
2042 for (i = 0; i < gpmc_cs_num; i++) {
2043 gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
2044 if (gpmc_context.cs_context[i].is_valid) {
2045 gpmc_context.cs_context[i].config1 =
2046 gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
2047 gpmc_context.cs_context[i].config2 =
2048 gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
2049 gpmc_context.cs_context[i].config3 =
2050 gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
2051 gpmc_context.cs_context[i].config4 =
2052 gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
2053 gpmc_context.cs_context[i].config5 =
2054 gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
2055 gpmc_context.cs_context[i].config6 =
2056 gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
2057 gpmc_context.cs_context[i].config7 =
2058 gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
2059 }
2060 }
2061}
2062
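/* Reprogram the saved global and per-chip-select registers */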
2063void omap3_gpmc_restore_context(void)
2064{
2065 int i;
2066
2067 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
2068 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
2069 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
2070 gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
2071 gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
2072 gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
2073 gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
2074 for (i = 0; i < gpmc_cs_num; i++) {
2075 if (gpmc_context.cs_context[i].is_valid) {
2076 gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
2077 gpmc_context.cs_context[i].config1);
2078 gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
2079 gpmc_context.cs_context[i].config2);
2080 gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
2081 gpmc_context.cs_context[i].config3);
2082 gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
2083 gpmc_context.cs_context[i].config4);
2084 gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
2085 gpmc_context.cs_context[i].config5);
2086 gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
2087 gpmc_context.cs_context[i].config6);
2088 gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
2089 gpmc_context.cs_context[i].config7);
2090 }
2091 }
2092}