aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/memory
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-24 14:00:20 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-24 14:00:20 -0400
commit8bc4d5f394a3facbad6af2f18940f1db3b1a0844 (patch)
tree14838a236e87126d4b84d22b9049b9a6f0be878f /drivers/memory
parent29567292c0b5b2fb484125c280a2175141fe2205 (diff)
parente5366a266a8cd4cd6b0fe66876462cca2e1c6a89 (diff)
Merge tag 'for-linus-20160523' of git://git.infradead.org/linux-mtd
Pull MTD updates from Brian Norris: "First cycle with Boris as NAND maintainer! Many (most) bullets stolen from him. Generic: - Migrated NAND LED trigger to be a generic MTD trigger NAND: - Introduction of the "ECC algorithm" concept, to avoid overloading the ECC mode field too much more - Replaced the nand_ecclayout infrastructure with something a little more flexible (finally!) and future proof - Rework of the OMAP GPMC and NAND drivers; the TI folks pulled some of this into their own tree as well - Prepare the sunxi NAND driver to receive DMA support - Handle bitflips in erased pages on GPMI revisions that do not support this in hardware. SPI NOR: - Start using the spi_flash_read() API for SPI drivers that support it (i.e., SPI drivers with special memory-mapped flash modes) And other small scattered improvements" * tag 'for-linus-20160523' of git://git.infradead.org/linux-mtd: (155 commits) mtd: spi-nor: support GigaDevice gd25lq64c mtd: nand_bch: fix spelling of "probably" mtd: brcmnand: respect ECC algorithm set by NAND subsystem gpmi-nand: Handle ECC Errors in erased pages Documentation: devicetree: deprecate "soft_bch" nand-ecc-mode value mtd: nand: add support for "nand-ecc-algo" DT property mtd: mtd: drop NAND_ECC_SOFT_BCH enum value mtd: drop support for NAND_ECC_SOFT_BCH as "soft_bch" mapping mtd: nand: read ECC algorithm from the new field mtd: nand: fsmc: validate ECC setup by checking algorithm directly mtd: nand: set ECC algorithm to Hamming on fallback staging: mt29f_spinand: set ECC algorithm explicitly CRIS v32: nand: set ECC algorithm explicitly mtd: nand: atmel: set ECC algorithm explicitly mtd: nand: davinci: set ECC algorithm explicitly mtd: nand: bf5xx: set ECC algorithm explicitly mtd: nand: omap2: Fix high memory dma prefetch transfer mtd: nand: omap2: Start dma request before enabling prefetch mtd: nandsim: add __init attribute mtd: nand: move of_get_nand_xxx() helpers into nand_base.c ...
Diffstat (limited to 'drivers/memory')
-rw-r--r--drivers/memory/Kconfig1
-rw-r--r--drivers/memory/fsl_ifc.c36
-rw-r--r--drivers/memory/omap-gpmc.c657
3 files changed, 437 insertions, 257 deletions
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index c61a284133e0..81ddb17575a9 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -51,6 +51,7 @@ config TI_EMIF
51 51
52config OMAP_GPMC 52config OMAP_GPMC
53 bool 53 bool
54 select GPIOLIB
54 help 55 help
55 This driver is for the General Purpose Memory Controller (GPMC) 56 This driver is for the General Purpose Memory Controller (GPMC)
56 present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows 57 present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 2a691da8c1c7..904b4af5f142 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -59,11 +59,11 @@ int fsl_ifc_find(phys_addr_t addr_base)
59{ 59{
60 int i = 0; 60 int i = 0;
61 61
62 if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) 62 if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs)
63 return -ENODEV; 63 return -ENODEV;
64 64
65 for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) { 65 for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
66 u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); 66 u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr);
67 if (cspr & CSPR_V && (cspr & CSPR_BA) == 67 if (cspr & CSPR_V && (cspr & CSPR_BA) ==
68 convert_ifc_address(addr_base)) 68 convert_ifc_address(addr_base))
69 return i; 69 return i;
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(fsl_ifc_find);
75 75
76static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) 76static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
77{ 77{
78 struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 78 struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
79 79
80 /* 80 /*
81 * Clear all the common status and event registers 81 * Clear all the common status and event registers
@@ -104,7 +104,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev)
104 irq_dispose_mapping(ctrl->nand_irq); 104 irq_dispose_mapping(ctrl->nand_irq);
105 irq_dispose_mapping(ctrl->irq); 105 irq_dispose_mapping(ctrl->irq);
106 106
107 iounmap(ctrl->regs); 107 iounmap(ctrl->gregs);
108 108
109 dev_set_drvdata(&dev->dev, NULL); 109 dev_set_drvdata(&dev->dev, NULL);
110 kfree(ctrl); 110 kfree(ctrl);
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(nand_irq_lock);
122 122
123static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) 123static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
124{ 124{
125 struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 125 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
126 unsigned long flags; 126 unsigned long flags;
127 u32 stat; 127 u32 stat;
128 128
@@ -157,7 +157,7 @@ static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
157static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) 157static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
158{ 158{
159 struct fsl_ifc_ctrl *ctrl = data; 159 struct fsl_ifc_ctrl *ctrl = data;
160 struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 160 struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
161 u32 err_axiid, err_srcid, status, cs_err, err_addr; 161 u32 err_axiid, err_srcid, status, cs_err, err_addr;
162 irqreturn_t ret = IRQ_NONE; 162 irqreturn_t ret = IRQ_NONE;
163 163
@@ -215,6 +215,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
215{ 215{
216 int ret = 0; 216 int ret = 0;
217 int version, banks; 217 int version, banks;
218 void __iomem *addr;
218 219
219 dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); 220 dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
220 221
@@ -225,22 +226,13 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
225 dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); 226 dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
226 227
227 /* IOMAP the entire IFC region */ 228 /* IOMAP the entire IFC region */
228 fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); 229 fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0);
229 if (!fsl_ifc_ctrl_dev->regs) { 230 if (!fsl_ifc_ctrl_dev->gregs) {
230 dev_err(&dev->dev, "failed to get memory region\n"); 231 dev_err(&dev->dev, "failed to get memory region\n");
231 ret = -ENODEV; 232 ret = -ENODEV;
232 goto err; 233 goto err;
233 } 234 }
234 235
235 version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
236 FSL_IFC_VERSION_MASK;
237 banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
238 dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
239 version >> 24, (version >> 16) & 0xf, banks);
240
241 fsl_ifc_ctrl_dev->version = version;
242 fsl_ifc_ctrl_dev->banks = banks;
243
244 if (of_property_read_bool(dev->dev.of_node, "little-endian")) { 236 if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
245 fsl_ifc_ctrl_dev->little_endian = true; 237 fsl_ifc_ctrl_dev->little_endian = true;
246 dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n"); 238 dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
@@ -249,8 +241,9 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
249 dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); 241 dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
250 } 242 }
251 243
252 version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) & 244 version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) &
253 FSL_IFC_VERSION_MASK; 245 FSL_IFC_VERSION_MASK;
246
254 banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; 247 banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
255 dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", 248 dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
256 version >> 24, (version >> 16) & 0xf, banks); 249 version >> 24, (version >> 16) & 0xf, banks);
@@ -258,6 +251,13 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
258 fsl_ifc_ctrl_dev->version = version; 251 fsl_ifc_ctrl_dev->version = version;
259 fsl_ifc_ctrl_dev->banks = banks; 252 fsl_ifc_ctrl_dev->banks = banks;
260 253
254 addr = fsl_ifc_ctrl_dev->gregs;
255 if (version >= FSL_IFC_VERSION_2_0_0)
256 addr += PGOFFSET_64K;
257 else
258 addr += PGOFFSET_4K;
259 fsl_ifc_ctrl_dev->rregs = addr;
260
261 /* get the Controller level irq */ 261 /* get the Controller level irq */
262 fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 262 fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
263 if (fsl_ifc_ctrl_dev->irq == 0) { 263 if (fsl_ifc_ctrl_dev->irq == 0) {
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 21825ddce4a3..af4884ba6b7c 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -21,15 +21,15 @@
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/gpio/driver.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/irqdomain.h>
25#include <linux/platform_device.h> 27#include <linux/platform_device.h>
26#include <linux/of.h> 28#include <linux/of.h>
27#include <linux/of_address.h> 29#include <linux/of_address.h>
28#include <linux/of_mtd.h>
29#include <linux/of_device.h> 30#include <linux/of_device.h>
30#include <linux/of_platform.h> 31#include <linux/of_platform.h>
31#include <linux/omap-gpmc.h> 32#include <linux/omap-gpmc.h>
32#include <linux/mtd/nand.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34 34
35#include <linux/platform_data/mtd-nand-omap2.h> 35#include <linux/platform_data/mtd-nand-omap2.h>
@@ -81,6 +81,8 @@
81 81
82#define GPMC_CONFIG_LIMITEDADDRESS BIT(1) 82#define GPMC_CONFIG_LIMITEDADDRESS BIT(1)
83 83
84#define GPMC_STATUS_EMPTYWRITEBUFFERSTATUS BIT(0)
85
84#define GPMC_CONFIG2_CSEXTRADELAY BIT(7) 86#define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
85#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7) 87#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
86#define GPMC_CONFIG4_OEEXTRADELAY BIT(7) 88#define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
@@ -92,6 +94,14 @@
92#define GPMC_CS_SIZE 0x30 94#define GPMC_CS_SIZE 0x30
93#define GPMC_BCH_SIZE 0x10 95#define GPMC_BCH_SIZE 0x10
94 96
97/*
98 * The first 1MB of GPMC address space is typically mapped to
99 * the internal ROM. Never allocate the first page, to
100 * facilitate bug detection; even if we didn't boot from ROM.
101 * As GPMC minimum partition size is 16MB we can only start from
102 * there.
103 */
104#define GPMC_MEM_START 0x1000000
95#define GPMC_MEM_END 0x3FFFFFFF 105#define GPMC_MEM_END 0x3FFFFFFF
96 106
97#define GPMC_CHUNK_SHIFT 24 /* 16 MB */ 107#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
@@ -125,7 +135,6 @@
125#define GPMC_CONFIG_RDY_BSY 0x00000001 135#define GPMC_CONFIG_RDY_BSY 0x00000001
126#define GPMC_CONFIG_DEV_SIZE 0x00000002 136#define GPMC_CONFIG_DEV_SIZE 0x00000002
127#define GPMC_CONFIG_DEV_TYPE 0x00000003 137#define GPMC_CONFIG_DEV_TYPE 0x00000003
128#define GPMC_SET_IRQ_STATUS 0x00000004
129 138
130#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31) 139#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31)
131#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30) 140#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30)
@@ -174,16 +183,12 @@
174#define GPMC_CONFIG_WRITEPROTECT 0x00000010 183#define GPMC_CONFIG_WRITEPROTECT 0x00000010
175#define WR_RD_PIN_MONITORING 0x00600000 184#define WR_RD_PIN_MONITORING 0x00600000
176 185
177#define GPMC_ENABLE_IRQ 0x0000000d
178
179/* ECC commands */ 186/* ECC commands */
180#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */ 187#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */
181#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */ 188#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */
182#define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */ 189#define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */
183 190
184/* XXX: Only NAND irq has been considered,currently these are the only ones used 191#define GPMC_NR_NAND_IRQS 2 /* number of NAND specific IRQs */
185 */
186#define GPMC_NR_IRQ 2
187 192
188enum gpmc_clk_domain { 193enum gpmc_clk_domain {
189 GPMC_CD_FCLK, 194 GPMC_CD_FCLK,
@@ -199,11 +204,6 @@ struct gpmc_cs_data {
199 struct resource mem; 204 struct resource mem;
200}; 205};
201 206
202struct gpmc_client_irq {
203 unsigned irq;
204 u32 bitmask;
205};
206
207/* Structure to save gpmc cs context */ 207/* Structure to save gpmc cs context */
208struct gpmc_cs_config { 208struct gpmc_cs_config {
209 u32 config1; 209 u32 config1;
@@ -231,9 +231,15 @@ struct omap3_gpmc_regs {
231 struct gpmc_cs_config cs_context[GPMC_CS_NUM]; 231 struct gpmc_cs_config cs_context[GPMC_CS_NUM];
232}; 232};
233 233
234static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; 234struct gpmc_device {
235static struct irq_chip gpmc_irq_chip; 235 struct device *dev;
236static int gpmc_irq_start; 236 int irq;
237 struct irq_chip irq_chip;
238 struct gpio_chip gpio_chip;
239 int nirqs;
240};
241
242static struct irq_domain *gpmc_irq_domain;
237 243
238static struct resource gpmc_mem_root; 244static struct resource gpmc_mem_root;
239static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM]; 245static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
@@ -241,8 +247,6 @@ static DEFINE_SPINLOCK(gpmc_mem_lock);
241/* Define chip-selects as reserved by default until probe completes */ 247/* Define chip-selects as reserved by default until probe completes */
242static unsigned int gpmc_cs_num = GPMC_CS_NUM; 248static unsigned int gpmc_cs_num = GPMC_CS_NUM;
243static unsigned int gpmc_nr_waitpins; 249static unsigned int gpmc_nr_waitpins;
244static struct device *gpmc_dev;
245static int gpmc_irq;
246static resource_size_t phys_base, mem_size; 250static resource_size_t phys_base, mem_size;
247static unsigned gpmc_capability; 251static unsigned gpmc_capability;
248static void __iomem *gpmc_base; 252static void __iomem *gpmc_base;
@@ -1054,14 +1058,6 @@ int gpmc_configure(int cmd, int wval)
1054 u32 regval; 1058 u32 regval;
1055 1059
1056 switch (cmd) { 1060 switch (cmd) {
1057 case GPMC_ENABLE_IRQ:
1058 gpmc_write_reg(GPMC_IRQENABLE, wval);
1059 break;
1060
1061 case GPMC_SET_IRQ_STATUS:
1062 gpmc_write_reg(GPMC_IRQSTATUS, wval);
1063 break;
1064
1065 case GPMC_CONFIG_WP: 1061 case GPMC_CONFIG_WP:
1066 regval = gpmc_read_reg(GPMC_CONFIG); 1062 regval = gpmc_read_reg(GPMC_CONFIG);
1067 if (wval) 1063 if (wval)
@@ -1084,7 +1080,7 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
1084{ 1080{
1085 int i; 1081 int i;
1086 1082
1087 reg->gpmc_status = gpmc_base + GPMC_STATUS; 1083 reg->gpmc_status = NULL; /* deprecated */
1088 reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET + 1084 reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
1089 GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs; 1085 GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
1090 reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET + 1086 reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
@@ -1118,87 +1114,201 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
1118 } 1114 }
1119} 1115}
1120 1116
1121int gpmc_get_client_irq(unsigned irq_config) 1117static bool gpmc_nand_writebuffer_empty(void)
1122{ 1118{
1123 int i; 1119 if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
1120 return true;
1124 1121
1125 if (hweight32(irq_config) > 1) 1122 return false;
1123}
1124
1125static struct gpmc_nand_ops nand_ops = {
1126 .nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
1127};
1128
1129/**
1130 * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
1131 * @regs: the GPMC NAND register map exclusive for NAND use.
1132 * @cs: GPMC chip select number on which the NAND sits. The
1133 * register map returned will be specific to this chip select.
1134 *
1135 * Returns NULL on error e.g. invalid cs.
1136 */
1137struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
1138{
1139 if (cs >= gpmc_cs_num)
1140 return NULL;
1141
1142 gpmc_update_nand_reg(reg, cs);
1143
1144 return &nand_ops;
1145}
1146EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
1147
1148int gpmc_get_client_irq(unsigned irq_config)
1149{
1150 if (!gpmc_irq_domain) {
1151 pr_warn("%s called before GPMC IRQ domain available\n",
1152 __func__);
1126 return 0; 1153 return 0;
1154 }
1127 1155
1128 for (i = 0; i < GPMC_NR_IRQ; i++) 1156 /* we restrict this to NAND IRQs only */
1129 if (gpmc_client_irq[i].bitmask & irq_config) 1157 if (irq_config >= GPMC_NR_NAND_IRQS)
1130 return gpmc_client_irq[i].irq; 1158 return 0;
1131 1159
1132 return 0; 1160 return irq_create_mapping(gpmc_irq_domain, irq_config);
1133} 1161}
1134 1162
1135static int gpmc_irq_endis(unsigned irq, bool endis) 1163static int gpmc_irq_endis(unsigned long hwirq, bool endis)
1136{ 1164{
1137 int i;
1138 u32 regval; 1165 u32 regval;
1139 1166
1140 for (i = 0; i < GPMC_NR_IRQ; i++) 1167 /* bits GPMC_NR_NAND_IRQS to 8 are reserved */
1141 if (irq == gpmc_client_irq[i].irq) { 1168 if (hwirq >= GPMC_NR_NAND_IRQS)
1142 regval = gpmc_read_reg(GPMC_IRQENABLE); 1169 hwirq += 8 - GPMC_NR_NAND_IRQS;
1143 if (endis) 1170
1144 regval |= gpmc_client_irq[i].bitmask; 1171 regval = gpmc_read_reg(GPMC_IRQENABLE);
1145 else 1172 if (endis)
1146 regval &= ~gpmc_client_irq[i].bitmask; 1173 regval |= BIT(hwirq);
1147 gpmc_write_reg(GPMC_IRQENABLE, regval); 1174 else
1148 break; 1175 regval &= ~BIT(hwirq);
1149 } 1176 gpmc_write_reg(GPMC_IRQENABLE, regval);
1150 1177
1151 return 0; 1178 return 0;
1152} 1179}
1153 1180
1154static void gpmc_irq_disable(struct irq_data *p) 1181static void gpmc_irq_disable(struct irq_data *p)
1155{ 1182{
1156 gpmc_irq_endis(p->irq, false); 1183 gpmc_irq_endis(p->hwirq, false);
1157} 1184}
1158 1185
1159static void gpmc_irq_enable(struct irq_data *p) 1186static void gpmc_irq_enable(struct irq_data *p)
1160{ 1187{
1161 gpmc_irq_endis(p->irq, true); 1188 gpmc_irq_endis(p->hwirq, true);
1162} 1189}
1163 1190
1164static void gpmc_irq_noop(struct irq_data *data) { } 1191static void gpmc_irq_mask(struct irq_data *d)
1192{
1193 gpmc_irq_endis(d->hwirq, false);
1194}
1165 1195
1166static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } 1196static void gpmc_irq_unmask(struct irq_data *d)
1197{
1198 gpmc_irq_endis(d->hwirq, true);
1199}
1167 1200
1168static int gpmc_setup_irq(void) 1201static void gpmc_irq_edge_config(unsigned long hwirq, bool rising_edge)
1169{ 1202{
1170 int i;
1171 u32 regval; 1203 u32 regval;
1172 1204
1173 if (!gpmc_irq) 1205 /* NAND IRQs polarity is not configurable */
1206 if (hwirq < GPMC_NR_NAND_IRQS)
1207 return;
1208
1209 /* WAITPIN starts at BIT 8 */
1210 hwirq += 8 - GPMC_NR_NAND_IRQS;
1211
1212 regval = gpmc_read_reg(GPMC_CONFIG);
1213 if (rising_edge)
1214 regval &= ~BIT(hwirq);
1215 else
1216 regval |= BIT(hwirq);
1217
1218 gpmc_write_reg(GPMC_CONFIG, regval);
1219}
1220
1221static void gpmc_irq_ack(struct irq_data *d)
1222{
1223 unsigned int hwirq = d->hwirq;
1224
1225 /* skip reserved bits */
1226 if (hwirq >= GPMC_NR_NAND_IRQS)
1227 hwirq += 8 - GPMC_NR_NAND_IRQS;
1228
1229 /* Setting bit to 1 clears (or Acks) the interrupt */
1230 gpmc_write_reg(GPMC_IRQSTATUS, BIT(hwirq));
1231}
1232
1233static int gpmc_irq_set_type(struct irq_data *d, unsigned int trigger)
1234{
1235 /* can't set type for NAND IRQs */
1236 if (d->hwirq < GPMC_NR_NAND_IRQS)
1174 return -EINVAL; 1237 return -EINVAL;
1175 1238
1176 gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); 1239 /* We can support either rising or falling edge at a time */
1177 if (gpmc_irq_start < 0) { 1240 if (trigger == IRQ_TYPE_EDGE_FALLING)
1178 pr_err("irq_alloc_descs failed\n"); 1241 gpmc_irq_edge_config(d->hwirq, false);
1179 return gpmc_irq_start; 1242 else if (trigger == IRQ_TYPE_EDGE_RISING)
1243 gpmc_irq_edge_config(d->hwirq, true);
1244 else
1245 return -EINVAL;
1246
1247 return 0;
1248}
1249
1250static int gpmc_irq_map(struct irq_domain *d, unsigned int virq,
1251 irq_hw_number_t hw)
1252{
1253 struct gpmc_device *gpmc = d->host_data;
1254
1255 irq_set_chip_data(virq, gpmc);
1256 if (hw < GPMC_NR_NAND_IRQS) {
1257 irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
1258 irq_set_chip_and_handler(virq, &gpmc->irq_chip,
1259 handle_simple_irq);
1260 } else {
1261 irq_set_chip_and_handler(virq, &gpmc->irq_chip,
1262 handle_edge_irq);
1180 } 1263 }
1181 1264
1182 gpmc_irq_chip.name = "gpmc"; 1265 return 0;
1183 gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; 1266}
1184 gpmc_irq_chip.irq_enable = gpmc_irq_enable; 1267
1185 gpmc_irq_chip.irq_disable = gpmc_irq_disable; 1268static const struct irq_domain_ops gpmc_irq_domain_ops = {
1186 gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; 1269 .map = gpmc_irq_map,
1187 gpmc_irq_chip.irq_ack = gpmc_irq_noop; 1270 .xlate = irq_domain_xlate_twocell,
1188 gpmc_irq_chip.irq_mask = gpmc_irq_noop; 1271};
1189 gpmc_irq_chip.irq_unmask = gpmc_irq_noop; 1272
1190 1273static irqreturn_t gpmc_handle_irq(int irq, void *data)
1191 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; 1274{
1192 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; 1275 int hwirq, virq;
1193 1276 u32 regval, regvalx;
1194 for (i = 0; i < GPMC_NR_IRQ; i++) { 1277 struct gpmc_device *gpmc = data;
1195 gpmc_client_irq[i].irq = gpmc_irq_start + i; 1278
1196 irq_set_chip_and_handler(gpmc_client_irq[i].irq, 1279 regval = gpmc_read_reg(GPMC_IRQSTATUS);
1197 &gpmc_irq_chip, handle_simple_irq); 1280 regvalx = regval;
1198 irq_modify_status(gpmc_client_irq[i].irq, IRQ_NOREQUEST, 1281
1199 IRQ_NOAUTOEN); 1282 if (!regval)
1283 return IRQ_NONE;
1284
1285 for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) {
1286 /* skip reserved status bits */
1287 if (hwirq == GPMC_NR_NAND_IRQS)
1288 regvalx >>= 8 - GPMC_NR_NAND_IRQS;
1289
1290 if (regvalx & BIT(hwirq)) {
1291 virq = irq_find_mapping(gpmc_irq_domain, hwirq);
1292 if (!virq) {
1293 dev_warn(gpmc->dev,
1294 "spurious irq detected hwirq %d, virq %d\n",
1295 hwirq, virq);
1296 }
1297
1298 generic_handle_irq(virq);
1299 }
1200 } 1300 }
1201 1301
1302 gpmc_write_reg(GPMC_IRQSTATUS, regval);
1303
1304 return IRQ_HANDLED;
1305}
1306
1307static int gpmc_setup_irq(struct gpmc_device *gpmc)
1308{
1309 u32 regval;
1310 int rc;
1311
1202 /* Disable interrupts */ 1312 /* Disable interrupts */
1203 gpmc_write_reg(GPMC_IRQENABLE, 0); 1313 gpmc_write_reg(GPMC_IRQENABLE, 0);
1204 1314
@@ -1206,22 +1316,45 @@ static int gpmc_setup_irq(void)
1206 regval = gpmc_read_reg(GPMC_IRQSTATUS); 1316 regval = gpmc_read_reg(GPMC_IRQSTATUS);
1207 gpmc_write_reg(GPMC_IRQSTATUS, regval); 1317 gpmc_write_reg(GPMC_IRQSTATUS, regval);
1208 1318
1209 return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL); 1319 gpmc->irq_chip.name = "gpmc";
1320 gpmc->irq_chip.irq_enable = gpmc_irq_enable;
1321 gpmc->irq_chip.irq_disable = gpmc_irq_disable;
1322 gpmc->irq_chip.irq_ack = gpmc_irq_ack;
1323 gpmc->irq_chip.irq_mask = gpmc_irq_mask;
1324 gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
1325 gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
1326
1327 gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
1328 gpmc->nirqs,
1329 &gpmc_irq_domain_ops,
1330 gpmc);
1331 if (!gpmc_irq_domain) {
1332 dev_err(gpmc->dev, "IRQ domain add failed\n");
1333 return -ENODEV;
1334 }
1335
1336 rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc);
1337 if (rc) {
1338 dev_err(gpmc->dev, "failed to request irq %d: %d\n",
1339 gpmc->irq, rc);
1340 irq_domain_remove(gpmc_irq_domain);
1341 gpmc_irq_domain = NULL;
1342 }
1343
1344 return rc;
1210} 1345}
1211 1346
1212static int gpmc_free_irq(void) 1347static int gpmc_free_irq(struct gpmc_device *gpmc)
1213{ 1348{
1214 int i; 1349 int hwirq;
1215 1350
1216 if (gpmc_irq) 1351 free_irq(gpmc->irq, gpmc);
1217 free_irq(gpmc_irq, NULL);
1218 1352
1219 for (i = 0; i < GPMC_NR_IRQ; i++) { 1353 for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++)
1220 irq_set_handler(gpmc_client_irq[i].irq, NULL); 1354 irq_dispose_mapping(irq_find_mapping(gpmc_irq_domain, hwirq));
1221 irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
1222 }
1223 1355
1224 irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ); 1356 irq_domain_remove(gpmc_irq_domain);
1357 gpmc_irq_domain = NULL;
1225 1358
1226 return 0; 1359 return 0;
1227} 1360}
@@ -1242,12 +1375,7 @@ static void gpmc_mem_init(void)
1242{ 1375{
1243 int cs; 1376 int cs;
1244 1377
1245 /* 1378 gpmc_mem_root.start = GPMC_MEM_START;
1246 * The first 1MB of GPMC address space is typically mapped to
1247 * the internal ROM. Never allocate the first page, to
1248 * facilitate bug detection; even if we didn't boot from ROM.
1249 */
1250 gpmc_mem_root.start = SZ_1M;
1251 gpmc_mem_root.end = GPMC_MEM_END; 1379 gpmc_mem_root.end = GPMC_MEM_END;
1252 1380
1253 /* Reserve all regions that has been set up by bootloader */ 1381 /* Reserve all regions that has been set up by bootloader */
@@ -1796,105 +1924,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
1796 of_property_read_bool(np, "gpmc,time-para-granularity"); 1924 of_property_read_bool(np, "gpmc,time-para-granularity");
1797} 1925}
1798 1926
1799#if IS_ENABLED(CONFIG_MTD_NAND)
1800
1801static const char * const nand_xfer_types[] = {
1802 [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
1803 [NAND_OMAP_POLLED] = "polled",
1804 [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
1805 [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
1806};
1807
1808static int gpmc_probe_nand_child(struct platform_device *pdev,
1809 struct device_node *child)
1810{
1811 u32 val;
1812 const char *s;
1813 struct gpmc_timings gpmc_t;
1814 struct omap_nand_platform_data *gpmc_nand_data;
1815
1816 if (of_property_read_u32(child, "reg", &val) < 0) {
1817 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1818 child->full_name);
1819 return -ENODEV;
1820 }
1821
1822 gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
1823 GFP_KERNEL);
1824 if (!gpmc_nand_data)
1825 return -ENOMEM;
1826
1827 gpmc_nand_data->cs = val;
1828 gpmc_nand_data->of_node = child;
1829
1830 /* Detect availability of ELM module */
1831 gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
1832 if (gpmc_nand_data->elm_of_node == NULL)
1833 gpmc_nand_data->elm_of_node =
1834 of_parse_phandle(child, "elm_id", 0);
1835
1836 /* select ecc-scheme for NAND */
1837 if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
1838 pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
1839 return -ENODEV;
1840 }
1841
1842 if (!strcmp(s, "sw"))
1843 gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
1844 else if (!strcmp(s, "ham1") ||
1845 !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
1846 gpmc_nand_data->ecc_opt =
1847 OMAP_ECC_HAM1_CODE_HW;
1848 else if (!strcmp(s, "bch4"))
1849 if (gpmc_nand_data->elm_of_node)
1850 gpmc_nand_data->ecc_opt =
1851 OMAP_ECC_BCH4_CODE_HW;
1852 else
1853 gpmc_nand_data->ecc_opt =
1854 OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
1855 else if (!strcmp(s, "bch8"))
1856 if (gpmc_nand_data->elm_of_node)
1857 gpmc_nand_data->ecc_opt =
1858 OMAP_ECC_BCH8_CODE_HW;
1859 else
1860 gpmc_nand_data->ecc_opt =
1861 OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
1862 else if (!strcmp(s, "bch16"))
1863 if (gpmc_nand_data->elm_of_node)
1864 gpmc_nand_data->ecc_opt =
1865 OMAP_ECC_BCH16_CODE_HW;
1866 else
1867 pr_err("%s: BCH16 requires ELM support\n", __func__);
1868 else
1869 pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
1870
1871 /* select data transfer mode for NAND controller */
1872 if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
1873 for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
1874 if (!strcasecmp(s, nand_xfer_types[val])) {
1875 gpmc_nand_data->xfer_type = val;
1876 break;
1877 }
1878
1879 gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child);
1880
1881 val = of_get_nand_bus_width(child);
1882 if (val == 16)
1883 gpmc_nand_data->devsize = NAND_BUSWIDTH_16;
1884
1885 gpmc_read_timings_dt(child, &gpmc_t);
1886 gpmc_nand_init(gpmc_nand_data, &gpmc_t);
1887
1888 return 0;
1889}
1890#else
1891static int gpmc_probe_nand_child(struct platform_device *pdev,
1892 struct device_node *child)
1893{
1894 return 0;
1895}
1896#endif
1897
1898#if IS_ENABLED(CONFIG_MTD_ONENAND) 1927#if IS_ENABLED(CONFIG_MTD_ONENAND)
1899static int gpmc_probe_onenand_child(struct platform_device *pdev, 1928static int gpmc_probe_onenand_child(struct platform_device *pdev,
1900 struct device_node *child) 1929 struct device_node *child)
@@ -1950,6 +1979,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
1950 const char *name; 1979 const char *name;
1951 int ret, cs; 1980 int ret, cs;
1952 u32 val; 1981 u32 val;
1982 struct gpio_desc *waitpin_desc = NULL;
1983 struct gpmc_device *gpmc = platform_get_drvdata(pdev);
1953 1984
1954 if (of_property_read_u32(child, "reg", &cs) < 0) { 1985 if (of_property_read_u32(child, "reg", &cs) < 0) {
1955 dev_err(&pdev->dev, "%s has no 'reg' property\n", 1986 dev_err(&pdev->dev, "%s has no 'reg' property\n",
@@ -2010,23 +2041,80 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
2010 if (ret < 0) { 2041 if (ret < 0) {
2011 dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n", 2042 dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
2012 cs, &res.start); 2043 cs, &res.start);
2044 if (res.start < GPMC_MEM_START) {
2045 dev_info(&pdev->dev,
2046 "GPMC CS %d start cannot be lesser than 0x%x\n",
2047 cs, GPMC_MEM_START);
2048 } else if (res.end > GPMC_MEM_END) {
2049 dev_info(&pdev->dev,
2050 "GPMC CS %d end cannot be greater than 0x%x\n",
2051 cs, GPMC_MEM_END);
2052 }
2013 goto err; 2053 goto err;
2014 } 2054 }
2015 2055
2016 ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width); 2056 if (of_node_cmp(child->name, "nand") == 0) {
2017 if (ret < 0) 2057 /* Warn about older DT blobs with no compatible property */
2018 goto err; 2058 if (!of_property_read_bool(child, "compatible")) {
2059 dev_warn(&pdev->dev,
2060 "Incompatible NAND node: missing compatible");
2061 ret = -EINVAL;
2062 goto err;
2063 }
2064 }
2065
2066 if (of_device_is_compatible(child, "ti,omap2-nand")) {
2067 /* NAND specific setup */
2068 val = 8;
2069 of_property_read_u32(child, "nand-bus-width", &val);
2070 switch (val) {
2071 case 8:
2072 gpmc_s.device_width = GPMC_DEVWIDTH_8BIT;
2073 break;
2074 case 16:
2075 gpmc_s.device_width = GPMC_DEVWIDTH_16BIT;
2076 break;
2077 default:
2078 dev_err(&pdev->dev, "%s: invalid 'nand-bus-width'\n",
2079 child->name);
2080 ret = -EINVAL;
2081 goto err;
2082 }
2083
2084 /* disable write protect */
2085 gpmc_configure(GPMC_CONFIG_WP, 0);
2086 gpmc_s.device_nand = true;
2087 } else {
2088 ret = of_property_read_u32(child, "bank-width",
2089 &gpmc_s.device_width);
2090 if (ret < 0)
2091 goto err;
2092 }
2093
2094 /* Reserve wait pin if it is required and valid */
2095 if (gpmc_s.wait_on_read || gpmc_s.wait_on_write) {
2096 unsigned int wait_pin = gpmc_s.wait_pin;
2097
2098 waitpin_desc = gpiochip_request_own_desc(&gpmc->gpio_chip,
2099 wait_pin, "WAITPIN");
2100 if (IS_ERR(waitpin_desc)) {
2101 dev_err(&pdev->dev, "invalid wait-pin: %d\n", wait_pin);
2102 ret = PTR_ERR(waitpin_desc);
2103 goto err;
2104 }
2105 }
2019 2106
2020 gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings"); 2107 gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
2108
2021 ret = gpmc_cs_program_settings(cs, &gpmc_s); 2109 ret = gpmc_cs_program_settings(cs, &gpmc_s);
2022 if (ret < 0) 2110 if (ret < 0)
2023 goto err; 2111 goto err_cs;
2024 2112
2025 ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s); 2113 ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
2026 if (ret) { 2114 if (ret) {
2027 dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n", 2115 dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
2028 child->name); 2116 child->name);
2029 goto err; 2117 goto err_cs;
2030 } 2118 }
2031 2119
2032 /* Clear limited address i.e. enable A26-A11 */ 2120 /* Clear limited address i.e. enable A26-A11 */
@@ -2057,16 +2145,81 @@ err_child_fail:
2057 dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name); 2145 dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
2058 ret = -ENODEV; 2146 ret = -ENODEV;
2059 2147
2148err_cs:
2149 if (waitpin_desc)
2150 gpiochip_free_own_desc(waitpin_desc);
2151
2060err: 2152err:
2061 gpmc_cs_free(cs); 2153 gpmc_cs_free(cs);
2062 2154
2063 return ret; 2155 return ret;
2064} 2156}
2065 2157
/*
 * gpio_chip .get_direction hook for the GPMC WAIT-pin GPIO chip.
 * Every line this chip exposes is read-only, so the direction is
 * unconditionally reported as input.
 */
static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	return 1; /* 1 == input; we're input only */
}
2162
/*
 * gpio_chip .direction_input hook: always succeeds, since the lines
 * cannot be anything but inputs — there is no hardware to program.
 */
static int gpmc_gpio_direction_input(struct gpio_chip *chip,
				     unsigned int offset)
{
	return 0; /* we're input only */
}
2168
/*
 * gpio_chip .direction_output hook: switching a line to output is
 * never possible on this chip, so reject the request outright.
 */
static int gpmc_gpio_direction_output(struct gpio_chip *chip,
				      unsigned int offset, int value)
{
	return -EINVAL; /* we're input only */
}
2174
/*
 * gpio_chip .set hook: intentionally a no-op — an input-only line has
 * no output value to drive.
 */
static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
			  int value)
{
}
2179
2180static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
2181{
2182 u32 reg;
2183
2184 offset += 8;
2185
2186 reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset);
2187
2188 return !!reg;
2189}
2190
2191static int gpmc_gpio_init(struct gpmc_device *gpmc)
2192{
2193 int ret;
2194
2195 gpmc->gpio_chip.parent = gpmc->dev;
2196 gpmc->gpio_chip.owner = THIS_MODULE;
2197 gpmc->gpio_chip.label = DEVICE_NAME;
2198 gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
2199 gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
2200 gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
2201 gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
2202 gpmc->gpio_chip.set = gpmc_gpio_set;
2203 gpmc->gpio_chip.get = gpmc_gpio_get;
2204 gpmc->gpio_chip.base = -1;
2205
2206 ret = gpiochip_add(&gpmc->gpio_chip);
2207 if (ret < 0) {
2208 dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
2209 return ret;
2210 }
2211
2212 return 0;
2213}
2214
/* Unregister the WAIT-pin gpio_chip set up by gpmc_gpio_init(). */
static void gpmc_gpio_exit(struct gpmc_device *gpmc)
{
	gpiochip_remove(&gpmc->gpio_chip);
}
2219
2066static int gpmc_probe_dt(struct platform_device *pdev) 2220static int gpmc_probe_dt(struct platform_device *pdev)
2067{ 2221{
2068 int ret; 2222 int ret;
2069 struct device_node *child;
2070 const struct of_device_id *of_id = 2223 const struct of_device_id *of_id =
2071 of_match_device(gpmc_dt_ids, &pdev->dev); 2224 of_match_device(gpmc_dt_ids, &pdev->dev);
2072 2225
@@ -2094,17 +2247,26 @@ static int gpmc_probe_dt(struct platform_device *pdev)
2094 return ret; 2247 return ret;
2095 } 2248 }
2096 2249
2250 return 0;
2251}
2252
2253static int gpmc_probe_dt_children(struct platform_device *pdev)
2254{
2255 int ret;
2256 struct device_node *child;
2257
2097 for_each_available_child_of_node(pdev->dev.of_node, child) { 2258 for_each_available_child_of_node(pdev->dev.of_node, child) {
2098 2259
2099 if (!child->name) 2260 if (!child->name)
2100 continue; 2261 continue;
2101 2262
2102 if (of_node_cmp(child->name, "nand") == 0) 2263 if (of_node_cmp(child->name, "onenand") == 0)
2103 ret = gpmc_probe_nand_child(pdev, child);
2104 else if (of_node_cmp(child->name, "onenand") == 0)
2105 ret = gpmc_probe_onenand_child(pdev, child); 2264 ret = gpmc_probe_onenand_child(pdev, child);
2106 else 2265 else
2107 ret = gpmc_probe_generic_child(pdev, child); 2266 ret = gpmc_probe_generic_child(pdev, child);
2267
2268 if (ret)
2269 return ret;
2108 } 2270 }
2109 2271
2110 return 0; 2272 return 0;
@@ -2114,6 +2276,11 @@ static int gpmc_probe_dt(struct platform_device *pdev)
2114{ 2276{
2115 return 0; 2277 return 0;
2116} 2278}
2279
/*
 * Stub: with no device-tree children to enumerate there is nothing to
 * probe, so report success.
 * NOTE(review): appears to be the fallback branch of a CONFIG_OF
 * conditional (the #endif follows) — confirm against the full file.
 */
static int gpmc_probe_dt_children(struct platform_device *pdev)
{
	return 0;
}
2117#endif 2284#endif
2118 2285
2119static int gpmc_probe(struct platform_device *pdev) 2286static int gpmc_probe(struct platform_device *pdev)
@@ -2121,6 +2288,14 @@ static int gpmc_probe(struct platform_device *pdev)
2121 int rc; 2288 int rc;
2122 u32 l; 2289 u32 l;
2123 struct resource *res; 2290 struct resource *res;
2291 struct gpmc_device *gpmc;
2292
2293 gpmc = devm_kzalloc(&pdev->dev, sizeof(*gpmc), GFP_KERNEL);
2294 if (!gpmc)
2295 return -ENOMEM;
2296
2297 gpmc->dev = &pdev->dev;
2298 platform_set_drvdata(pdev, gpmc);
2124 2299
2125 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2300 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2126 if (res == NULL) 2301 if (res == NULL)
@@ -2134,15 +2309,16 @@ static int gpmc_probe(struct platform_device *pdev)
2134 return PTR_ERR(gpmc_base); 2309 return PTR_ERR(gpmc_base);
2135 2310
2136 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2311 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2137 if (res == NULL) 2312 if (!res) {
2138 dev_warn(&pdev->dev, "Failed to get resource: irq\n"); 2313 dev_err(&pdev->dev, "Failed to get resource: irq\n");
2139 else 2314 return -ENOENT;
2140 gpmc_irq = res->start; 2315 }
2316
2317 gpmc->irq = res->start;
2141 2318
2142 gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck"); 2319 gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
2143 if (IS_ERR(gpmc_l3_clk)) { 2320 if (IS_ERR(gpmc_l3_clk)) {
2144 dev_err(&pdev->dev, "Failed to get GPMC fck\n"); 2321 dev_err(&pdev->dev, "Failed to get GPMC fck\n");
2145 gpmc_irq = 0;
2146 return PTR_ERR(gpmc_l3_clk); 2322 return PTR_ERR(gpmc_l3_clk);
2147 } 2323 }
2148 2324
@@ -2151,11 +2327,18 @@ static int gpmc_probe(struct platform_device *pdev)
2151 return -EINVAL; 2327 return -EINVAL;
2152 } 2328 }
2153 2329
2330 if (pdev->dev.of_node) {
2331 rc = gpmc_probe_dt(pdev);
2332 if (rc)
2333 return rc;
2334 } else {
2335 gpmc_cs_num = GPMC_CS_NUM;
2336 gpmc_nr_waitpins = GPMC_NR_WAITPINS;
2337 }
2338
2154 pm_runtime_enable(&pdev->dev); 2339 pm_runtime_enable(&pdev->dev);
2155 pm_runtime_get_sync(&pdev->dev); 2340 pm_runtime_get_sync(&pdev->dev);
2156 2341
2157 gpmc_dev = &pdev->dev;
2158
2159 l = gpmc_read_reg(GPMC_REVISION); 2342 l = gpmc_read_reg(GPMC_REVISION);
2160 2343
2161 /* 2344 /*
@@ -2174,36 +2357,51 @@ static int gpmc_probe(struct platform_device *pdev)
2174 gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS; 2357 gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
2175 if (GPMC_REVISION_MAJOR(l) > 0x5) 2358 if (GPMC_REVISION_MAJOR(l) > 0x5)
2176 gpmc_capability |= GPMC_HAS_MUX_AAD; 2359 gpmc_capability |= GPMC_HAS_MUX_AAD;
2177 dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), 2360 dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
2178 GPMC_REVISION_MINOR(l)); 2361 GPMC_REVISION_MINOR(l));
2179 2362
2180 gpmc_mem_init(); 2363 gpmc_mem_init();
2181 2364 rc = gpmc_gpio_init(gpmc);
2182 if (gpmc_setup_irq() < 0) 2365 if (rc)
2183 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); 2366 goto gpio_init_failed;
2184 2367
2185 if (!pdev->dev.of_node) { 2368 gpmc->nirqs = GPMC_NR_NAND_IRQS + gpmc_nr_waitpins;
2186 gpmc_cs_num = GPMC_CS_NUM; 2369 rc = gpmc_setup_irq(gpmc);
2187 gpmc_nr_waitpins = GPMC_NR_WAITPINS; 2370 if (rc) {
2371 dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
2372 goto setup_irq_failed;
2188 } 2373 }
2189 2374
2190 rc = gpmc_probe_dt(pdev); 2375 rc = gpmc_probe_dt_children(pdev);
2191 if (rc < 0) { 2376 if (rc < 0) {
2192 pm_runtime_put_sync(&pdev->dev); 2377 dev_err(gpmc->dev, "failed to probe DT children\n");
2193 dev_err(gpmc_dev, "failed to probe DT parameters\n"); 2378 goto dt_children_failed;
2194 return rc;
2195 } 2379 }
2196 2380
2197 return 0; 2381 return 0;
2382
2383dt_children_failed:
2384 gpmc_free_irq(gpmc);
2385setup_irq_failed:
2386 gpmc_gpio_exit(gpmc);
2387gpio_init_failed:
2388 gpmc_mem_exit();
2389 pm_runtime_put_sync(&pdev->dev);
2390 pm_runtime_disable(&pdev->dev);
2391
2392 return rc;
2198} 2393}
2199 2394
2200static int gpmc_remove(struct platform_device *pdev) 2395static int gpmc_remove(struct platform_device *pdev)
2201{ 2396{
2202 gpmc_free_irq(); 2397 struct gpmc_device *gpmc = platform_get_drvdata(pdev);
2398
2399 gpmc_free_irq(gpmc);
2400 gpmc_gpio_exit(gpmc);
2203 gpmc_mem_exit(); 2401 gpmc_mem_exit();
2204 pm_runtime_put_sync(&pdev->dev); 2402 pm_runtime_put_sync(&pdev->dev);
2205 pm_runtime_disable(&pdev->dev); 2403 pm_runtime_disable(&pdev->dev);
2206 gpmc_dev = NULL; 2404
2207 return 0; 2405 return 0;
2208} 2406}
2209 2407
@@ -2249,25 +2447,6 @@ static __exit void gpmc_exit(void)
2249postcore_initcall(gpmc_init); 2447postcore_initcall(gpmc_init);
2250module_exit(gpmc_exit); 2448module_exit(gpmc_exit);
2251 2449
/*
 * Shared GPMC interrupt handler (removed by this patch in favour of the
 * new per-device IRQ-chip setup): read the pending bits from
 * GPMC_IRQSTATUS, dispatch every set bit to the matching client's
 * virtual IRQ, then acknowledge the handled bits by writing the raw
 * status value back to the register.
 */
static irqreturn_t gpmc_handle_irq(int irq, void *dev)
{
	int i;
	u32 regval;

	regval = gpmc_read_reg(GPMC_IRQSTATUS);

	/* No pending bits: the interrupt was not raised by the GPMC. */
	if (!regval)
		return IRQ_NONE;

	for (i = 0; i < GPMC_NR_IRQ; i++)
		if (regval & gpmc_client_irq[i].bitmask)
			generic_handle_irq(gpmc_client_irq[i].irq);

	/* Write-to-clear acknowledge of everything just dispatched. */
	gpmc_write_reg(GPMC_IRQSTATUS, regval);

	return IRQ_HANDLED;
}
2270
2271static struct omap3_gpmc_regs gpmc_context; 2450static struct omap3_gpmc_regs gpmc_context;
2272 2451
2273void omap3_gpmc_save_context(void) 2452void omap3_gpmc_save_context(void)