author	Roger Quadros <rogerq@ti.com>	2016-02-19 04:01:02 -0500
committer	Roger Quadros <rogerq@ti.com>	2016-04-15 04:55:06 -0400
commit	b2bac25a4d298309bb4b2649bb1107ddaa287c47 (patch)
tree	f8bf795b115860f570f179f3ee65ffa6b992a0a8 /drivers/memory
parent	210325f0f4eb531f83ffb0b0f95612e2a8063983 (diff)
memory: omap-gpmc: Support WAIT pin edge interrupts

OMAPs can have 2 to 4 WAITPINs that can be used as edge triggered
interrupts if not used for memory wait state insertion.

Support these interrupts via the gpmc IRQ domain.

The gpmc IRQ domain interrupt map is:
	0 - NAND_fifoevent
	1 - NAND_termcount
	2 - GPMC_WAIT0 edge
	3 - GPMC_WAIT1 edge, and so on

Signed-off-by: Roger Quadros <rogerq@ti.com>
Acked-by: Rob Herring <robh@kernel.org>
Acked-by: Tony Lindgren <tony@atomide.com>
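For illustration only (not part of this patch): a minimal sketch of how a GPMC child-device driver might consume one of these WAIT pin edge interrupts once its interrupt line is routed through the gpmc IRQ domain. The foo_* names are hypothetical; platform_get_irq(), devm_request_irq() and IRQF_TRIGGER_RISING are standard kernel APIs, and requesting the IRQ with an edge trigger flag is what ends up in the gpmc_irq_set_type() callback added below.

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	/* Hypothetical consumer of a GPMC WAIT pin edge interrupt.  The virq
	 * is assumed to be mapped through the gpmc IRQ domain (e.g. hwirq 2
	 * for GPMC_WAIT0 in the map above). */
	static irqreturn_t foo_wait_edge_isr(int irq, void *dev_id)
	{
		/* device-specific handling of the WAIT pin edge event */
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		/* the edge trigger flag is routed to gpmc_irq_set_type() */
		return devm_request_irq(&pdev->dev, irq, foo_wait_edge_isr,
					IRQF_TRIGGER_RISING, "foo-wait0", pdev);
	}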
Diffstat (limited to 'drivers/memory')
-rw-r--r--	drivers/memory/omap-gpmc.c	106
1 file changed, 88 insertions, 18 deletions
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 784a64f31a8b..ea9c89747950 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -189,9 +189,7 @@
 #define GPMC_ECC_WRITE		1	/* Reset Hardware ECC for write */
 #define GPMC_ECC_READSYN	2	/* Reset before syndrom is read back */
 
-/* XXX: Only NAND irq has been considered,currently these are the only ones used
- */
-#define	GPMC_NR_IRQ		2
+#define GPMC_NR_NAND_IRQS	2	/* number of NAND specific IRQs */
 
 enum gpmc_clk_domain {
 	GPMC_CD_FCLK,
@@ -239,6 +237,7 @@ struct gpmc_device {
 	int irq;
 	struct irq_chip irq_chip;
 	struct gpio_chip gpio_chip;
+	int nirqs;
 };
 
 static struct irq_domain *gpmc_irq_domain;
@@ -1155,7 +1154,8 @@ int gpmc_get_client_irq(unsigned irq_config)
 		return 0;
 	}
 
-	if (irq_config >= GPMC_NR_IRQ)
+	/* we restrict this to NAND IRQs only */
+	if (irq_config >= GPMC_NR_NAND_IRQS)
 		return 0;
 
 	return irq_create_mapping(gpmc_irq_domain, irq_config);
@@ -1165,6 +1165,10 @@ static int gpmc_irq_endis(unsigned long hwirq, bool endis)
 {
 	u32 regval;
 
+	/* bits GPMC_NR_NAND_IRQS to 8 are reserved */
+	if (hwirq >= GPMC_NR_NAND_IRQS)
+		hwirq += 8 - GPMC_NR_NAND_IRQS;
+
 	regval = gpmc_read_reg(GPMC_IRQENABLE);
 	if (endis)
 		regval |= BIT(hwirq);
@@ -1185,9 +1189,64 @@ static void gpmc_irq_enable(struct irq_data *p)
 	gpmc_irq_endis(p->hwirq, true);
 }
 
-static void gpmc_irq_noop(struct irq_data *data) { }
+static void gpmc_irq_mask(struct irq_data *d)
+{
+	gpmc_irq_endis(d->hwirq, false);
+}
+
+static void gpmc_irq_unmask(struct irq_data *d)
+{
+	gpmc_irq_endis(d->hwirq, true);
+}
+
+static void gpmc_irq_edge_config(unsigned long hwirq, bool rising_edge)
+{
+	u32 regval;
+
+	/* NAND IRQs polarity is not configurable */
+	if (hwirq < GPMC_NR_NAND_IRQS)
+		return;
+
+	/* WAITPIN starts at BIT 8 */
+	hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+	regval = gpmc_read_reg(GPMC_CONFIG);
+	if (rising_edge)
+		regval &= ~BIT(hwirq);
+	else
+		regval |= BIT(hwirq);
+
+	gpmc_write_reg(GPMC_CONFIG, regval);
+}
+
+static void gpmc_irq_ack(struct irq_data *d)
+{
+	unsigned int hwirq = d->hwirq;
+
+	/* skip reserved bits */
+	if (hwirq >= GPMC_NR_NAND_IRQS)
+		hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+	/* Setting bit to 1 clears (or Acks) the interrupt */
+	gpmc_write_reg(GPMC_IRQSTATUS, BIT(hwirq));
+}
+
+static int gpmc_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+	/* can't set type for NAND IRQs */
+	if (d->hwirq < GPMC_NR_NAND_IRQS)
+		return -EINVAL;
+
+	/* We can support either rising or falling edge at a time */
+	if (trigger == IRQ_TYPE_EDGE_FALLING)
+		gpmc_irq_edge_config(d->hwirq, false);
+	else if (trigger == IRQ_TYPE_EDGE_RISING)
+		gpmc_irq_edge_config(d->hwirq, true);
+	else
+		return -EINVAL;
 
-static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
+	return 0;
+}
 
 static int gpmc_irq_map(struct irq_domain *d, unsigned int virq,
 			irq_hw_number_t hw)
@@ -1195,8 +1254,14 @@ static int gpmc_irq_map(struct irq_domain *d, unsigned int virq,
 	struct gpmc_device *gpmc = d->host_data;
 
 	irq_set_chip_data(virq, gpmc);
-	irq_set_chip_and_handler(virq, &gpmc->irq_chip, handle_simple_irq);
-	irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
+	if (hw < GPMC_NR_NAND_IRQS) {
+		irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
+		irq_set_chip_and_handler(virq, &gpmc->irq_chip,
+					 handle_simple_irq);
+	} else {
+		irq_set_chip_and_handler(virq, &gpmc->irq_chip,
+					 handle_edge_irq);
+	}
 
 	return 0;
 }
@@ -1209,16 +1274,21 @@ static const struct irq_domain_ops gpmc_irq_domain_ops = {
 static irqreturn_t gpmc_handle_irq(int irq, void *data)
 {
 	int hwirq, virq;
-	u32 regval;
+	u32 regval, regvalx;
 	struct gpmc_device *gpmc = data;
 
 	regval = gpmc_read_reg(GPMC_IRQSTATUS);
+	regvalx = regval;
 
 	if (!regval)
 		return IRQ_NONE;
 
-	for (hwirq = 0; hwirq < GPMC_NR_IRQ; hwirq++) {
-		if (regval & BIT(hwirq)) {
+	for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) {
+		/* skip reserved status bits */
+		if (hwirq == GPMC_NR_NAND_IRQS)
+			regvalx >>= 8 - GPMC_NR_NAND_IRQS;
+
+		if (regvalx & BIT(hwirq)) {
 			virq = irq_find_mapping(gpmc_irq_domain, hwirq);
 			if (!virq) {
 				dev_warn(gpmc->dev,
@@ -1248,16 +1318,15 @@ static int gpmc_setup_irq(struct gpmc_device *gpmc)
 	gpmc_write_reg(GPMC_IRQSTATUS, regval);
 
 	gpmc->irq_chip.name = "gpmc";
-	gpmc->irq_chip.irq_startup = gpmc_irq_noop_ret;
 	gpmc->irq_chip.irq_enable = gpmc_irq_enable;
 	gpmc->irq_chip.irq_disable = gpmc_irq_disable;
-	gpmc->irq_chip.irq_shutdown = gpmc_irq_noop;
-	gpmc->irq_chip.irq_ack = gpmc_irq_noop;
-	gpmc->irq_chip.irq_mask = gpmc_irq_noop;
-	gpmc->irq_chip.irq_unmask = gpmc_irq_noop;
+	gpmc->irq_chip.irq_ack = gpmc_irq_ack;
+	gpmc->irq_chip.irq_mask = gpmc_irq_mask;
+	gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
+	gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
 
 	gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
-						GPMC_NR_IRQ,
+						gpmc->nirqs,
 						&gpmc_irq_domain_ops,
 						gpmc);
 	if (!gpmc_irq_domain) {
@@ -1282,7 +1351,7 @@ static int gpmc_free_irq(struct gpmc_device *gpmc)
 
 	free_irq(gpmc->irq, gpmc);
 
-	for (hwirq = 0; hwirq < GPMC_NR_IRQ; hwirq++)
+	for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++)
 		irq_dispose_mapping(irq_find_mapping(gpmc_irq_domain, hwirq));
 
 	irq_domain_remove(gpmc_irq_domain);
@@ -2296,6 +2365,7 @@ static int gpmc_probe(struct platform_device *pdev)
 	if (rc)
 		goto gpio_init_failed;
 
+	gpmc->nirqs = GPMC_NR_NAND_IRQS + gpmc_nr_waitpins;
 	rc = gpmc_setup_irq(gpmc);
 	if (rc) {
 		dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
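
For reference (an editor's sketch based only on the register comments in the patch, not code from this series): the hwirq adjustment repeated above skips the reserved bits between the NAND event bits and the WAITPIN edge-detection bits in GPMC_IRQSTATUS/GPMC_IRQENABLE, so domain hwirqs 0 and 1 map to register bits 0 and 1, while hwirq 2 (GPMC_WAIT0) maps to bit 8, hwirq 3 to bit 9, and so on:

	/* Illustrative helper only: mirrors the hwirq-to-register-bit
	 * adjustment used by gpmc_irq_endis() and gpmc_irq_ack() above.
	 * hwirq 0,1 (NAND) -> bits 0,1; hwirq 2 (WAIT0) -> bit 8; hwirq 3 -> bit 9.
	 */
	static unsigned int gpmc_hwirq_to_reg_bit(unsigned int hwirq)
	{
		if (hwirq >= GPMC_NR_NAND_IRQS)
			hwirq += 8 - GPMC_NR_NAND_IRQS;	/* skip reserved bits */
		return hwirq;
	}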