author     Arnd Bergmann <arnd@arndb.de>  2012-03-02 08:05:00 -0500
committer  Arnd Bergmann <arnd@arndb.de>  2012-03-02 08:05:00 -0500
commit     1220547bfd7f7dd97f770a04b533323e5404b8e9 (patch)
tree       a4231b9570f3302f032a2d757fb3af24a4238d46 /drivers/gpio
parent     a26d3c4fcd4bb875ae5adc32f27fab7a478bb00d (diff)
parent     f86bcc302a8c570dd0f5a50097a6af96a0e717c2 (diff)

Merge branch 'depends/omap/gpio/runtime-pm-cleanup' into next/cleanup
Conflicts:
	arch/arm/mach-omap1/gpio16xx.c
	drivers/gpio/gpio-omap.c

Merge in the runtime-pm-cleanup branch from the gpio tree into
next/cleanup; this resolves a non-obvious merge conflict between the
two branches. Both branches move parts of the gpio-omap driver into
platform code, so this merge takes the superset of both changes.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers/gpio')
-rw-r--r--  drivers/gpio/gpio-omap.c | 1101
 1 file changed, 462 insertions(+), 639 deletions(-)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 5b7b164e08ad..f49bd6f47a50 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -21,6 +21,7 @@
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
24#include <linux/pm.h>
24 25
25#include <mach/hardware.h> 26#include <mach/hardware.h>
26#include <asm/irq.h> 27#include <asm/irq.h>
@@ -28,19 +29,36 @@
28#include <asm/gpio.h> 29#include <asm/gpio.h>
29#include <asm/mach/irq.h> 30#include <asm/mach/irq.h>
30 31
32#define OFF_MODE 1
33
34static LIST_HEAD(omap_gpio_list);
35
36struct gpio_regs {
37 u32 irqenable1;
38 u32 irqenable2;
39 u32 wake_en;
40 u32 ctrl;
41 u32 oe;
42 u32 leveldetect0;
43 u32 leveldetect1;
44 u32 risingdetect;
45 u32 fallingdetect;
46 u32 dataout;
47 u32 debounce;
48 u32 debounce_en;
49};
50
31struct gpio_bank { 51struct gpio_bank {
52 struct list_head node;
32 unsigned long pbase; 53 unsigned long pbase;
33 void __iomem *base; 54 void __iomem *base;
34 u16 irq; 55 u16 irq;
35 u16 virtual_irq_start; 56 u16 virtual_irq_start;
36 int method;
37 u32 suspend_wakeup; 57 u32 suspend_wakeup;
38#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
39 u32 saved_wakeup; 58 u32 saved_wakeup;
40#endif
41 u32 non_wakeup_gpios; 59 u32 non_wakeup_gpios;
42 u32 enabled_non_wakeup_gpios; 60 u32 enabled_non_wakeup_gpios;
43 61 struct gpio_regs context;
44 u32 saved_datain; 62 u32 saved_datain;
45 u32 saved_fallingdetect; 63 u32 saved_fallingdetect;
46 u32 saved_risingdetect; 64 u32 saved_risingdetect;
@@ -51,44 +69,27 @@ struct gpio_bank {
51 struct clk *dbck; 69 struct clk *dbck;
52 u32 mod_usage; 70 u32 mod_usage;
53 u32 dbck_enable_mask; 71 u32 dbck_enable_mask;
72 bool dbck_enabled;
54 struct device *dev; 73 struct device *dev;
74 bool is_mpuio;
55 bool dbck_flag; 75 bool dbck_flag;
76 bool loses_context;
56 int stride; 77 int stride;
57 u32 width; 78 u32 width;
79 int context_loss_count;
80 u16 id;
81 int power_mode;
82 bool workaround_enabled;
58 83
59 void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable); 84 void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
85 int (*get_context_loss_count)(struct device *dev);
60 86
61 struct omap_gpio_reg_offs *regs; 87 struct omap_gpio_reg_offs *regs;
62}; 88};
63 89
64#ifdef CONFIG_ARCH_OMAP3
65struct omap3_gpio_regs {
66 u32 irqenable1;
67 u32 irqenable2;
68 u32 wake_en;
69 u32 ctrl;
70 u32 oe;
71 u32 leveldetect0;
72 u32 leveldetect1;
73 u32 risingdetect;
74 u32 fallingdetect;
75 u32 dataout;
76};
77
78static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
79#endif
80
81/*
82 * TODO: Cleanup gpio_bank usage as it is having information
83 * related to all instances of the device
84 */
85static struct gpio_bank *gpio_bank;
86
87/* TODO: Analyze removing gpio_bank_count usage from driver code */
88int gpio_bank_count;
89
90#define GPIO_INDEX(bank, gpio) (gpio % bank->width) 90#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
91#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) 91#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
92#define GPIO_MOD_CTRL_BIT BIT(0)
92 93
93static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) 94static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
94{ 95{
@@ -102,6 +103,7 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
102 else 103 else
103 l &= ~(1 << gpio); 104 l &= ~(1 << gpio);
104 __raw_writel(l, reg); 105 __raw_writel(l, reg);
106 bank->context.oe = l;
105} 107}
106 108
107 109
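
Editor's note: the hunk above introduces the pattern that recurs throughout this patch, mirroring every hardware write into the bank->context shadow so the register file can be replayed after an off-mode power cycle. A compact userspace model of that write-through shadow (hypothetical names, plain memory standing in for the GPIO registers; not the driver's code):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for the memory-mapped OE and DATAOUT registers. */
	struct fake_regs {
		uint32_t oe;
		uint32_t dataout;
	};

	/* Shadow copy, analogous to struct gpio_regs in the patch. */
	struct fake_context {
		uint32_t oe;
		uint32_t dataout;
	};

	/* Write-through helper: every hardware write also updates the shadow. */
	static void write_oe(struct fake_regs *hw, struct fake_context *ctx, uint32_t val)
	{
		hw->oe = val;		/* __raw_writel(l, reg) in the driver */
		ctx->oe = val;		/* bank->context.oe = l */
	}

	/* After a power-domain off/on cycle the registers come back at reset defaults... */
	static void power_cycle(struct fake_regs *hw)
	{
		hw->oe = 0xffffffff;	/* OE reset default: all lines are inputs */
		hw->dataout = 0;
	}

	/* ...and restore simply replays the shadow into the hardware. */
	static void restore(struct fake_regs *hw, const struct fake_context *ctx)
	{
		hw->oe = ctx->oe;
		hw->dataout = ctx->dataout;
	}

	int main(void)
	{
		struct fake_regs hw = { 0xffffffff, 0 };
		struct fake_context ctx = { 0xffffffff, 0 };

		write_oe(&hw, &ctx, 0xfffffffe);	/* make GPIO 0 an output */
		power_cycle(&hw);
		restore(&hw, &ctx);
		printf("oe after restore: 0x%08x\n", hw.oe);	/* 0xfffffffe */
		return 0;
	}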
@@ -132,6 +134,7 @@ static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
132 else 134 else
133 l &= ~gpio_bit; 135 l &= ~gpio_bit;
134 __raw_writel(l, reg); 136 __raw_writel(l, reg);
137 bank->context.dataout = l;
135} 138}
136 139
137static int _get_gpio_datain(struct gpio_bank *bank, int gpio) 140static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
@@ -160,6 +163,22 @@ static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
160 __raw_writel(l, base + reg); 163 __raw_writel(l, base + reg);
161} 164}
162 165
166static inline void _gpio_dbck_enable(struct gpio_bank *bank)
167{
168 if (bank->dbck_enable_mask && !bank->dbck_enabled) {
169 clk_enable(bank->dbck);
170 bank->dbck_enabled = true;
171 }
172}
173
174static inline void _gpio_dbck_disable(struct gpio_bank *bank)
175{
176 if (bank->dbck_enable_mask && bank->dbck_enabled) {
177 clk_disable(bank->dbck);
178 bank->dbck_enabled = false;
179 }
180}
181
163/** 182/**
164 * _set_gpio_debounce - low level gpio debounce time 183 * _set_gpio_debounce - low level gpio debounce time
165 * @bank: the gpio bank we're acting upon 184 * @bank: the gpio bank we're acting upon
@@ -188,70 +207,74 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
188 207
189 l = GPIO_BIT(bank, gpio); 208 l = GPIO_BIT(bank, gpio);
190 209
210 clk_enable(bank->dbck);
191 reg = bank->base + bank->regs->debounce; 211 reg = bank->base + bank->regs->debounce;
192 __raw_writel(debounce, reg); 212 __raw_writel(debounce, reg);
193 213
194 reg = bank->base + bank->regs->debounce_en; 214 reg = bank->base + bank->regs->debounce_en;
195 val = __raw_readl(reg); 215 val = __raw_readl(reg);
196 216
197 if (debounce) { 217 if (debounce)
198 val |= l; 218 val |= l;
199 clk_enable(bank->dbck); 219 else
200 } else {
201 val &= ~l; 220 val &= ~l;
202 clk_disable(bank->dbck);
203 }
204 bank->dbck_enable_mask = val; 221 bank->dbck_enable_mask = val;
205 222
206 __raw_writel(val, reg); 223 __raw_writel(val, reg);
224 clk_disable(bank->dbck);
225 /*
226 * Enable debounce clock per module.
227 * This call is mandatory because in omap_gpio_request() when
228 * *_runtime_get_sync() is called, _gpio_dbck_enable() within
229 * runtime callbck fails to turn on dbck because dbck_enable_mask
230 * used within _gpio_dbck_enable() is still not initialized at
231 * that point. Therefore we have to enable dbck here.
232 */
233 _gpio_dbck_enable(bank);
234 if (bank->dbck_enable_mask) {
235 bank->context.debounce = debounce;
236 bank->context.debounce_en = val;
237 }
207} 238}
208 239
209#ifdef CONFIG_ARCH_OMAP2PLUS 240static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
210static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
211 int trigger) 241 int trigger)
212{ 242{
213 void __iomem *base = bank->base; 243 void __iomem *base = bank->base;
214 u32 gpio_bit = 1 << gpio; 244 u32 gpio_bit = 1 << gpio;
215 245
216 if (cpu_is_omap44xx()) { 246 _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
217 _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit, 247 trigger & IRQ_TYPE_LEVEL_LOW);
218 trigger & IRQ_TYPE_LEVEL_LOW); 248 _gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
219 _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit, 249 trigger & IRQ_TYPE_LEVEL_HIGH);
220 trigger & IRQ_TYPE_LEVEL_HIGH); 250 _gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
221 _gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit, 251 trigger & IRQ_TYPE_EDGE_RISING);
222 trigger & IRQ_TYPE_EDGE_RISING); 252 _gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
223 _gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit, 253 trigger & IRQ_TYPE_EDGE_FALLING);
224 trigger & IRQ_TYPE_EDGE_FALLING); 254
225 } else { 255 bank->context.leveldetect0 =
226 _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, 256 __raw_readl(bank->base + bank->regs->leveldetect0);
227 trigger & IRQ_TYPE_LEVEL_LOW); 257 bank->context.leveldetect1 =
228 _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit, 258 __raw_readl(bank->base + bank->regs->leveldetect1);
229 trigger & IRQ_TYPE_LEVEL_HIGH); 259 bank->context.risingdetect =
230 _gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit, 260 __raw_readl(bank->base + bank->regs->risingdetect);
231 trigger & IRQ_TYPE_EDGE_RISING); 261 bank->context.fallingdetect =
232 _gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit, 262 __raw_readl(bank->base + bank->regs->fallingdetect);
233 trigger & IRQ_TYPE_EDGE_FALLING); 263
234 }
235 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { 264 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
236 if (cpu_is_omap44xx()) { 265 _gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
237 _gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit, 266 bank->context.wake_en =
238 trigger != 0); 267 __raw_readl(bank->base + bank->regs->wkup_en);
239 } else {
240 /*
241 * GPIO wakeup request can only be generated on edge
242 * transitions
243 */
244 if (trigger & IRQ_TYPE_EDGE_BOTH)
245 __raw_writel(1 << gpio, bank->base
246 + OMAP24XX_GPIO_SETWKUENA);
247 else
248 __raw_writel(1 << gpio, bank->base
249 + OMAP24XX_GPIO_CLEARWKUENA);
250 }
251 } 268 }
269
252 /* This part needs to be executed always for OMAP{34xx, 44xx} */ 270 /* This part needs to be executed always for OMAP{34xx, 44xx} */
253 if (cpu_is_omap34xx() || cpu_is_omap44xx() || 271 if (!bank->regs->irqctrl) {
254 (bank->non_wakeup_gpios & gpio_bit)) { 272 /* On omap24xx proceed only when valid GPIO bit is set */
273 if (bank->non_wakeup_gpios) {
274 if (!(bank->non_wakeup_gpios & gpio_bit))
275 goto exit;
276 }
277
255 /* 278 /*
256 * Log the edge gpio and manually trigger the IRQ 279 * Log the edge gpio and manually trigger the IRQ
257 * after resume if the input level changes 280 * after resume if the input level changes
@@ -264,17 +287,11 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
264 bank->enabled_non_wakeup_gpios &= ~gpio_bit; 287 bank->enabled_non_wakeup_gpios &= ~gpio_bit;
265 } 288 }
266 289
267 if (cpu_is_omap44xx()) { 290exit:
268 bank->level_mask = 291 bank->level_mask =
269 __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) | 292 __raw_readl(bank->base + bank->regs->leveldetect0) |
270 __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1); 293 __raw_readl(bank->base + bank->regs->leveldetect1);
271 } else {
272 bank->level_mask =
273 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
274 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
275 }
276} 294}
277#endif
278 295
279#ifdef CONFIG_ARCH_OMAP1 296#ifdef CONFIG_ARCH_OMAP1
280/* 297/*
@@ -286,23 +303,10 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
286 void __iomem *reg = bank->base; 303 void __iomem *reg = bank->base;
287 u32 l = 0; 304 u32 l = 0;
288 305
289 switch (bank->method) { 306 if (!bank->regs->irqctrl)
290 case METHOD_MPUIO:
291 reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
292 break;
293#ifdef CONFIG_ARCH_OMAP15XX
294 case METHOD_GPIO_1510:
295 reg += OMAP1510_GPIO_INT_CONTROL;
296 break;
297#endif
298#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
299 case METHOD_GPIO_7XX:
300 reg += OMAP7XX_GPIO_INT_CONTROL;
301 break;
302#endif
303 default:
304 return; 307 return;
305 } 308
309 reg += bank->regs->irqctrl;
306 310
307 l = __raw_readl(reg); 311 l = __raw_readl(reg);
308 if ((l >> gpio) & 1) 312 if ((l >> gpio) & 1)
@@ -312,31 +316,21 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
312 316
313 __raw_writel(l, reg); 317 __raw_writel(l, reg);
314} 318}
319#else
320static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
315#endif 321#endif
316 322
317static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger) 323static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
318{ 324{
319 void __iomem *reg = bank->base; 325 void __iomem *reg = bank->base;
326 void __iomem *base = bank->base;
320 u32 l = 0; 327 u32 l = 0;
321 328
322 switch (bank->method) { 329 if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
323#ifdef CONFIG_ARCH_OMAP1 330 set_gpio_trigger(bank, gpio, trigger);
324 case METHOD_MPUIO: 331 } else if (bank->regs->irqctrl) {
325 reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride; 332 reg += bank->regs->irqctrl;
326 l = __raw_readl(reg); 333
327 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
328 bank->toggle_mask |= 1 << gpio;
329 if (trigger & IRQ_TYPE_EDGE_RISING)
330 l |= 1 << gpio;
331 else if (trigger & IRQ_TYPE_EDGE_FALLING)
332 l &= ~(1 << gpio);
333 else
334 goto bad;
335 break;
336#endif
337#ifdef CONFIG_ARCH_OMAP15XX
338 case METHOD_GPIO_1510:
339 reg += OMAP1510_GPIO_INT_CONTROL;
340 l = __raw_readl(reg); 334 l = __raw_readl(reg);
341 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) 335 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
342 bank->toggle_mask |= 1 << gpio; 336 bank->toggle_mask |= 1 << gpio;
@@ -345,15 +339,15 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
345 else if (trigger & IRQ_TYPE_EDGE_FALLING) 339 else if (trigger & IRQ_TYPE_EDGE_FALLING)
346 l &= ~(1 << gpio); 340 l &= ~(1 << gpio);
347 else 341 else
348 goto bad; 342 return -EINVAL;
349 break; 343
350#endif 344 __raw_writel(l, reg);
351#ifdef CONFIG_ARCH_OMAP16XX 345 } else if (bank->regs->edgectrl1) {
352 case METHOD_GPIO_1610:
353 if (gpio & 0x08) 346 if (gpio & 0x08)
354 reg += OMAP1610_GPIO_EDGE_CTRL2; 347 reg += bank->regs->edgectrl2;
355 else 348 else
356 reg += OMAP1610_GPIO_EDGE_CTRL1; 349 reg += bank->regs->edgectrl1;
350
357 gpio &= 0x07; 351 gpio &= 0x07;
358 l = __raw_readl(reg); 352 l = __raw_readl(reg);
359 l &= ~(3 << (gpio << 1)); 353 l &= ~(3 << (gpio << 1));
@@ -361,40 +355,14 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
361 l |= 2 << (gpio << 1); 355 l |= 2 << (gpio << 1);
362 if (trigger & IRQ_TYPE_EDGE_FALLING) 356 if (trigger & IRQ_TYPE_EDGE_FALLING)
363 l |= 1 << (gpio << 1); 357 l |= 1 << (gpio << 1);
364 if (trigger) 358
365 /* Enable wake-up during idle for dynamic tick */ 359 /* Enable wake-up during idle for dynamic tick */
366 __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_SET_WAKEUPENA); 360 _gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
367 else 361 bank->context.wake_en =
368 __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA); 362 __raw_readl(bank->base + bank->regs->wkup_en);
369 break; 363 __raw_writel(l, reg);
370#endif
371#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
372 case METHOD_GPIO_7XX:
373 reg += OMAP7XX_GPIO_INT_CONTROL;
374 l = __raw_readl(reg);
375 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
376 bank->toggle_mask |= 1 << gpio;
377 if (trigger & IRQ_TYPE_EDGE_RISING)
378 l |= 1 << gpio;
379 else if (trigger & IRQ_TYPE_EDGE_FALLING)
380 l &= ~(1 << gpio);
381 else
382 goto bad;
383 break;
384#endif
385#ifdef CONFIG_ARCH_OMAP2PLUS
386 case METHOD_GPIO_24XX:
387 case METHOD_GPIO_44XX:
388 set_24xx_gpio_triggering(bank, gpio, trigger);
389 return 0;
390#endif
391 default:
392 goto bad;
393 } 364 }
394 __raw_writel(l, reg);
395 return 0; 365 return 0;
396bad:
397 return -EINVAL;
398} 366}
399 367
400static int gpio_irq_type(struct irq_data *d, unsigned type) 368static int gpio_irq_type(struct irq_data *d, unsigned type)
@@ -412,12 +380,12 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
412 if (type & ~IRQ_TYPE_SENSE_MASK) 380 if (type & ~IRQ_TYPE_SENSE_MASK)
413 return -EINVAL; 381 return -EINVAL;
414 382
415 /* OMAP1 allows only only edge triggering */ 383 bank = irq_data_get_irq_chip_data(d);
416 if (!cpu_class_is_omap2() 384
417 && (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))) 385 if (!bank->regs->leveldetect0 &&
386 (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
418 return -EINVAL; 387 return -EINVAL;
419 388
420 bank = irq_data_get_irq_chip_data(d);
421 spin_lock_irqsave(&bank->lock, flags); 389 spin_lock_irqsave(&bank->lock, flags);
422 retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); 390 retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
423 spin_unlock_irqrestore(&bank->lock, flags); 391 spin_unlock_irqrestore(&bank->lock, flags);
@@ -484,6 +452,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
484 } 452 }
485 453
486 __raw_writel(l, reg); 454 __raw_writel(l, reg);
455 bank->context.irqenable1 = l;
487} 456}
488 457
489static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) 458static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
@@ -504,6 +473,7 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
504 } 473 }
505 474
506 __raw_writel(l, reg); 475 __raw_writel(l, reg);
476 bank->context.irqenable1 = l;
507} 477}
508 478
509static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable) 479static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
@@ -567,38 +537,39 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
567 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 537 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
568 unsigned long flags; 538 unsigned long flags;
569 539
570 spin_lock_irqsave(&bank->lock, flags); 540 /*
541 * If this is the first gpio_request for the bank,
542 * enable the bank module.
543 */
544 if (!bank->mod_usage)
545 pm_runtime_get_sync(bank->dev);
571 546
547 spin_lock_irqsave(&bank->lock, flags);
572 /* Set trigger to none. You need to enable the desired trigger with 548 /* Set trigger to none. You need to enable the desired trigger with
573 * request_irq() or set_irq_type(). 549 * request_irq() or set_irq_type().
574 */ 550 */
575 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 551 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
576 552
577#ifdef CONFIG_ARCH_OMAP15XX 553 if (bank->regs->pinctrl) {
578 if (bank->method == METHOD_GPIO_1510) { 554 void __iomem *reg = bank->base + bank->regs->pinctrl;
579 void __iomem *reg;
580 555
581 /* Claim the pin for MPU */ 556 /* Claim the pin for MPU */
582 reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
583 __raw_writel(__raw_readl(reg) | (1 << offset), reg); 557 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
584 } 558 }
585#endif 559
586 if (!cpu_class_is_omap1()) { 560 if (bank->regs->ctrl && !bank->mod_usage) {
587 if (!bank->mod_usage) { 561 void __iomem *reg = bank->base + bank->regs->ctrl;
588 void __iomem *reg = bank->base; 562 u32 ctrl;
589 u32 ctrl; 563
590 564 ctrl = __raw_readl(reg);
591 if (cpu_is_omap24xx() || cpu_is_omap34xx()) 565 /* Module is enabled, clocks are not gated */
592 reg += OMAP24XX_GPIO_CTRL; 566 ctrl &= ~GPIO_MOD_CTRL_BIT;
593 else if (cpu_is_omap44xx()) 567 __raw_writel(ctrl, reg);
594 reg += OMAP4_GPIO_CTRL; 568 bank->context.ctrl = ctrl;
595 ctrl = __raw_readl(reg);
596 /* Module is enabled, clocks are not gated */
597 ctrl &= 0xFFFFFFFE;
598 __raw_writel(ctrl, reg);
599 }
600 bank->mod_usage |= 1 << offset;
601 } 569 }
570
571 bank->mod_usage |= 1 << offset;
572
602 spin_unlock_irqrestore(&bank->lock, flags); 573 spin_unlock_irqrestore(&bank->lock, flags);
603 574
604 return 0; 575 return 0;
@@ -607,48 +578,40 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
607static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) 578static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
608{ 579{
609 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 580 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
581 void __iomem *base = bank->base;
610 unsigned long flags; 582 unsigned long flags;
611 583
612 spin_lock_irqsave(&bank->lock, flags); 584 spin_lock_irqsave(&bank->lock, flags);
613#ifdef CONFIG_ARCH_OMAP16XX 585
614 if (bank->method == METHOD_GPIO_1610) { 586 if (bank->regs->wkup_en) {
615 /* Disable wake-up during idle for dynamic tick */
616 void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
617 __raw_writel(1 << offset, reg);
618 }
619#endif
620#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
621 if (bank->method == METHOD_GPIO_24XX) {
622 /* Disable wake-up during idle for dynamic tick */
623 void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
624 __raw_writel(1 << offset, reg);
625 }
626#endif
627#ifdef CONFIG_ARCH_OMAP4
628 if (bank->method == METHOD_GPIO_44XX) {
629 /* Disable wake-up during idle for dynamic tick */ 587 /* Disable wake-up during idle for dynamic tick */
630 void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0; 588 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
631 __raw_writel(1 << offset, reg); 589 bank->context.wake_en =
590 __raw_readl(bank->base + bank->regs->wkup_en);
632 } 591 }
633#endif 592
634 if (!cpu_class_is_omap1()) { 593 bank->mod_usage &= ~(1 << offset);
635 bank->mod_usage &= ~(1 << offset); 594
636 if (!bank->mod_usage) { 595 if (bank->regs->ctrl && !bank->mod_usage) {
637 void __iomem *reg = bank->base; 596 void __iomem *reg = bank->base + bank->regs->ctrl;
638 u32 ctrl; 597 u32 ctrl;
639 598
640 if (cpu_is_omap24xx() || cpu_is_omap34xx()) 599 ctrl = __raw_readl(reg);
641 reg += OMAP24XX_GPIO_CTRL; 600 /* Module is disabled, clocks are gated */
642 else if (cpu_is_omap44xx()) 601 ctrl |= GPIO_MOD_CTRL_BIT;
643 reg += OMAP4_GPIO_CTRL; 602 __raw_writel(ctrl, reg);
644 ctrl = __raw_readl(reg); 603 bank->context.ctrl = ctrl;
645 /* Module is disabled, clocks are gated */
646 ctrl |= 1;
647 __raw_writel(ctrl, reg);
648 }
649 } 604 }
605
650 _reset_gpio(bank, bank->chip.base + offset); 606 _reset_gpio(bank, bank->chip.base + offset);
651 spin_unlock_irqrestore(&bank->lock, flags); 607 spin_unlock_irqrestore(&bank->lock, flags);
608
609 /*
610 * If this is the last gpio to be freed in the bank,
611 * disable the bank module.
612 */
613 if (!bank->mod_usage)
614 pm_runtime_put(bank->dev);
652} 615}
653 616
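
Editor's note: taken together, the request and free paths above use the mod_usage bitmask as an implicit reference count: the first bit set triggers pm_runtime_get_sync() and the last bit cleared triggers pm_runtime_put(). A minimal sketch of that first-user/last-user convention (hypothetical get/put stubs in place of the runtime-PM calls):

	#include <stdio.h>

	static int pm_count;					/* visible effect of get/put */
	static void fake_pm_get(void) { pm_count++; }		/* models pm_runtime_get_sync() */
	static void fake_pm_put(void) { pm_count--; }		/* models pm_runtime_put() */

	struct fake_bank {
		unsigned int mod_usage;		/* one bit per requested GPIO line */
	};

	static void bank_request(struct fake_bank *b, unsigned int offset)
	{
		if (!b->mod_usage)		/* first user powers the module up */
			fake_pm_get();
		b->mod_usage |= 1u << offset;
	}

	static void bank_free(struct fake_bank *b, unsigned int offset)
	{
		b->mod_usage &= ~(1u << offset);
		if (!b->mod_usage)		/* last user lets it power down */
			fake_pm_put();
	}

	int main(void)
	{
		struct fake_bank b = { 0 };

		bank_request(&b, 3);
		bank_request(&b, 7);		/* no second get */
		bank_free(&b, 3);
		bank_free(&b, 7);		/* last free drops the reference */
		printf("pm refcount: %d\n", pm_count);	/* prints 0 */
		return 0;
	}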
654/* 617/*
@@ -674,6 +637,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
674 637
675 bank = irq_get_handler_data(irq); 638 bank = irq_get_handler_data(irq);
676 isr_reg = bank->base + bank->regs->irqstatus; 639 isr_reg = bank->base + bank->regs->irqstatus;
640 pm_runtime_get_sync(bank->dev);
677 641
678 if (WARN_ON(!isr_reg)) 642 if (WARN_ON(!isr_reg))
679 goto exit; 643 goto exit;
@@ -685,12 +649,8 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
685 enabled = _get_gpio_irqbank_mask(bank); 649 enabled = _get_gpio_irqbank_mask(bank);
686 isr_saved = isr = __raw_readl(isr_reg) & enabled; 650 isr_saved = isr = __raw_readl(isr_reg) & enabled;
687 651
688 if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO)) 652 if (bank->level_mask)
689 isr &= 0x0000ffff;
690
691 if (cpu_class_is_omap2()) {
692 level_mask = bank->level_mask & enabled; 653 level_mask = bank->level_mask & enabled;
693 }
694 654
695 /* clear edge sensitive interrupts before handler(s) are 655 /* clear edge sensitive interrupts before handler(s) are
696 called so that we don't miss any interrupt occurred while 656 called so that we don't miss any interrupt occurred while
@@ -718,7 +678,6 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
718 if (!(isr & 1)) 678 if (!(isr & 1))
719 continue; 679 continue;
720 680
721#ifdef CONFIG_ARCH_OMAP1
722 /* 681 /*
723 * Some chips can't respond to both rising and falling 682 * Some chips can't respond to both rising and falling
724 * at the same time. If this irq was requested with 683 * at the same time. If this irq was requested with
@@ -728,7 +687,6 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
728 */ 687 */
729 if (bank->toggle_mask & (1 << gpio_index)) 688 if (bank->toggle_mask & (1 << gpio_index))
730 _toggle_gpio_edge_triggering(bank, gpio_index); 689 _toggle_gpio_edge_triggering(bank, gpio_index);
731#endif
732 690
733 generic_handle_irq(gpio_irq); 691 generic_handle_irq(gpio_irq);
734 } 692 }
@@ -740,6 +698,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
740exit: 698exit:
741 if (!unmasked) 699 if (!unmasked)
742 chained_irq_exit(chip, desc); 700 chained_irq_exit(chip, desc);
701 pm_runtime_put(bank->dev);
743} 702}
744 703
745static void gpio_irq_shutdown(struct irq_data *d) 704static void gpio_irq_shutdown(struct irq_data *d)
@@ -808,14 +767,6 @@ static struct irq_chip gpio_irq_chip = {
808 767
809/*---------------------------------------------------------------------*/ 768/*---------------------------------------------------------------------*/
810 769
811#ifdef CONFIG_ARCH_OMAP1
812
813#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)
814
815#ifdef CONFIG_ARCH_OMAP16XX
816
817#include <linux/platform_device.h>
818
819static int omap_mpuio_suspend_noirq(struct device *dev) 770static int omap_mpuio_suspend_noirq(struct device *dev)
820{ 771{
821 struct platform_device *pdev = to_platform_device(dev); 772 struct platform_device *pdev = to_platform_device(dev);
@@ -869,32 +820,16 @@ static struct platform_device omap_mpuio_device = {
869 /* could list the /proc/iomem resources */ 820 /* could list the /proc/iomem resources */
870}; 821};
871 822
872static inline void mpuio_init(void) 823static inline void mpuio_init(struct gpio_bank *bank)
873{ 824{
874 struct gpio_bank *bank = &gpio_bank[0];
875 platform_set_drvdata(&omap_mpuio_device, bank); 825 platform_set_drvdata(&omap_mpuio_device, bank);
876 826
877 if (platform_driver_register(&omap_mpuio_driver) == 0) 827 if (platform_driver_register(&omap_mpuio_driver) == 0)
878 (void) platform_device_register(&omap_mpuio_device); 828 (void) platform_device_register(&omap_mpuio_device);
879} 829}
880 830
881#else
882static inline void mpuio_init(void) {}
883#endif /* 16xx */
884
885#else
886
887#define bank_is_mpuio(bank) 0
888static inline void mpuio_init(void) {}
889
890#endif
891
892/*---------------------------------------------------------------------*/ 831/*---------------------------------------------------------------------*/
893 832
894/* REVISIT these are stupid implementations! replace by ones that
895 * don't switch on METHOD_* and which mostly avoid spinlocks
896 */
897
898static int gpio_input(struct gpio_chip *chip, unsigned offset) 833static int gpio_input(struct gpio_chip *chip, unsigned offset)
899{ 834{
900 struct gpio_bank *bank; 835 struct gpio_bank *bank;
@@ -1007,71 +942,32 @@ static void __init omap_gpio_show_rev(struct gpio_bank *bank)
1007 */ 942 */
1008static struct lock_class_key gpio_lock_class; 943static struct lock_class_key gpio_lock_class;
1009 944
1010static inline int init_gpio_info(struct platform_device *pdev) 945static void omap_gpio_mod_init(struct gpio_bank *bank)
1011{ 946{
1012 /* TODO: Analyze removing gpio_bank_count usage from driver code */ 947 void __iomem *base = bank->base;
1013 gpio_bank = kzalloc(gpio_bank_count * sizeof(struct gpio_bank), 948 u32 l = 0xffffffff;
1014 GFP_KERNEL);
1015 if (!gpio_bank) {
1016 dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
1017 return -ENOMEM;
1018 }
1019 return 0;
1020}
1021 949
1022/* TODO: Cleanup cpu_is_* checks */ 950 if (bank->width == 16)
1023static void omap_gpio_mod_init(struct gpio_bank *bank, int id) 951 l = 0xffff;
1024{ 952
1025 if (cpu_class_is_omap2()) { 953 if (bank->is_mpuio) {
1026 if (cpu_is_omap44xx()) { 954 __raw_writel(l, bank->base + bank->regs->irqenable);
1027 __raw_writel(0xffffffff, bank->base + 955 return;
1028 OMAP4_GPIO_IRQSTATUSCLR0);
1029 __raw_writel(0x00000000, bank->base +
1030 OMAP4_GPIO_DEBOUNCENABLE);
1031 /* Initialize interface clk ungated, module enabled */
1032 __raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
1033 } else if (cpu_is_omap34xx()) {
1034 __raw_writel(0x00000000, bank->base +
1035 OMAP24XX_GPIO_IRQENABLE1);
1036 __raw_writel(0xffffffff, bank->base +
1037 OMAP24XX_GPIO_IRQSTATUS1);
1038 __raw_writel(0x00000000, bank->base +
1039 OMAP24XX_GPIO_DEBOUNCE_EN);
1040
1041 /* Initialize interface clk ungated, module enabled */
1042 __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
1043 } else if (cpu_is_omap24xx()) {
1044 static const u32 non_wakeup_gpios[] = {
1045 0xe203ffc0, 0x08700040
1046 };
1047 if (id < ARRAY_SIZE(non_wakeup_gpios))
1048 bank->non_wakeup_gpios = non_wakeup_gpios[id];
1049 }
1050 } else if (cpu_class_is_omap1()) {
1051 if (bank_is_mpuio(bank))
1052 __raw_writew(0xffff, bank->base +
1053 OMAP_MPUIO_GPIO_MASKIT / bank->stride);
1054 if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
1055 __raw_writew(0xffff, bank->base
1056 + OMAP1510_GPIO_INT_MASK);
1057 __raw_writew(0x0000, bank->base
1058 + OMAP1510_GPIO_INT_STATUS);
1059 }
1060 if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
1061 __raw_writew(0x0000, bank->base
1062 + OMAP1610_GPIO_IRQENABLE1);
1063 __raw_writew(0xffff, bank->base
1064 + OMAP1610_GPIO_IRQSTATUS1);
1065 __raw_writew(0x0014, bank->base
1066 + OMAP1610_GPIO_SYSCONFIG);
1067 }
1068 if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
1069 __raw_writel(0xffffffff, bank->base
1070 + OMAP7XX_GPIO_INT_MASK);
1071 __raw_writel(0x00000000, bank->base
1072 + OMAP7XX_GPIO_INT_STATUS);
1073 }
1074 } 956 }
957
958 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
959 _gpio_rmw(base, bank->regs->irqstatus, l,
960 bank->regs->irqenable_inv == false);
961 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
962 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
963 if (bank->regs->debounce_en)
964 _gpio_rmw(base, bank->regs->debounce_en, 0, 1);
965
966 /* Save OE default value (0xffffffff) in the context */
967 bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
968 /* Initialize interface clk ungated, module enabled */
969 if (bank->regs->ctrl)
970 _gpio_rmw(base, bank->regs->ctrl, 0, 1);
1075} 971}
1076 972
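
Editor's note: the rewritten omap_gpio_mod_init() above expresses the chip-specific initialization as _gpio_rmw(base, offset, mask, set) calls driven by the per-bank register table, instead of cpu_is_*() branches. A tiny model of that read-modify-write helper on plain memory (hypothetical, not the driver's I/O accessors):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Set or clear 'mask' in the 32-bit word at 'reg', like _gpio_rmw(). */
	static void rmw32(uint32_t *reg, uint32_t mask, bool set)
	{
		uint32_t l = *reg;		/* __raw_readl() */

		if (set)
			l |= mask;
		else
			l &= ~mask;

		*reg = l;			/* __raw_writel() */
	}

	int main(void)
	{
		uint32_t irqenable = 0;

		rmw32(&irqenable, 0xffff, true);	/* unmask the low 16 lines */
		rmw32(&irqenable, 0x0003, false);	/* mask lines 0 and 1 again */
		printf("irqenable = 0x%08x\n", irqenable);	/* 0x0000fffc */
		return 0;
	}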
1077static __init void 973static __init void
@@ -1094,8 +990,8 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
1094 ct->chip.irq_mask = irq_gc_mask_set_bit; 990 ct->chip.irq_mask = irq_gc_mask_set_bit;
1095 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 991 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
1096 ct->chip.irq_set_type = gpio_irq_type; 992 ct->chip.irq_set_type = gpio_irq_type;
1097 /* REVISIT: assuming only 16xx supports MPUIO wake events */ 993
1098 if (cpu_is_omap16xx()) 994 if (bank->regs->wkup_en)
1099 ct->chip.irq_set_wake = gpio_wake_enable, 995 ct->chip.irq_set_wake = gpio_wake_enable,
1100 996
1101 ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride; 997 ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
@@ -1108,7 +1004,6 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
1108 int j; 1004 int j;
1109 static int gpio; 1005 static int gpio;
1110 1006
1111 bank->mod_usage = 0;
1112 /* 1007 /*
1113 * REVISIT eventually switch from OMAP-specific gpio structs 1008 * REVISIT eventually switch from OMAP-specific gpio structs
1114 * over to the generic ones 1009 * over to the generic ones
@@ -1121,11 +1016,10 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
1121 bank->chip.set_debounce = gpio_debounce; 1016 bank->chip.set_debounce = gpio_debounce;
1122 bank->chip.set = gpio_set; 1017 bank->chip.set = gpio_set;
1123 bank->chip.to_irq = gpio_2irq; 1018 bank->chip.to_irq = gpio_2irq;
1124 if (bank_is_mpuio(bank)) { 1019 if (bank->is_mpuio) {
1125 bank->chip.label = "mpuio"; 1020 bank->chip.label = "mpuio";
1126#ifdef CONFIG_ARCH_OMAP16XX 1021 if (bank->regs->wkup_en)
1127 bank->chip.dev = &omap_mpuio_device.dev; 1022 bank->chip.dev = &omap_mpuio_device.dev;
1128#endif
1129 bank->chip.base = OMAP_MPUIO(0); 1023 bank->chip.base = OMAP_MPUIO(0);
1130 } else { 1024 } else {
1131 bank->chip.label = "gpio"; 1025 bank->chip.label = "gpio";
@@ -1140,7 +1034,7 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
1140 j < bank->virtual_irq_start + bank->width; j++) { 1034 j < bank->virtual_irq_start + bank->width; j++) {
1141 irq_set_lockdep_class(j, &gpio_lock_class); 1035 irq_set_lockdep_class(j, &gpio_lock_class);
1142 irq_set_chip_data(j, bank); 1036 irq_set_chip_data(j, bank);
1143 if (bank_is_mpuio(bank)) { 1037 if (bank->is_mpuio) {
1144 omap_mpuio_alloc_gc(bank, j, bank->width); 1038 omap_mpuio_alloc_gc(bank, j, bank->width);
1145 } else { 1039 } else {
1146 irq_set_chip(j, &gpio_irq_chip); 1040 irq_set_chip(j, &gpio_irq_chip);
@@ -1154,42 +1048,44 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
1154 1048
1155static int __devinit omap_gpio_probe(struct platform_device *pdev) 1049static int __devinit omap_gpio_probe(struct platform_device *pdev)
1156{ 1050{
1157 static int gpio_init_done;
1158 struct omap_gpio_platform_data *pdata; 1051 struct omap_gpio_platform_data *pdata;
1159 struct resource *res; 1052 struct resource *res;
1160 int id;
1161 struct gpio_bank *bank; 1053 struct gpio_bank *bank;
1054 int ret = 0;
1162 1055
1163 if (!pdev->dev.platform_data) 1056 if (!pdev->dev.platform_data) {
1164 return -EINVAL; 1057 ret = -EINVAL;
1165 1058 goto err_exit;
1166 pdata = pdev->dev.platform_data;
1167
1168 if (!gpio_init_done) {
1169 int ret;
1170
1171 ret = init_gpio_info(pdev);
1172 if (ret)
1173 return ret;
1174 } 1059 }
1175 1060
1176 id = pdev->id; 1061 bank = kzalloc(sizeof(struct gpio_bank), GFP_KERNEL);
1177 bank = &gpio_bank[id]; 1062 if (!bank) {
1063 dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
1064 ret = -ENOMEM;
1065 goto err_exit;
1066 }
1178 1067
1179 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1068 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1180 if (unlikely(!res)) { 1069 if (unlikely(!res)) {
1181 dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n", id); 1070 dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n",
1182 return -ENODEV; 1071 pdev->id);
1072 ret = -ENODEV;
1073 goto err_free;
1183 } 1074 }
1184 1075
1185 bank->irq = res->start; 1076 bank->irq = res->start;
1077 bank->id = pdev->id;
1078
1079 pdata = pdev->dev.platform_data;
1186 bank->virtual_irq_start = pdata->virtual_irq_start; 1080 bank->virtual_irq_start = pdata->virtual_irq_start;
1187 bank->method = pdata->bank_type;
1188 bank->dev = &pdev->dev; 1081 bank->dev = &pdev->dev;
1189 bank->dbck_flag = pdata->dbck_flag; 1082 bank->dbck_flag = pdata->dbck_flag;
1190 bank->stride = pdata->bank_stride; 1083 bank->stride = pdata->bank_stride;
1191 bank->width = pdata->bank_width; 1084 bank->width = pdata->bank_width;
1192 1085 bank->is_mpuio = pdata->is_mpuio;
1086 bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
1087 bank->loses_context = pdata->loses_context;
1088 bank->get_context_loss_count = pdata->get_context_loss_count;
1193 bank->regs = pdata->regs; 1089 bank->regs = pdata->regs;
1194 1090
1195 if (bank->regs->set_dataout && bank->regs->clr_dataout) 1091 if (bank->regs->set_dataout && bank->regs->clr_dataout)
@@ -1202,369 +1098,310 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
1202 /* Static mapping, never released */ 1098 /* Static mapping, never released */
1203 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1099 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1204 if (unlikely(!res)) { 1100 if (unlikely(!res)) {
1205 dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n", id); 1101 dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n",
1206 return -ENODEV; 1102 pdev->id);
1103 ret = -ENODEV;
1104 goto err_free;
1207 } 1105 }
1208 1106
1209 bank->base = ioremap(res->start, resource_size(res)); 1107 bank->base = ioremap(res->start, resource_size(res));
1210 if (!bank->base) { 1108 if (!bank->base) {
1211 dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n", id); 1109 dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n",
1212 return -ENOMEM; 1110 pdev->id);
1111 ret = -ENOMEM;
1112 goto err_free;
1213 } 1113 }
1214 1114
1115 platform_set_drvdata(pdev, bank);
1116
1215 pm_runtime_enable(bank->dev); 1117 pm_runtime_enable(bank->dev);
1118 pm_runtime_irq_safe(bank->dev);
1216 pm_runtime_get_sync(bank->dev); 1119 pm_runtime_get_sync(bank->dev);
1217 1120
1218 omap_gpio_mod_init(bank, id); 1121 if (bank->is_mpuio)
1122 mpuio_init(bank);
1123
1124 omap_gpio_mod_init(bank);
1219 omap_gpio_chip_init(bank); 1125 omap_gpio_chip_init(bank);
1220 omap_gpio_show_rev(bank); 1126 omap_gpio_show_rev(bank);
1221 1127
1222 if (!gpio_init_done) 1128 pm_runtime_put(bank->dev);
1223 gpio_init_done = 1;
1224 1129
1225 return 0; 1130 list_add_tail(&bank->node, &omap_gpio_list);
1131
1132 return ret;
1133
1134err_free:
1135 kfree(bank);
1136err_exit:
1137 return ret;
1226} 1138}
1227 1139
1228#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) 1140#ifdef CONFIG_ARCH_OMAP2PLUS
1229static int omap_gpio_suspend(void) 1141
1142#if defined(CONFIG_PM_SLEEP)
1143static int omap_gpio_suspend(struct device *dev)
1230{ 1144{
1231 int i; 1145 struct platform_device *pdev = to_platform_device(dev);
1146 struct gpio_bank *bank = platform_get_drvdata(pdev);
1147 void __iomem *base = bank->base;
1148 void __iomem *wakeup_enable;
1149 unsigned long flags;
1232 1150
1233 if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) 1151 if (!bank->mod_usage || !bank->loses_context)
1234 return 0; 1152 return 0;
1235 1153
1236 for (i = 0; i < gpio_bank_count; i++) { 1154 if (!bank->regs->wkup_en || !bank->suspend_wakeup)
1237 struct gpio_bank *bank = &gpio_bank[i]; 1155 return 0;
1238 void __iomem *wake_status;
1239 void __iomem *wake_clear;
1240 void __iomem *wake_set;
1241 unsigned long flags;
1242
1243 switch (bank->method) {
1244#ifdef CONFIG_ARCH_OMAP16XX
1245 case METHOD_GPIO_1610:
1246 wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
1247 wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
1248 wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
1249 break;
1250#endif
1251#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
1252 case METHOD_GPIO_24XX:
1253 wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
1254 wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
1255 wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
1256 break;
1257#endif
1258#ifdef CONFIG_ARCH_OMAP4
1259 case METHOD_GPIO_44XX:
1260 wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
1261 wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
1262 wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
1263 break;
1264#endif
1265 default:
1266 continue;
1267 }
1268 1156
1269 spin_lock_irqsave(&bank->lock, flags); 1157 wakeup_enable = bank->base + bank->regs->wkup_en;
1270 bank->saved_wakeup = __raw_readl(wake_status); 1158
1271 __raw_writel(0xffffffff, wake_clear); 1159 spin_lock_irqsave(&bank->lock, flags);
1272 __raw_writel(bank->suspend_wakeup, wake_set); 1160 bank->saved_wakeup = __raw_readl(wakeup_enable);
1273 spin_unlock_irqrestore(&bank->lock, flags); 1161 _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
1274 } 1162 _gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
1163 spin_unlock_irqrestore(&bank->lock, flags);
1275 1164
1276 return 0; 1165 return 0;
1277} 1166}
1278 1167
1279static void omap_gpio_resume(void) 1168static int omap_gpio_resume(struct device *dev)
1280{ 1169{
1281 int i; 1170 struct platform_device *pdev = to_platform_device(dev);
1171 struct gpio_bank *bank = platform_get_drvdata(pdev);
1172 void __iomem *base = bank->base;
1173 unsigned long flags;
1282 1174
1283 if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) 1175 if (!bank->mod_usage || !bank->loses_context)
1284 return; 1176 return 0;
1285 1177
1286 for (i = 0; i < gpio_bank_count; i++) { 1178 if (!bank->regs->wkup_en || !bank->saved_wakeup)
1287 struct gpio_bank *bank = &gpio_bank[i]; 1179 return 0;
1288 void __iomem *wake_clear;
1289 void __iomem *wake_set;
1290 unsigned long flags;
1291
1292 switch (bank->method) {
1293#ifdef CONFIG_ARCH_OMAP16XX
1294 case METHOD_GPIO_1610:
1295 wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
1296 wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
1297 break;
1298#endif
1299#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
1300 case METHOD_GPIO_24XX:
1301 wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
1302 wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
1303 break;
1304#endif
1305#ifdef CONFIG_ARCH_OMAP4
1306 case METHOD_GPIO_44XX:
1307 wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
1308 wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
1309 break;
1310#endif
1311 default:
1312 continue;
1313 }
1314 1180
1315 spin_lock_irqsave(&bank->lock, flags); 1181 spin_lock_irqsave(&bank->lock, flags);
1316 __raw_writel(0xffffffff, wake_clear); 1182 _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
1317 __raw_writel(bank->saved_wakeup, wake_set); 1183 _gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
1318 spin_unlock_irqrestore(&bank->lock, flags); 1184 spin_unlock_irqrestore(&bank->lock, flags);
1319 }
1320}
1321 1185
1322static struct syscore_ops omap_gpio_syscore_ops = { 1186 return 0;
1323 .suspend = omap_gpio_suspend, 1187}
1324 .resume = omap_gpio_resume, 1188#endif /* CONFIG_PM_SLEEP */
1325};
1326 1189
1327#endif 1190#if defined(CONFIG_PM_RUNTIME)
1191static void omap_gpio_restore_context(struct gpio_bank *bank);
1328 1192
1329#ifdef CONFIG_ARCH_OMAP2PLUS 1193static int omap_gpio_runtime_suspend(struct device *dev)
1194{
1195 struct platform_device *pdev = to_platform_device(dev);
1196 struct gpio_bank *bank = platform_get_drvdata(pdev);
1197 u32 l1 = 0, l2 = 0;
1198 unsigned long flags;
1330 1199
1331static int workaround_enabled; 1200 spin_lock_irqsave(&bank->lock, flags);
1201 if (bank->power_mode != OFF_MODE) {
1202 bank->power_mode = 0;
1203 goto update_gpio_context_count;
1204 }
1205 /*
1206 * If going to OFF, remove triggering for all
1207 * non-wakeup GPIOs. Otherwise spurious IRQs will be
1208 * generated. See OMAP2420 Errata item 1.101.
1209 */
1210 if (!(bank->enabled_non_wakeup_gpios))
1211 goto update_gpio_context_count;
1332 1212
1333void omap2_gpio_prepare_for_idle(int off_mode) 1213 bank->saved_datain = __raw_readl(bank->base +
1334{ 1214 bank->regs->datain);
1335 int i, c = 0; 1215 l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
1336 int min = 0; 1216 l2 = __raw_readl(bank->base + bank->regs->risingdetect);
1337 1217
1338 if (cpu_is_omap34xx()) 1218 bank->saved_fallingdetect = l1;
1339 min = 1; 1219 bank->saved_risingdetect = l2;
1220 l1 &= ~bank->enabled_non_wakeup_gpios;
1221 l2 &= ~bank->enabled_non_wakeup_gpios;
1340 1222
1341 for (i = min; i < gpio_bank_count; i++) { 1223 __raw_writel(l1, bank->base + bank->regs->fallingdetect);
1342 struct gpio_bank *bank = &gpio_bank[i]; 1224 __raw_writel(l2, bank->base + bank->regs->risingdetect);
1343 u32 l1 = 0, l2 = 0;
1344 int j;
1345 1225
1346 for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++) 1226 bank->workaround_enabled = true;
1347 clk_disable(bank->dbck);
1348 1227
1349 if (!off_mode) 1228update_gpio_context_count:
1350 continue; 1229 if (bank->get_context_loss_count)
1230 bank->context_loss_count =
1231 bank->get_context_loss_count(bank->dev);
1351 1232
1352 /* If going to OFF, remove triggering for all 1233 _gpio_dbck_disable(bank);
1353 * non-wakeup GPIOs. Otherwise spurious IRQs will be 1234 spin_unlock_irqrestore(&bank->lock, flags);
1354 * generated. See OMAP2420 Errata item 1.101. */
1355 if (!(bank->enabled_non_wakeup_gpios))
1356 continue;
1357 1235
1358 if (cpu_is_omap24xx() || cpu_is_omap34xx()) { 1236 return 0;
1359 bank->saved_datain = __raw_readl(bank->base + 1237}
1360 OMAP24XX_GPIO_DATAIN);
1361 l1 = __raw_readl(bank->base +
1362 OMAP24XX_GPIO_FALLINGDETECT);
1363 l2 = __raw_readl(bank->base +
1364 OMAP24XX_GPIO_RISINGDETECT);
1365 }
1366 1238
1367 if (cpu_is_omap44xx()) { 1239static int omap_gpio_runtime_resume(struct device *dev)
1368 bank->saved_datain = __raw_readl(bank->base + 1240{
1369 OMAP4_GPIO_DATAIN); 1241 struct platform_device *pdev = to_platform_device(dev);
1370 l1 = __raw_readl(bank->base + 1242 struct gpio_bank *bank = platform_get_drvdata(pdev);
1371 OMAP4_GPIO_FALLINGDETECT); 1243 int context_lost_cnt_after;
1372 l2 = __raw_readl(bank->base + 1244 u32 l = 0, gen, gen0, gen1;
1373 OMAP4_GPIO_RISINGDETECT); 1245 unsigned long flags;
1374 }
1375 1246
1376 bank->saved_fallingdetect = l1; 1247 spin_lock_irqsave(&bank->lock, flags);
1377 bank->saved_risingdetect = l2; 1248 _gpio_dbck_enable(bank);
1378 l1 &= ~bank->enabled_non_wakeup_gpios; 1249 if (!bank->enabled_non_wakeup_gpios || !bank->workaround_enabled) {
1379 l2 &= ~bank->enabled_non_wakeup_gpios; 1250 spin_unlock_irqrestore(&bank->lock, flags);
1251 return 0;
1252 }
1380 1253
1381 if (cpu_is_omap24xx() || cpu_is_omap34xx()) { 1254 if (bank->get_context_loss_count) {
1382 __raw_writel(l1, bank->base + 1255 context_lost_cnt_after =
1383 OMAP24XX_GPIO_FALLINGDETECT); 1256 bank->get_context_loss_count(bank->dev);
1384 __raw_writel(l2, bank->base + 1257 if (context_lost_cnt_after != bank->context_loss_count ||
1385 OMAP24XX_GPIO_RISINGDETECT); 1258 !context_lost_cnt_after) {
1259 omap_gpio_restore_context(bank);
1260 } else {
1261 spin_unlock_irqrestore(&bank->lock, flags);
1262 return 0;
1386 } 1263 }
1264 }
1387 1265
1388 if (cpu_is_omap44xx()) { 1266 __raw_writel(bank->saved_fallingdetect,
1389 __raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT); 1267 bank->base + bank->regs->fallingdetect);
1390 __raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT); 1268 __raw_writel(bank->saved_risingdetect,
1391 } 1269 bank->base + bank->regs->risingdetect);
1270 l = __raw_readl(bank->base + bank->regs->datain);
1392 1271
1393 c++; 1272 /*
1394 } 1273 * Check if any of the non-wakeup interrupt GPIOs have changed
1395 if (!c) { 1274 * state. If so, generate an IRQ by software. This is
1396 workaround_enabled = 0; 1275 * horribly racy, but it's the best we can do to work around
1397 return; 1276 * this silicon bug.
1398 } 1277 */
1399 workaround_enabled = 1; 1278 l ^= bank->saved_datain;
1400} 1279 l &= bank->enabled_non_wakeup_gpios;
1401 1280
1402void omap2_gpio_resume_after_idle(void) 1281 /*
1403{ 1282 * No need to generate IRQs for the rising edge for gpio IRQs
1404 int i; 1283 * configured with falling edge only; and vice versa.
1405 int min = 0; 1284 */
1285 gen0 = l & bank->saved_fallingdetect;
1286 gen0 &= bank->saved_datain;
1406 1287
1407 if (cpu_is_omap34xx()) 1288 gen1 = l & bank->saved_risingdetect;
1408 min = 1; 1289 gen1 &= ~(bank->saved_datain);
1409 for (i = min; i < gpio_bank_count; i++) {
1410 struct gpio_bank *bank = &gpio_bank[i];
1411 u32 l = 0, gen, gen0, gen1;
1412 int j;
1413 1290
1414 for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++) 1291 /* FIXME: Consider GPIO IRQs with level detections properly! */
1415 clk_enable(bank->dbck); 1292 gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
1293 /* Consider all GPIO IRQs needed to be updated */
1294 gen |= gen0 | gen1;
1416 1295
1417 if (!workaround_enabled) 1296 if (gen) {
1418 continue; 1297 u32 old0, old1;
1419 1298
1420 if (!(bank->enabled_non_wakeup_gpios)) 1299 old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
1421 continue; 1300 old1 = __raw_readl(bank->base + bank->regs->leveldetect1);
1422 1301
1423 if (cpu_is_omap24xx() || cpu_is_omap34xx()) { 1302 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1424 __raw_writel(bank->saved_fallingdetect, 1303 __raw_writel(old0 | gen, bank->base +
1425 bank->base + OMAP24XX_GPIO_FALLINGDETECT); 1304 bank->regs->leveldetect0);
1426 __raw_writel(bank->saved_risingdetect, 1305 __raw_writel(old1 | gen, bank->base +
1427 bank->base + OMAP24XX_GPIO_RISINGDETECT); 1306 bank->regs->leveldetect1);
1428 l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
1429 } 1307 }
1430 1308
1431 if (cpu_is_omap44xx()) { 1309 if (cpu_is_omap44xx()) {
1432 __raw_writel(bank->saved_fallingdetect, 1310 __raw_writel(old0 | l, bank->base +
1433 bank->base + OMAP4_GPIO_FALLINGDETECT); 1311 bank->regs->leveldetect0);
1434 __raw_writel(bank->saved_risingdetect, 1312 __raw_writel(old1 | l, bank->base +
1435 bank->base + OMAP4_GPIO_RISINGDETECT); 1313 bank->regs->leveldetect1);
1436 l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
1437 }
1438
1439 /* Check if any of the non-wakeup interrupt GPIOs have changed
1440 * state. If so, generate an IRQ by software. This is
1441 * horribly racy, but it's the best we can do to work around
1442 * this silicon bug. */
1443 l ^= bank->saved_datain;
1444 l &= bank->enabled_non_wakeup_gpios;
1445
1446 /*
1447 * No need to generate IRQs for the rising edge for gpio IRQs
1448 * configured with falling edge only; and vice versa.
1449 */
1450 gen0 = l & bank->saved_fallingdetect;
1451 gen0 &= bank->saved_datain;
1452
1453 gen1 = l & bank->saved_risingdetect;
1454 gen1 &= ~(bank->saved_datain);
1455
1456 /* FIXME: Consider GPIO IRQs with level detections properly! */
1457 gen = l & (~(bank->saved_fallingdetect) &
1458 ~(bank->saved_risingdetect));
1459 /* Consider all GPIO IRQs needed to be updated */
1460 gen |= gen0 | gen1;
1461
1462 if (gen) {
1463 u32 old0, old1;
1464
1465 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1466 old0 = __raw_readl(bank->base +
1467 OMAP24XX_GPIO_LEVELDETECT0);
1468 old1 = __raw_readl(bank->base +
1469 OMAP24XX_GPIO_LEVELDETECT1);
1470 __raw_writel(old0 | gen, bank->base +
1471 OMAP24XX_GPIO_LEVELDETECT0);
1472 __raw_writel(old1 | gen, bank->base +
1473 OMAP24XX_GPIO_LEVELDETECT1);
1474 __raw_writel(old0, bank->base +
1475 OMAP24XX_GPIO_LEVELDETECT0);
1476 __raw_writel(old1, bank->base +
1477 OMAP24XX_GPIO_LEVELDETECT1);
1478 }
1479
1480 if (cpu_is_omap44xx()) {
1481 old0 = __raw_readl(bank->base +
1482 OMAP4_GPIO_LEVELDETECT0);
1483 old1 = __raw_readl(bank->base +
1484 OMAP4_GPIO_LEVELDETECT1);
1485 __raw_writel(old0 | l, bank->base +
1486 OMAP4_GPIO_LEVELDETECT0);
1487 __raw_writel(old1 | l, bank->base +
1488 OMAP4_GPIO_LEVELDETECT1);
1489 __raw_writel(old0, bank->base +
1490 OMAP4_GPIO_LEVELDETECT0);
1491 __raw_writel(old1, bank->base +
1492 OMAP4_GPIO_LEVELDETECT1);
1493 }
1494 } 1314 }
1315 __raw_writel(old0, bank->base + bank->regs->leveldetect0);
1316 __raw_writel(old1, bank->base + bank->regs->leveldetect1);
1495 } 1317 }
1496 1318
1319 bank->workaround_enabled = false;
1320 spin_unlock_irqrestore(&bank->lock, flags);
1321
1322 return 0;
1497} 1323}
1324#endif /* CONFIG_PM_RUNTIME */
1498 1325
1499#endif 1326void omap2_gpio_prepare_for_idle(int pwr_mode)
1327{
1328 struct gpio_bank *bank;
1329
1330 list_for_each_entry(bank, &omap_gpio_list, node) {
1331 if (!bank->mod_usage || !bank->loses_context)
1332 continue;
1333
1334 bank->power_mode = pwr_mode;
1335
1336 pm_runtime_put_sync_suspend(bank->dev);
1337 }
1338}
1500 1339
1501#ifdef CONFIG_ARCH_OMAP3 1340void omap2_gpio_resume_after_idle(void)
1502/* save the registers of bank 2-6 */
1503void omap_gpio_save_context(void)
1504{ 1341{
1505 int i; 1342 struct gpio_bank *bank;
1506 1343
1507 /* saving banks from 2-6 only since GPIO1 is in WKUP */ 1344 list_for_each_entry(bank, &omap_gpio_list, node) {
1508 for (i = 1; i < gpio_bank_count; i++) { 1345 if (!bank->mod_usage || !bank->loses_context)
1509 struct gpio_bank *bank = &gpio_bank[i]; 1346 continue;
1510 gpio_context[i].irqenable1 = 1347
1511 __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1); 1348 pm_runtime_get_sync(bank->dev);
1512 gpio_context[i].irqenable2 =
1513 __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2);
1514 gpio_context[i].wake_en =
1515 __raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN);
1516 gpio_context[i].ctrl =
1517 __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
1518 gpio_context[i].oe =
1519 __raw_readl(bank->base + OMAP24XX_GPIO_OE);
1520 gpio_context[i].leveldetect0 =
1521 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
1522 gpio_context[i].leveldetect1 =
1523 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
1524 gpio_context[i].risingdetect =
1525 __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
1526 gpio_context[i].fallingdetect =
1527 __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
1528 gpio_context[i].dataout =
1529 __raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
1530 } 1349 }
1531} 1350}
1532 1351
1533/* restore the required registers of bank 2-6 */ 1352#if defined(CONFIG_PM_RUNTIME)
1534void omap_gpio_restore_context(void) 1353static void omap_gpio_restore_context(struct gpio_bank *bank)
1535{ 1354{
1536 int i; 1355 __raw_writel(bank->context.wake_en,
1537 1356 bank->base + bank->regs->wkup_en);
1538 for (i = 1; i < gpio_bank_count; i++) { 1357 __raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
1539 struct gpio_bank *bank = &gpio_bank[i]; 1358 __raw_writel(bank->context.leveldetect0,
1540 __raw_writel(gpio_context[i].irqenable1, 1359 bank->base + bank->regs->leveldetect0);
1541 bank->base + OMAP24XX_GPIO_IRQENABLE1); 1360 __raw_writel(bank->context.leveldetect1,
1542 __raw_writel(gpio_context[i].irqenable2, 1361 bank->base + bank->regs->leveldetect1);
1543 bank->base + OMAP24XX_GPIO_IRQENABLE2); 1362 __raw_writel(bank->context.risingdetect,
1544 __raw_writel(gpio_context[i].wake_en, 1363 bank->base + bank->regs->risingdetect);
1545 bank->base + OMAP24XX_GPIO_WAKE_EN); 1364 __raw_writel(bank->context.fallingdetect,
1546 __raw_writel(gpio_context[i].ctrl, 1365 bank->base + bank->regs->fallingdetect);
1547 bank->base + OMAP24XX_GPIO_CTRL); 1366 if (bank->regs->set_dataout && bank->regs->clr_dataout)
1548 __raw_writel(gpio_context[i].oe, 1367 __raw_writel(bank->context.dataout,
1549 bank->base + OMAP24XX_GPIO_OE); 1368 bank->base + bank->regs->set_dataout);
1550 __raw_writel(gpio_context[i].leveldetect0, 1369 else
1551 bank->base + OMAP24XX_GPIO_LEVELDETECT0); 1370 __raw_writel(bank->context.dataout,
1552 __raw_writel(gpio_context[i].leveldetect1, 1371 bank->base + bank->regs->dataout);
1553 bank->base + OMAP24XX_GPIO_LEVELDETECT1); 1372 __raw_writel(bank->context.oe, bank->base + bank->regs->direction);
1554 __raw_writel(gpio_context[i].risingdetect, 1373
1555 bank->base + OMAP24XX_GPIO_RISINGDETECT); 1374 if (bank->dbck_enable_mask) {
1556 __raw_writel(gpio_context[i].fallingdetect, 1375 __raw_writel(bank->context.debounce, bank->base +
1557 bank->base + OMAP24XX_GPIO_FALLINGDETECT); 1376 bank->regs->debounce);
1558 __raw_writel(gpio_context[i].dataout, 1377 __raw_writel(bank->context.debounce_en,
1559 bank->base + OMAP24XX_GPIO_DATAOUT); 1378 bank->base + bank->regs->debounce_en);
1560 } 1379 }
1380
1381 __raw_writel(bank->context.irqenable1,
1382 bank->base + bank->regs->irqenable);
1383 __raw_writel(bank->context.irqenable2,
1384 bank->base + bank->regs->irqenable2);
1561} 1385}
1386#endif /* CONFIG_PM_RUNTIME */
1387#else
1388#define omap_gpio_suspend NULL
1389#define omap_gpio_resume NULL
1390#define omap_gpio_runtime_suspend NULL
1391#define omap_gpio_runtime_resume NULL
1562#endif 1392#endif
1563 1393
1394static const struct dev_pm_ops gpio_pm_ops = {
1395 SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
1396 SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
1397 NULL)
1398};
1399
1564static struct platform_driver omap_gpio_driver = { 1400static struct platform_driver omap_gpio_driver = {
1565 .probe = omap_gpio_probe, 1401 .probe = omap_gpio_probe,
1566 .driver = { 1402 .driver = {
1567 .name = "omap_gpio", 1403 .name = "omap_gpio",
1404 .pm = &gpio_pm_ops,
1568 }, 1405 },
1569}; 1406};
1570 1407
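
Editor's note: the runtime-resume path above decides which non-wakeup lines need a software-generated IRQ by XOR-ing the current DATAIN value against the snapshot saved at suspend and then filtering by the saved edge settings. The bit arithmetic in isolation (same mask names as the hunk, plain integers instead of register reads; the concrete values are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Values captured when the bank went idle (hypothetical). */
		uint32_t saved_datain        = 0x00000005;	/* lines 0 and 2 were high */
		uint32_t saved_fallingdetect = 0x00000001;	/* line 0: falling-edge IRQ */
		uint32_t saved_risingdetect  = 0x00000002;	/* line 1: rising-edge IRQ */
		uint32_t enabled_non_wakeup  = 0x00000007;

		/* Value read back after resume: line 0 fell, line 1 rose. */
		uint32_t datain = 0x00000006;

		/* Which enabled non-wakeup lines changed while detection was off? */
		uint32_t changed = (datain ^ saved_datain) & enabled_non_wakeup;

		/* Falling-edge lines only matter if they were high before... */
		uint32_t gen0 = changed & saved_fallingdetect & saved_datain;
		/* ...rising-edge lines only if they were low before. */
		uint32_t gen1 = changed & saved_risingdetect & ~saved_datain;
		/* Lines with neither edge configured are flagged unconditionally. */
		uint32_t gen  = changed & ~saved_fallingdetect & ~saved_risingdetect;

		gen |= gen0 | gen1;
		printf("lines needing a software IRQ: 0x%08x\n", gen);	/* 0x00000003 */
		return 0;
	}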
@@ -1578,17 +1415,3 @@ static int __init omap_gpio_drv_reg(void)
1578 return platform_driver_register(&omap_gpio_driver); 1415 return platform_driver_register(&omap_gpio_driver);
1579} 1416}
1580postcore_initcall(omap_gpio_drv_reg); 1417postcore_initcall(omap_gpio_drv_reg);
1581
1582static int __init omap_gpio_sysinit(void)
1583{
1584 mpuio_init();
1585
1586#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
1587 if (cpu_is_omap16xx() || cpu_class_is_omap2())
1588 register_syscore_ops(&omap_gpio_syscore_ops);
1589#endif
1590
1591 return 0;
1592}
1593
1594arch_initcall(omap_gpio_sysinit);