Diffstat (limited to 'arch/arm/plat-omap')
 arch/arm/plat-omap/Kconfig    |   2
 arch/arm/plat-omap/clock.c    |   4
 arch/arm/plat-omap/cpu-omap.c |  18
 arch/arm/plat-omap/devices.c  |  10
 arch/arm/plat-omap/dma.c      |  84
 arch/arm/plat-omap/dmtimer.c  | 428
 arch/arm/plat-omap/gpio.c     | 103
 arch/arm/plat-omap/sram.c     |   9
 arch/arm/plat-omap/timer32k.c | 122
 9 files changed, 546 insertions(+), 234 deletions(-)
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index ec49495e651e..ec752e16d618 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -91,7 +91,7 @@ config OMAP_32K_TIMER_HZ
 
 config OMAP_DM_TIMER
 	bool "Use dual-mode timer"
-	depends on ARCH_OMAP16XX
+	depends on ARCH_OMAP16XX || ARCH_OMAP24XX
 	help
 	  Select this option if you want to use OMAP Dual-Mode timers.
 
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index c520e9dcdd8a..7f45c7c3e673 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -27,9 +27,9 @@
 
 #include <asm/arch/clock.h>
 
-LIST_HEAD(clocks);
+static LIST_HEAD(clocks);
 static DEFINE_MUTEX(clocks_mutex);
-DEFINE_SPINLOCK(clockfw_lock);
+static DEFINE_SPINLOCK(clockfw_lock);
 
 static struct clk_functions *arch_clock;
 
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
index 98edc9fdd6d1..a0c71dca2373 100644
--- a/arch/arm/plat-omap/cpu-omap.c
+++ b/arch/arm/plat-omap/cpu-omap.c
@@ -25,6 +25,14 @@
 #include <asm/io.h>
 #include <asm/system.h>
 
+#define VERY_HI_RATE	900000000
+
+#ifdef CONFIG_ARCH_OMAP1
+#define MPU_CLK		"mpu"
+#else
+#define MPU_CLK		"virt_prcm_set"
+#endif
+
 /* TODO: Add support for SDRAM timing changes */
 
 int omap_verify_speed(struct cpufreq_policy *policy)
@@ -36,7 +44,7 @@ int omap_verify_speed(struct cpufreq_policy *policy)
 
 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
 				     policy->cpuinfo.max_freq);
-	mpu_clk = clk_get(NULL, "mpu");
+	mpu_clk = clk_get(NULL, MPU_CLK);
 	if (IS_ERR(mpu_clk))
 		return PTR_ERR(mpu_clk);
 	policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
@@ -56,7 +64,7 @@ unsigned int omap_getspeed(unsigned int cpu)
 	if (cpu)
 		return 0;
 
-	mpu_clk = clk_get(NULL, "mpu");
+	mpu_clk = clk_get(NULL, MPU_CLK);
 	if (IS_ERR(mpu_clk))
 		return 0;
 	rate = clk_get_rate(mpu_clk) / 1000;
@@ -73,7 +81,7 @@ static int omap_target(struct cpufreq_policy *policy,
 	struct cpufreq_freqs freqs;
 	int ret = 0;
 
-	mpu_clk = clk_get(NULL, "mpu");
+	mpu_clk = clk_get(NULL, MPU_CLK);
 	if (IS_ERR(mpu_clk))
 		return PTR_ERR(mpu_clk);
 
@@ -93,7 +101,7 @@ static int __init omap_cpu_init(struct cpufreq_policy *policy)
 {
 	struct clk * mpu_clk;
 
-	mpu_clk = clk_get(NULL, "mpu");
+	mpu_clk = clk_get(NULL, MPU_CLK);
 	if (IS_ERR(mpu_clk))
 		return PTR_ERR(mpu_clk);
 
@@ -102,7 +110,7 @@ static int __init omap_cpu_init(struct cpufreq_policy *policy)
 	policy->cur = policy->min = policy->max = omap_getspeed(0);
 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
-	policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, 216000000) / 1000;
+	policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, VERY_HI_RATE) / 1000;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	clk_put(mpu_clk);
 
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index ca486c9f36b5..1812f237d12f 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -104,7 +104,7 @@ static void omap_init_kp(void)
 		omap_cfg_reg(E20_1610_KBR3);
 		omap_cfg_reg(E19_1610_KBR4);
 		omap_cfg_reg(N19_1610_KBR5);
-	} else if (machine_is_omap_perseus2()) {
+	} else if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
 		omap_cfg_reg(E2_730_KBR0);
 		omap_cfg_reg(J7_730_KBR1);
 		omap_cfg_reg(E1_730_KBR2);
@@ -161,8 +161,8 @@ static u64 mmc1_dmamask = 0xffffffff;
 
 static struct resource mmc1_resources[] = {
 	{
-		.start		= IO_ADDRESS(OMAP_MMC1_BASE),
-		.end		= IO_ADDRESS(OMAP_MMC1_BASE) + 0x7f,
+		.start		= OMAP_MMC1_BASE,
+		.end		= OMAP_MMC1_BASE + 0x7f,
 		.flags		= IORESOURCE_MEM,
 	},
 	{
@@ -190,8 +190,8 @@ static u64 mmc2_dmamask = 0xffffffff;
 
 static struct resource mmc2_resources[] = {
 	{
-		.start		= IO_ADDRESS(OMAP_MMC2_BASE),
-		.end		= IO_ADDRESS(OMAP_MMC2_BASE) + 0x7f,
+		.start		= OMAP_MMC2_BASE,
+		.end		= OMAP_MMC2_BASE + 0x7f,
 		.flags		= IORESOURCE_MEM,
 	},
 	{
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 5dac4230360d..c5d0214ef191 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -43,6 +43,7 @@
 
 #define OMAP_DMA_ACTIVE			0x01
 #define OMAP_DMA_CCR_EN			(1 << 7)
+#define OMAP2_DMA_CSR_CLEAR_MASK	0xffe
 
 #define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)
 
@@ -166,18 +167,24 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
 	if (cpu_is_omap24xx() && dma_trigger) {
 		u32 val = OMAP_DMA_CCR_REG(lch);
 
+		val &= ~(3 << 19);
 		if (dma_trigger > 63)
 			val |= 1 << 20;
 		if (dma_trigger > 31)
 			val |= 1 << 19;
 
+		val &= ~(0x1f);
 		val |= (dma_trigger & 0x1f);
 
 		if (sync_mode & OMAP_DMA_SYNC_FRAME)
 			val |= 1 << 5;
+		else
+			val &= ~(1 << 5);
 
 		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
 			val |= 1 << 18;
+		else
+			val &= ~(1 << 18);
 
 		if (src_or_dst_synch)
 			val |= 1 << 24;		/* source synch */
@@ -286,22 +293,39 @@ void omap_set_dma_src_data_pack(int lch, int enable)
 
 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 {
+	unsigned int burst = 0;
 	OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7);
 
 	switch (burst_mode) {
 	case OMAP_DMA_DATA_BURST_DIS:
 		break;
 	case OMAP_DMA_DATA_BURST_4:
-		OMAP_DMA_CSDP_REG(lch) |= (0x02 << 7);
+		if (cpu_is_omap24xx())
+			burst = 0x1;
+		else
+			burst = 0x2;
 		break;
 	case OMAP_DMA_DATA_BURST_8:
-		/* not supported by current hardware
+		if (cpu_is_omap24xx()) {
+			burst = 0x2;
+			break;
+		}
+		/* not supported by current hardware on OMAP1
 		 * w |= (0x03 << 7);
 		 * fall through
 		 */
+	case OMAP_DMA_DATA_BURST_16:
+		if (cpu_is_omap24xx()) {
+			burst = 0x3;
+			break;
+		}
+		/* OMAP1 don't support burst 16
+		 * fall through
+		 */
 	default:
 		BUG();
 	}
+	OMAP_DMA_CSDP_REG(lch) |= (burst << 7);
 }
 
 /* Note that dest_port is only for OMAP1 */
@@ -348,30 +372,49 @@ void omap_set_dma_dest_data_pack(int lch, int enable)
 
 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 {
+	unsigned int burst = 0;
 	OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14);
 
 	switch (burst_mode) {
 	case OMAP_DMA_DATA_BURST_DIS:
 		break;
 	case OMAP_DMA_DATA_BURST_4:
-		OMAP_DMA_CSDP_REG(lch) |= (0x02 << 14);
+		if (cpu_is_omap24xx())
+			burst = 0x1;
+		else
+			burst = 0x2;
 		break;
 	case OMAP_DMA_DATA_BURST_8:
-		OMAP_DMA_CSDP_REG(lch) |= (0x03 << 14);
+		if (cpu_is_omap24xx())
+			burst = 0x2;
+		else
+			burst = 0x3;
 		break;
+	case OMAP_DMA_DATA_BURST_16:
+		if (cpu_is_omap24xx()) {
+			burst = 0x3;
+			break;
+		}
+		/* OMAP1 don't support burst 16
+		 * fall through
+		 */
 	default:
 		printk(KERN_ERR "Invalid DMA burst mode\n");
 		BUG();
 		return;
 	}
+	OMAP_DMA_CSDP_REG(lch) |= (burst << 14);
 }
 
 static inline void omap_enable_channel_irq(int lch)
 {
 	u32 status;
 
-	/* Read CSR to make sure it's cleared. */
-	status = OMAP_DMA_CSR_REG(lch);
+	/* Clear CSR */
+	if (cpu_class_is_omap1())
+		status = OMAP_DMA_CSR_REG(lch);
+	else if (cpu_is_omap24xx())
+		OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
 
 	/* Enable some nice interrupts. */
 	OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs;
@@ -470,11 +513,13 @@ int omap_request_dma(int dev_id, const char *dev_name,
 	chan->dev_name = dev_name;
 	chan->callback = callback;
 	chan->data = data;
-	chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ |
-		OMAP_DMA_BLOCK_IRQ;
+	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
 
-	if (cpu_is_omap24xx())
-		chan->enabled_irqs |= OMAP2_DMA_TRANS_ERR_IRQ;
+	if (cpu_class_is_omap1())
+		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
+	else if (cpu_is_omap24xx())
+		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
+			OMAP2_DMA_TRANS_ERR_IRQ;
 
 	if (cpu_is_omap16xx()) {
 		/* If the sync device is set, configure it dynamically. */
@@ -494,7 +539,7 @@ int omap_request_dma(int dev_id, const char *dev_name,
 
 		omap_enable_channel_irq(free_ch);
 		/* Clear the CSR register and IRQ status register */
-		OMAP_DMA_CSR_REG(free_ch) = 0x0;
+		OMAP_DMA_CSR_REG(free_ch) = OMAP2_DMA_CSR_CLEAR_MASK;
 		omap_writel(~0x0, OMAP_DMA4_IRQSTATUS_L0);
 	}
 
@@ -534,7 +579,7 @@ void omap_free_dma(int lch)
 		omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
 
 		/* Clear the CSR register and IRQ status register */
-		OMAP_DMA_CSR_REG(lch) = 0x0;
+		OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
 
 		val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
 		val |= 1 << lch;
@@ -798,7 +843,7 @@ static int omap1_dma_handle_ch(int ch)
798 "%d (CSR %04x)\n", ch, csr); 843 "%d (CSR %04x)\n", ch, csr);
799 return 0; 844 return 0;
800 } 845 }
801 if (unlikely(csr & OMAP_DMA_TOUT_IRQ)) 846 if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
802 printk(KERN_WARNING "DMA timeout with device %d\n", 847 printk(KERN_WARNING "DMA timeout with device %d\n",
803 dma_chan[ch].dev_id); 848 dma_chan[ch].dev_id);
804 if (unlikely(csr & OMAP_DMA_DROP_IRQ)) 849 if (unlikely(csr & OMAP_DMA_DROP_IRQ))
@@ -846,20 +891,21 @@ static int omap2_dma_handle_ch(int ch)
 		return 0;
 	if (unlikely(dma_chan[ch].dev_id == -1))
 		return 0;
-	/* REVISIT: According to 24xx TRM, there's no TOUT_IE */
-	if (unlikely(status & OMAP_DMA_TOUT_IRQ))
-		printk(KERN_INFO "DMA timeout with device %d\n",
-		       dma_chan[ch].dev_id);
 	if (unlikely(status & OMAP_DMA_DROP_IRQ))
 		printk(KERN_INFO
 		       "DMA synchronization event drop occurred with device "
 		       "%d\n", dma_chan[ch].dev_id);
-
 	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
 		printk(KERN_INFO "DMA transaction error with device %d\n",
 		       dma_chan[ch].dev_id);
+	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
+		printk(KERN_INFO "DMA secure error with device %d\n",
+		       dma_chan[ch].dev_id);
+	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
+		printk(KERN_INFO "DMA misaligned error with device %d\n",
+		       dma_chan[ch].dev_id);
 
-	OMAP_DMA_CSR_REG(ch) = 0x20;
+	OMAP_DMA_CSR_REG(ch) = OMAP2_DMA_CSR_CLEAR_MASK;
 
 	val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
 	/* ch in this function is from 0-31 while in register it is 1-32 */
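
For orientation (not part of the patch): with this change a driver that already owns a DMA channel can ask for the larger burst sizes on 24xx, while OMAP1 keeps its old behaviour and still hits BUG() for unsupported sizes. A minimal, illustrative call sequence, where lch is assumed to come from an earlier omap_request_dma():

	/* illustrative only -- lch is an already-allocated logical channel */
	omap_set_dma_src_burst_mode(lch, OMAP_DMA_DATA_BURST_16);	/* 24xx only; BUG() on OMAP1 */
	omap_set_dma_dest_burst_mode(lch, OMAP_DMA_DATA_BURST_8);	/* works on OMAP1 and 24xx */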
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index eba3cb52ad87..50524436de63 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -4,7 +4,8 @@
  * OMAP Dual-Mode Timers
  *
  * Copyright (C) 2005 Nokia Corporation
- * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+ * OMAP2 support by Juha Yrjola
+ * API improvements and OMAP2 clock framework support by Timo Teras
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -26,15 +27,17 @@
  */
 
 #include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
 #include <asm/hardware.h>
 #include <asm/arch/dmtimer.h>
 #include <asm/io.h>
 #include <asm/arch/irqs.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#define OMAP_TIMER_COUNT		8
 
+/* register offsets */
 #define OMAP_TIMER_ID_REG		0x00
 #define OMAP_TIMER_OCP_CFG_REG		0x10
 #define OMAP_TIMER_SYS_STAT_REG		0x14
@@ -50,52 +53,196 @@
 #define OMAP_TIMER_CAPTURE_REG		0x3c
 #define OMAP_TIMER_IF_CTRL_REG		0x40
 
+/* timer control reg bits */
+#define OMAP_TIMER_CTRL_GPOCFG		(1 << 14)
+#define OMAP_TIMER_CTRL_CAPTMODE	(1 << 13)
+#define OMAP_TIMER_CTRL_PT		(1 << 12)
+#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH	(0x1 << 8)
+#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW	(0x2 << 8)
+#define OMAP_TIMER_CTRL_TCM_BOTHEDGES	(0x3 << 8)
+#define OMAP_TIMER_CTRL_SCPWM		(1 << 7)
+#define OMAP_TIMER_CTRL_CE		(1 << 6)	/* compare enable */
+#define OMAP_TIMER_CTRL_PRE		(1 << 5)	/* prescaler enable */
+#define OMAP_TIMER_CTRL_PTV_SHIFT	2	/* how much to shift the prescaler value */
+#define OMAP_TIMER_CTRL_AR		(1 << 1)	/* auto-reload enable */
+#define OMAP_TIMER_CTRL_ST		(1 << 0)	/* start timer */
+
+struct omap_dm_timer {
+	unsigned long phys_base;
+	int irq;
+#ifdef CONFIG_ARCH_OMAP2
+	struct clk *iclk, *fclk;
+#endif
+	void __iomem *io_base;
+	unsigned reserved:1;
+};
 
-static struct dmtimer_info_struct {
-	struct list_head unused_timers;
-	struct list_head reserved_timers;
-} dm_timer_info;
+#ifdef CONFIG_ARCH_OMAP1
 
 static struct omap_dm_timer dm_timers[] = {
-	{ .base=0xfffb1400, .irq=INT_1610_GPTIMER1 },
-	{ .base=0xfffb1c00, .irq=INT_1610_GPTIMER2 },
-	{ .base=0xfffb2400, .irq=INT_1610_GPTIMER3 },
-	{ .base=0xfffb2c00, .irq=INT_1610_GPTIMER4 },
-	{ .base=0xfffb3400, .irq=INT_1610_GPTIMER5 },
-	{ .base=0xfffb3c00, .irq=INT_1610_GPTIMER6 },
-	{ .base=0xfffb4400, .irq=INT_1610_GPTIMER7 },
-	{ .base=0xfffb4c00, .irq=INT_1610_GPTIMER8 },
-	{ .base=0x0 },
+	{ .phys_base = 0xfffb1400, .irq = INT_1610_GPTIMER1 },
+	{ .phys_base = 0xfffb1c00, .irq = INT_1610_GPTIMER2 },
+	{ .phys_base = 0xfffb2400, .irq = INT_1610_GPTIMER3 },
+	{ .phys_base = 0xfffb2c00, .irq = INT_1610_GPTIMER4 },
+	{ .phys_base = 0xfffb3400, .irq = INT_1610_GPTIMER5 },
+	{ .phys_base = 0xfffb3c00, .irq = INT_1610_GPTIMER6 },
+	{ .phys_base = 0xfffb4400, .irq = INT_1610_GPTIMER7 },
+	{ .phys_base = 0xfffb4c00, .irq = INT_1610_GPTIMER8 },
 };
 
+#elif defined(CONFIG_ARCH_OMAP2)
+
+static struct omap_dm_timer dm_timers[] = {
+	{ .phys_base = 0x48028000, .irq = INT_24XX_GPTIMER1 },
+	{ .phys_base = 0x4802a000, .irq = INT_24XX_GPTIMER2 },
+	{ .phys_base = 0x48078000, .irq = INT_24XX_GPTIMER3 },
+	{ .phys_base = 0x4807a000, .irq = INT_24XX_GPTIMER4 },
+	{ .phys_base = 0x4807c000, .irq = INT_24XX_GPTIMER5 },
+	{ .phys_base = 0x4807e000, .irq = INT_24XX_GPTIMER6 },
+	{ .phys_base = 0x48080000, .irq = INT_24XX_GPTIMER7 },
+	{ .phys_base = 0x48082000, .irq = INT_24XX_GPTIMER8 },
+	{ .phys_base = 0x48084000, .irq = INT_24XX_GPTIMER9 },
+	{ .phys_base = 0x48086000, .irq = INT_24XX_GPTIMER10 },
+	{ .phys_base = 0x48088000, .irq = INT_24XX_GPTIMER11 },
+	{ .phys_base = 0x4808a000, .irq = INT_24XX_GPTIMER12 },
+};
+
+static const char *dm_source_names[] = {
+	"sys_ck",
+	"func_32k_ck",
+	"alt_ck"
+};
 
+static struct clk *dm_source_clocks[3];
+
+#else
+
+#error OMAP architecture not supported!
+
+#endif
+
+static const int dm_timer_count = ARRAY_SIZE(dm_timers);
 static spinlock_t dm_timer_lock;
 
+static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, int reg)
+{
+	return readl(timer->io_base + reg);
+}
 
-inline void omap_dm_timer_write_reg(struct omap_dm_timer *timer, int reg, u32 value)
+static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, int reg, u32 value)
 {
-	omap_writel(value, timer->base + reg);
+	writel(value, timer->io_base + reg);
 	while (omap_dm_timer_read_reg(timer, OMAP_TIMER_WRITE_PEND_REG))
 		;
 }
 
-u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, int reg)
+static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
 {
-	return omap_readl(timer->base + reg);
+	int c;
+
+	c = 0;
+	while (!(omap_dm_timer_read_reg(timer, OMAP_TIMER_SYS_STAT_REG) & 1)) {
+		c++;
+		if (c > 100000) {
+			printk(KERN_ERR "Timer failed to reset\n");
+			return;
+		}
+	}
 }
 
-int omap_dm_timers_active(void)
+static void omap_dm_timer_reset(struct omap_dm_timer *timer)
+{
+	u32 l;
+
+	if (timer != &dm_timers[0]) {
+		omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
+		omap_dm_timer_wait_for_reset(timer);
+	}
+	omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_SYS_CLK);
+
+	/* Set to smart-idle mode */
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
+	l |= 0x02 << 3;
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, l);
+}
+
+static void omap_dm_timer_prepare(struct omap_dm_timer *timer)
+{
+#ifdef CONFIG_ARCH_OMAP2
+	clk_enable(timer->iclk);
+	clk_enable(timer->fclk);
+#endif
+	omap_dm_timer_reset(timer);
+}
+
+struct omap_dm_timer *omap_dm_timer_request(void)
+{
+	struct omap_dm_timer *timer = NULL;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dm_timer_lock, flags);
+	for (i = 0; i < dm_timer_count; i++) {
+		if (dm_timers[i].reserved)
+			continue;
+
+		timer = &dm_timers[i];
+		timer->reserved = 1;
+		break;
+	}
+	spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+	if (timer != NULL)
+		omap_dm_timer_prepare(timer);
+
+	return timer;
+}
+
+struct omap_dm_timer *omap_dm_timer_request_specific(int id)
 {
 	struct omap_dm_timer *timer;
+	unsigned long flags;
 
-	for (timer = &dm_timers[0]; timer->base; ++timer)
-		if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
-		    OMAP_TIMER_CTRL_ST)
-			return 1;
+	spin_lock_irqsave(&dm_timer_lock, flags);
+	if (id <= 0 || id > dm_timer_count || dm_timers[id-1].reserved) {
+		spin_unlock_irqrestore(&dm_timer_lock, flags);
+		printk("BUG: warning at %s:%d/%s(): unable to get timer %d\n",
+		       __FILE__, __LINE__, __FUNCTION__, id);
+		dump_stack();
+		return NULL;
+	}
 
-	return 0;
+	timer = &dm_timers[id-1];
+	timer->reserved = 1;
+	spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+	omap_dm_timer_prepare(timer);
+
+	return timer;
 }
 
+void omap_dm_timer_free(struct omap_dm_timer *timer)
+{
+	omap_dm_timer_reset(timer);
+#ifdef CONFIG_ARCH_OMAP2
+	clk_disable(timer->iclk);
+	clk_disable(timer->fclk);
+#endif
+	WARN_ON(!timer->reserved);
+	timer->reserved = 0;
+}
+
+int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
+{
+	return timer->irq;
+}
+
+#if defined(CONFIG_ARCH_OMAP1)
+
+struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+{
+	BUG();
+}
 
 /**
  * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
@@ -103,184 +250,229 @@ int omap_dm_timers_active(void)
  */
 __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
 {
-	int n;
+	int i;
 
 	/* If ARMXOR cannot be idled this function call is unnecessary */
 	if (!(inputmask & (1 << 1)))
 		return inputmask;
 
 	/* If any active timer is using ARMXOR return modified mask */
-	for (n = 0; dm_timers[n].base; ++n)
-		if (omap_dm_timer_read_reg(&dm_timers[n], OMAP_TIMER_CTRL_REG)&
-			OMAP_TIMER_CTRL_ST) {
-			if (((omap_readl(MOD_CONF_CTRL_1)>>(n*2)) & 0x03) == 0)
+	for (i = 0; i < dm_timer_count; i++) {
+		u32 l;
+
+		l = omap_dm_timer_read_reg(&dm_timers[i], OMAP_TIMER_CTRL_REG);
+		if (l & OMAP_TIMER_CTRL_ST) {
+			if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
 				inputmask &= ~(1 << 1);
 			else
 				inputmask &= ~(1 << 2);
 		}
+	}
 
 	return inputmask;
 }
 
+#elif defined(CONFIG_ARCH_OMAP2)
 
-void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
+struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
 {
-	int n = (timer - dm_timers) << 1;
-	u32 l;
+	return timer->fclk;
+}
 
-	l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
-	l |= source << n;
-	omap_writel(l, MOD_CONF_CTRL_1);
+__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
+{
+	BUG();
 }
 
+#endif
 
-static void omap_dm_timer_reset(struct omap_dm_timer *timer)
+void omap_dm_timer_trigger(struct omap_dm_timer *timer)
 {
-	/* Reset and set posted mode */
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, 0x02);
-
-	omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_ARMXOR);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
 }
 
+void omap_dm_timer_start(struct omap_dm_timer *timer)
+{
+	u32 l;
 
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	if (!(l & OMAP_TIMER_CTRL_ST)) {
+		l |= OMAP_TIMER_CTRL_ST;
+		omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+	}
+}
 
-struct omap_dm_timer * omap_dm_timer_request(void)
+void omap_dm_timer_stop(struct omap_dm_timer *timer)
 {
-	struct omap_dm_timer *timer = NULL;
-	unsigned long flags;
+	u32 l;
 
-	spin_lock_irqsave(&dm_timer_lock, flags);
-	if (!list_empty(&dm_timer_info.unused_timers)) {
-		timer = (struct omap_dm_timer *)
-				dm_timer_info.unused_timers.next;
-		list_move_tail((struct list_head *)timer,
-				&dm_timer_info.reserved_timers);
+	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+	if (l & OMAP_TIMER_CTRL_ST) {
+		l &= ~0x1;
+		omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
 	}
-	spin_unlock_irqrestore(&dm_timer_lock, flags);
-
-	return timer;
 }
 
+#ifdef CONFIG_ARCH_OMAP1
 
-void omap_dm_timer_free(struct omap_dm_timer *timer)
+void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
 {
-	unsigned long flags;
-
-	omap_dm_timer_reset(timer);
+	int n = (timer - dm_timers) << 1;
+	u32 l;
 
-	spin_lock_irqsave(&dm_timer_lock, flags);
-	list_move_tail((struct list_head *)timer, &dm_timer_info.unused_timers);
-	spin_unlock_irqrestore(&dm_timer_lock, flags);
+	l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
+	l |= source << n;
+	omap_writel(l, MOD_CONF_CTRL_1);
 }
 
-void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
-				  unsigned int value)
-{
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
-}
+#else
 
-unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
+void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
 {
-	return omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
-}
+	if (source < 0 || source >= 3)
+		return;
 
-void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
-{
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+	clk_disable(timer->fclk);
+	clk_set_parent(timer->fclk, dm_source_clocks[source]);
+	clk_enable(timer->fclk);
+
+	/* When the functional clock disappears, too quick writes seem to
+	 * cause an abort. */
+	__delay(15000);
 }
 
-void omap_dm_timer_enable_autoreload(struct omap_dm_timer *timer)
+#endif
+
+void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+			    unsigned int load)
 {
 	u32 l;
+
 	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-	l |= OMAP_TIMER_CTRL_AR;
+	if (autoreload)
+		l |= OMAP_TIMER_CTRL_AR;
+	else
+		l &= ~OMAP_TIMER_CTRL_AR;
 	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
 }
 
-void omap_dm_timer_trigger(struct omap_dm_timer *timer)
-{
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 1);
-}
-
-void omap_dm_timer_set_trigger(struct omap_dm_timer *timer, unsigned int value)
+void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+			     unsigned int match)
 {
 	u32 l;
 
 	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-	l |= value & 0x3;
+	if (enable)
+		l |= OMAP_TIMER_CTRL_CE;
+	else
+		l &= ~OMAP_TIMER_CTRL_CE;
 	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
 }
 
-void omap_dm_timer_start(struct omap_dm_timer *timer)
+
+void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+			   int toggle, int trigger)
 {
 	u32 l;
 
 	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-	l |= OMAP_TIMER_CTRL_ST;
+	l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
+	       OMAP_TIMER_CTRL_PT | (0x03 << 10));
+	if (def_on)
+		l |= OMAP_TIMER_CTRL_SCPWM;
+	if (toggle)
+		l |= OMAP_TIMER_CTRL_PT;
+	l |= trigger << 10;
 	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
 }
 
-void omap_dm_timer_stop(struct omap_dm_timer *timer)
+void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
 {
 	u32 l;
 
 	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-	l &= ~0x1;
+	l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
+	if (prescaler >= 0x00 && prescaler <= 0x07) {
+		l |= OMAP_TIMER_CTRL_PRE;
+		l |= prescaler << 2;
+	}
 	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
 }
 
-unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
+void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+				  unsigned int value)
 {
-	return omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
 }
 
-void omap_dm_timer_reset_counter(struct omap_dm_timer *timer)
+unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
 {
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, 0);
+	return omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
 }
 
-void omap_dm_timer_set_load(struct omap_dm_timer *timer, unsigned int load)
+void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
 {
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
 }
 
-void omap_dm_timer_set_match(struct omap_dm_timer *timer, unsigned int match)
+unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
 {
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
+	return omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
 }
 
-void omap_dm_timer_enable_compare(struct omap_dm_timer *timer)
+void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
 {
-	u32 l;
-
-	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-	l |= OMAP_TIMER_CTRL_CE;
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+	return omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
 }
 
+int omap_dm_timers_active(void)
+{
+	int i;
+
+	for (i = 0; i < dm_timer_count; i++) {
+		struct omap_dm_timer *timer;
+
+		timer = &dm_timers[i];
+		if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
+		    OMAP_TIMER_CTRL_ST)
+			return 1;
+	}
+	return 0;
+}
 
-static inline void __dm_timer_init(void)
+int omap_dm_timer_init(void)
 {
 	struct omap_dm_timer *timer;
+	int i;
+
+	if (!(cpu_is_omap16xx() || cpu_is_omap24xx()))
+		return -ENODEV;
 
 	spin_lock_init(&dm_timer_lock);
-	INIT_LIST_HEAD(&dm_timer_info.unused_timers);
-	INIT_LIST_HEAD(&dm_timer_info.reserved_timers);
-
-	timer = &dm_timers[0];
-	while (timer->base) {
-		list_add_tail((struct list_head *)timer, &dm_timer_info.unused_timers);
-		omap_dm_timer_reset(timer);
-		timer++;
+#ifdef CONFIG_ARCH_OMAP2
+	for (i = 0; i < ARRAY_SIZE(dm_source_names); i++) {
+		dm_source_clocks[i] = clk_get(NULL, dm_source_names[i]);
+		BUG_ON(dm_source_clocks[i] == NULL);
+	}
+#endif
+
+	for (i = 0; i < dm_timer_count; i++) {
+#ifdef CONFIG_ARCH_OMAP2
+		char clk_name[16];
+#endif
+
+		timer = &dm_timers[i];
+		timer->io_base = (void __iomem *) io_p2v(timer->phys_base);
+#ifdef CONFIG_ARCH_OMAP2
+		sprintf(clk_name, "gpt%d_ick", i + 1);
+		timer->iclk = clk_get(NULL, clk_name);
+		sprintf(clk_name, "gpt%d_fck", i + 1);
+		timer->fclk = clk_get(NULL, clk_name);
+#endif
 	}
-}
 
-static int __init omap_dm_timer_init(void)
-{
-	if (cpu_is_omap16xx())
-		__dm_timer_init();
 	return 0;
 }
-
-arch_initcall(omap_dm_timer_init);
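
For orientation (not part of the patch): the reworked dmtimer API is meant to be used roughly as below; this mirrors the timer32k.c conversion further down in this diff. The tick count, irqaction and error path are made-up placeholders, only the omap_dm_timer_* calls come from the code above:

	struct omap_dm_timer *timer;

	timer = omap_dm_timer_request();	/* or omap_dm_timer_request_specific(n) */
	if (timer == NULL)
		return -EBUSY;			/* placeholder error handling */

	omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
	omap_dm_timer_set_load(timer, 1, 0xffffffff - ticks);	/* 1 = autoreload */
	omap_dm_timer_set_int_enable(timer, OMAP_TIMER_INT_OVERFLOW);
	setup_irq(omap_dm_timer_get_irq(timer), &my_timer_irqaction);
	omap_dm_timer_start(timer);

	/* ... and when the driver is done with it: */
	omap_dm_timer_stop(timer);
	omap_dm_timer_free(timer);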
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 418b88fbea8e..ae08eeec7aad 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -536,6 +536,49 @@ static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
 	_clear_gpio_irqbank(bank, 1 << get_gpio_index(gpio));
 }
 
+static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
+{
+	void __iomem *reg = bank->base;
+	int inv = 0;
+	u32 l;
+	u32 mask;
+
+	switch (bank->method) {
+	case METHOD_MPUIO:
+		reg += OMAP_MPUIO_GPIO_MASKIT;
+		mask = 0xffff;
+		inv = 1;
+		break;
+	case METHOD_GPIO_1510:
+		reg += OMAP1510_GPIO_INT_MASK;
+		mask = 0xffff;
+		inv = 1;
+		break;
+	case METHOD_GPIO_1610:
+		reg += OMAP1610_GPIO_IRQENABLE1;
+		mask = 0xffff;
+		break;
+	case METHOD_GPIO_730:
+		reg += OMAP730_GPIO_INT_MASK;
+		mask = 0xffffffff;
+		inv = 1;
+		break;
+	case METHOD_GPIO_24XX:
+		reg += OMAP24XX_GPIO_IRQENABLE1;
+		mask = 0xffffffff;
+		break;
+	default:
+		BUG();
+		return 0;
+	}
+
+	l = __raw_readl(reg);
+	if (inv)
+		l = ~l;
+	l &= mask;
+	return l;
+}
+
 static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
 {
 	void __iomem *reg = bank->base;
@@ -735,6 +778,8 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
 	u32 isr;
 	unsigned int gpio_irq;
 	struct gpio_bank *bank;
+	u32 retrigger = 0;
+	int unmasked = 0;
 
 	desc->chip->ack(irq);
 
@@ -759,18 +804,22 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
 #endif
 	while(1) {
 		u32 isr_saved, level_mask = 0;
+		u32 enabled;
 
-		isr_saved = isr = __raw_readl(isr_reg);
+		enabled = _get_gpio_irqbank_mask(bank);
+		isr_saved = isr = __raw_readl(isr_reg) & enabled;
 
 		if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
 			isr &= 0x0000ffff;
 
-		if (cpu_is_omap24xx())
+		if (cpu_is_omap24xx()) {
 			level_mask =
 				__raw_readl(bank->base +
 					OMAP24XX_GPIO_LEVELDETECT0) |
 				__raw_readl(bank->base +
 					OMAP24XX_GPIO_LEVELDETECT1);
+			level_mask &= enabled;
+		}
 
 		/* clear edge sensitive interrupts before handler(s) are
 		called so that we don't miss any interrupt occurred while
@@ -781,19 +830,54 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
 
 		/* if there is only edge sensitive GPIO pin interrupts
 		configured, we could unmask GPIO bank interrupt immediately */
-		if (!level_mask)
+		if (!level_mask && !unmasked) {
+			unmasked = 1;
 			desc->chip->unmask(irq);
+		}
 
+		isr |= retrigger;
+		retrigger = 0;
 		if (!isr)
 			break;
 
 		gpio_irq = bank->virtual_irq_start;
 		for (; isr != 0; isr >>= 1, gpio_irq++) {
 			struct irqdesc *d;
+			int irq_mask;
 			if (!(isr & 1))
 				continue;
 			d = irq_desc + gpio_irq;
+			/* Don't run the handler if it's already running
+			 * or was disabled lazily.
+			 */
+			if (unlikely((d->disable_depth || d->running))) {
+				irq_mask = 1 <<
+					(gpio_irq - bank->virtual_irq_start);
+				/* The unmasking will be done by
+				 * enable_irq in case it is disabled or
+				 * after returning from the handler if
+				 * it's already running.
+				 */
+				_enable_gpio_irqbank(bank, irq_mask, 0);
+				if (!d->disable_depth) {
+					/* Level triggered interrupts
+					 * won't ever be reentered
+					 */
+					BUG_ON(level_mask & irq_mask);
+					d->pending = 1;
+				}
+				continue;
+			}
+			d->running = 1;
 			desc_handle_irq(gpio_irq, d, regs);
+			d->running = 0;
+			if (unlikely(d->pending && !d->disable_depth)) {
+				irq_mask = 1 <<
+					(gpio_irq - bank->virtual_irq_start);
+				d->pending = 0;
+				_enable_gpio_irqbank(bank, irq_mask, 1);
+				retrigger |= irq_mask;
+			}
 		}
 
 		if (cpu_is_omap24xx()) {
@@ -803,13 +887,14 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
 			_enable_gpio_irqbank(bank, isr_saved & level_mask, 1);
 		}
 
-		/* if bank has any level sensitive GPIO pin interrupt
-		configured, we must unmask the bank interrupt only after
-		handler(s) are executed in order to avoid spurious bank
-		interrupt */
-		if (level_mask)
-			desc->chip->unmask(irq);
 	}
+	/* if bank has any level sensitive GPIO pin interrupt
+	configured, we must unmask the bank interrupt only after
+	handler(s) are executed in order to avoid spurious bank
+	interrupt */
+	if (!unmasked)
+		desc->chip->unmask(irq);
+
 }
 
 static void gpio_ack_irq(unsigned int irq)
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 72ce52ce815b..e75718301b0f 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -157,14 +157,12 @@ static struct map_desc omap_sram_io_desc[] __initdata = {
 	{	/* .length gets filled in at runtime */
 		.virtual	= OMAP1_SRAM_VA,
 		.pfn		= __phys_to_pfn(OMAP1_SRAM_PA),
-		.type		= MT_DEVICE
+		.type		= MT_MEMORY
 	}
 };
 
 /*
- * In order to use last 2kB of SRAM on 1611b, we must round the size
- * up to multiple of PAGE_SIZE. We cannot use ioremap for SRAM, as
- * clock init needs SRAM early.
+ * Note that we cannot use ioremap for SRAM, as clock init needs SRAM early.
  */
 void __init omap_map_sram(void)
 {
@@ -184,8 +182,7 @@ void __init omap_map_sram(void)
 		omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
 	}
 
-	omap_sram_io_desc[0].length = (omap_sram_size + PAGE_SIZE-1)/PAGE_SIZE;
-	omap_sram_io_desc[0].length *= PAGE_SIZE;
+	omap_sram_io_desc[0].length = 1024 * 1024;	/* Use section desc */
 	iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
 	printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
diff --git a/arch/arm/plat-omap/timer32k.c b/arch/arm/plat-omap/timer32k.c
index 053c18132ef4..ddf4360dea72 100644
--- a/arch/arm/plat-omap/timer32k.c
+++ b/arch/arm/plat-omap/timer32k.c
@@ -7,6 +7,7 @@
  * Partial timer rewrite and additional dynamic tick timer support by
  * Tony Lindgen <tony@atomide.com> and
  * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ * OMAP Dual-mode timer framework support by Timo Teras
  *
  * MPU timer code based on the older MPU timer code for OMAP
  * Copyright (C) 2000 RidgeRun, Inc.
@@ -49,6 +50,7 @@
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
+#include <asm/arch/dmtimer.h>
 
 struct sys_timer omap_timer;
 
@@ -78,18 +80,6 @@ struct sys_timer omap_timer;
 #define OMAP1_32K_TIMER_TVR		0x00
 #define OMAP1_32K_TIMER_TCR		0x04
 
-/* 24xx specific defines */
-#define OMAP2_GP_TIMER_BASE		0x48028000
-#define CM_CLKSEL_WKUP			0x48008440
-#define GP_TIMER_TIDR			0x00
-#define GP_TIMER_TISR			0x18
-#define GP_TIMER_TIER			0x1c
-#define GP_TIMER_TCLR			0x24
-#define GP_TIMER_TCRR			0x28
-#define GP_TIMER_TLDR			0x2c
-#define GP_TIMER_TTGR			0x30
-#define GP_TIMER_TSICR			0x40
-
 #define OMAP_32K_TICKS_PER_HZ		(32768 / HZ)
 
 /*
@@ -101,54 +91,62 @@ struct sys_timer omap_timer;
 #define JIFFIES_TO_HW_TICKS(nr_jiffies, clock_rate)			\
 				(((nr_jiffies) * (clock_rate)) / HZ)
 
+#if defined(CONFIG_ARCH_OMAP1)
+
 static inline void omap_32k_timer_write(int val, int reg)
 {
-	if (cpu_class_is_omap1())
-		omap_writew(val, OMAP1_32K_TIMER_BASE + reg);
-
-	if (cpu_is_omap24xx())
-		omap_writel(val, OMAP2_GP_TIMER_BASE + reg);
+	omap_writew(val, OMAP1_32K_TIMER_BASE + reg);
 }
 
 static inline unsigned long omap_32k_timer_read(int reg)
 {
-	if (cpu_class_is_omap1())
-		return omap_readl(OMAP1_32K_TIMER_BASE + reg) & 0xffffff;
-
-	if (cpu_is_omap24xx())
-		return omap_readl(OMAP2_GP_TIMER_BASE + reg);
-}
-
-/*
- * The 32KHz synchronized timer is an additional timer on 16xx.
- * It is always running.
- */
-static inline unsigned long omap_32k_sync_timer_read(void)
-{
-	return omap_readl(TIMER_32K_SYNCHRONIZED);
+	return omap_readl(OMAP1_32K_TIMER_BASE + reg) & 0xffffff;
 }
 
 static inline void omap_32k_timer_start(unsigned long load_val)
 {
-	if (cpu_class_is_omap1()) {
-		omap_32k_timer_write(load_val, OMAP1_32K_TIMER_TVR);
-		omap_32k_timer_write(0x0f, OMAP1_32K_TIMER_CR);
-	}
-
-	if (cpu_is_omap24xx()) {
-		omap_32k_timer_write(0xffffffff - load_val, GP_TIMER_TCRR);
-		omap_32k_timer_write((1 << 1), GP_TIMER_TIER);
-		omap_32k_timer_write((1 << 1) | 1, GP_TIMER_TCLR);
-	}
+	omap_32k_timer_write(load_val, OMAP1_32K_TIMER_TVR);
+	omap_32k_timer_write(0x0f, OMAP1_32K_TIMER_CR);
 }
 
 static inline void omap_32k_timer_stop(void)
 {
-	if (cpu_class_is_omap1())
-		omap_32k_timer_write(0x0, OMAP1_32K_TIMER_CR);
+	omap_32k_timer_write(0x0, OMAP1_32K_TIMER_CR);
+}
+
+#define omap_32k_timer_ack_irq()
+
+#elif defined(CONFIG_ARCH_OMAP2)
+
+static struct omap_dm_timer *gptimer;
+
+static inline void omap_32k_timer_start(unsigned long load_val)
+{
+	omap_dm_timer_set_load(gptimer, 1, 0xffffffff - load_val);
+	omap_dm_timer_set_int_enable(gptimer, OMAP_TIMER_INT_OVERFLOW);
+	omap_dm_timer_start(gptimer);
+}
 
-	if (cpu_is_omap24xx())
-		omap_32k_timer_write(0x0, GP_TIMER_TCLR);
+static inline void omap_32k_timer_stop(void)
+{
+	omap_dm_timer_stop(gptimer);
+}
+
+static inline void omap_32k_timer_ack_irq(void)
+{
+	u32 status = omap_dm_timer_read_status(gptimer);
+	omap_dm_timer_write_status(gptimer, status);
+}
+
+#endif
+
+/*
+ * The 32KHz synchronized timer is an additional timer on 16xx.
+ * It is always running.
+ */
+static inline unsigned long omap_32k_sync_timer_read(void)
+{
+	return omap_readl(TIMER_32K_SYNCHRONIZED);
 }
 
 /*
@@ -202,11 +200,7 @@ static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id,
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	if (cpu_is_omap24xx()) {
-		u32 status = omap_32k_timer_read(GP_TIMER_TISR);
-		omap_32k_timer_write(status, GP_TIMER_TISR);
-	}
-
+	omap_32k_timer_ack_irq();
 	now = omap_32k_sync_timer_read();
 
 	while ((signed long)(now - omap_32k_last_tick)
@@ -268,9 +262,6 @@ static struct irqaction omap_32k_timer_irq = {
 	.handler	= omap_32k_timer_interrupt,
 };
 
-static struct clk * gpt1_ick;
-static struct clk * gpt1_fck;
-
 static __init void omap_init_32k_timer(void)
 {
 #ifdef CONFIG_NO_IDLE_HZ
@@ -279,32 +270,22 @@ static __init void omap_init_32k_timer(void)
 
 	if (cpu_class_is_omap1())
 		setup_irq(INT_OS_TIMER, &omap_32k_timer_irq);
-	if (cpu_is_omap24xx())
-		setup_irq(37, &omap_32k_timer_irq);
 	omap_timer.offset = omap_32k_timer_gettimeoffset;
 	omap_32k_last_tick = omap_32k_sync_timer_read();
 
+#ifdef CONFIG_ARCH_OMAP2
 	/* REVISIT: Check 24xx TIOCP_CFG settings after idle works */
 	if (cpu_is_omap24xx()) {
-		omap_32k_timer_write(0, GP_TIMER_TCLR);
-		omap_writel(0, CM_CLKSEL_WKUP);	/* 32KHz clock source */
-
-		gpt1_ick = clk_get(NULL, "gpt1_ick");
-		if (IS_ERR(gpt1_ick))
-			printk(KERN_ERR "Could not get gpt1_ick\n");
-		else
-			clk_enable(gpt1_ick);
-
-		gpt1_fck = clk_get(NULL, "gpt1_fck");
-		if (IS_ERR(gpt1_fck))
-			printk(KERN_ERR "Could not get gpt1_fck\n");
-		else
-			clk_enable(gpt1_fck);
-
-		mdelay(100);		/* Wait for clocks to stabilize */
-
-		omap_32k_timer_write(0x7, GP_TIMER_TISR);
+		gptimer = omap_dm_timer_request_specific(1);
+		BUG_ON(gptimer == NULL);
+
+		omap_dm_timer_set_source(gptimer, OMAP_TIMER_SRC_32_KHZ);
+		setup_irq(omap_dm_timer_get_irq(gptimer), &omap_32k_timer_irq);
+		omap_dm_timer_set_int_enable(gptimer,
+			OMAP_TIMER_INT_CAPTURE | OMAP_TIMER_INT_OVERFLOW |
+			OMAP_TIMER_INT_MATCH);
 	}
+#endif
 
 	omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
 }
@@ -316,6 +297,9 @@ static __init void omap_init_32k_timer(void)
  */
 static void __init omap_timer_init(void)
 {
+#ifdef CONFIG_OMAP_DM_TIMER
+	omap_dm_timer_init();
+#endif
 	omap_init_32k_timer();
 }
 