path: root/arch/arm/mach-exynos/common.c
author	Russell King <rmk+kernel@arm.linux.org.uk>	2012-01-05 08:25:15 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-01-05 08:25:27 -0500
commit	7b9dd47136c07ffd883aff6926c7b281e4c1eea4 (patch)
tree	b835312e76fe323de3e1cbbb0d15fca5a3f7ef9c /arch/arm/mach-exynos/common.c
parent	2e0e943436912ffe0848ece58167edfe754edb96 (diff)
parent	0575fb754dbfc32a01f297e778533340a533ec68 (diff)
Merge branch 'restart' into for-linus
Conflicts:
	arch/arm/mach-exynos/cpu.c

The changes to arch/arm/mach-exynos/cpu.c were moved to mach-exynos/common.c.
Diffstat (limited to 'arch/arm/mach-exynos/common.c')
-rw-r--r--	arch/arm/mach-exynos/common.c | 713
1 file changed, 713 insertions, 0 deletions
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
new file mode 100644
index 000000000000..d2acb0f948c6
--- /dev/null
+++ b/arch/arm/mach-exynos/common.c
@@ -0,0 +1,713 @@
1/*
2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Common Codes for EXYNOS
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/io.h>
16#include <linux/sysdev.h>
17#include <linux/gpio.h>
18#include <linux/sched.h>
19#include <linux/serial_core.h>
20
21#include <asm/proc-fns.h>
22#include <asm/hardware/cache-l2x0.h>
23#include <asm/hardware/gic.h>
24#include <asm/mach/map.h>
25#include <asm/mach/irq.h>
26
27#include <mach/regs-irq.h>
28#include <mach/regs-pmu.h>
29#include <mach/regs-gpio.h>
30
31#include <plat/cpu.h>
32#include <plat/clock.h>
33#include <plat/devs.h>
34#include <plat/pm.h>
35#include <plat/sdhci.h>
36#include <plat/gpio-cfg.h>
37#include <plat/adc-core.h>
38#include <plat/fb-core.h>
39#include <plat/fimc-core.h>
40#include <plat/iic-core.h>
41#include <plat/tv-core.h>
42#include <plat/regs-serial.h>
43
44#include "common.h"
45
46unsigned int gic_bank_offset __read_mostly;
47
48static const char name_exynos4210[] = "EXYNOS4210";
49static const char name_exynos4212[] = "EXYNOS4212";
50static const char name_exynos4412[] = "EXYNOS4412";
51
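/*
 * Chip ID matching table: s3c_init_cpu() (called from exynos_init_io() below)
 * picks the entry whose idcode matches the masked chip ID read from the
 * CHIPID block, then uses its map_io/init_clocks/init_uarts/init hooks for
 * the rest of SoC setup.
 */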
52static struct cpu_table cpu_ids[] __initdata = {
53 {
54 .idcode = EXYNOS4210_CPU_ID,
55 .idmask = EXYNOS4_CPU_MASK,
56 .map_io = exynos4_map_io,
57 .init_clocks = exynos4_init_clocks,
58 .init_uarts = exynos4_init_uarts,
59 .init = exynos_init,
60 .name = name_exynos4210,
61 }, {
62 .idcode = EXYNOS4212_CPU_ID,
63 .idmask = EXYNOS4_CPU_MASK,
64 .map_io = exynos4_map_io,
65 .init_clocks = exynos4_init_clocks,
66 .init_uarts = exynos4_init_uarts,
67 .init = exynos_init,
68 .name = name_exynos4212,
69 }, {
70 .idcode = EXYNOS4412_CPU_ID,
71 .idmask = EXYNOS4_CPU_MASK,
72 .map_io = exynos4_map_io,
73 .init_clocks = exynos4_init_clocks,
74 .init_uarts = exynos4_init_uarts,
75 .init = exynos_init,
76 .name = name_exynos4412,
77 },
78};
79
80/* Initial IO mappings */
81
82static struct map_desc exynos_iodesc[] __initdata = {
83 {
84 .virtual = (unsigned long)S5P_VA_CHIPID,
85 .pfn = __phys_to_pfn(EXYNOS4_PA_CHIPID),
86 .length = SZ_4K,
87 .type = MT_DEVICE,
88 }, {
89 .virtual = (unsigned long)S3C_VA_SYS,
90 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON),
91 .length = SZ_64K,
92 .type = MT_DEVICE,
93 }, {
94 .virtual = (unsigned long)S3C_VA_TIMER,
95 .pfn = __phys_to_pfn(EXYNOS4_PA_TIMER),
96 .length = SZ_16K,
97 .type = MT_DEVICE,
98 }, {
99 .virtual = (unsigned long)S3C_VA_WATCHDOG,
100 .pfn = __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
101 .length = SZ_4K,
102 .type = MT_DEVICE,
103 }, {
104 .virtual = (unsigned long)S5P_VA_SROMC,
105 .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
106 .length = SZ_4K,
107 .type = MT_DEVICE,
108 }, {
109 .virtual = (unsigned long)S5P_VA_SYSTIMER,
110 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
111 .length = SZ_4K,
112 .type = MT_DEVICE,
113 }, {
114 .virtual = (unsigned long)S5P_VA_PMU,
115 .pfn = __phys_to_pfn(EXYNOS4_PA_PMU),
116 .length = SZ_64K,
117 .type = MT_DEVICE,
118 }, {
119 .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
120 .pfn = __phys_to_pfn(EXYNOS4_PA_COMBINER),
121 .length = SZ_4K,
122 .type = MT_DEVICE,
123 }, {
124 .virtual = (unsigned long)S5P_VA_GIC_CPU,
125 .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
126 .length = SZ_64K,
127 .type = MT_DEVICE,
128 }, {
129 .virtual = (unsigned long)S5P_VA_GIC_DIST,
130 .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
131 .length = SZ_64K,
132 .type = MT_DEVICE,
133 }, {
134 .virtual = (unsigned long)S3C_VA_UART,
135 .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
136 .length = SZ_512K,
137 .type = MT_DEVICE,
138 },
139};
140
141static struct map_desc exynos4_iodesc[] __initdata = {
142 {
143 .virtual = (unsigned long)S5P_VA_CMU,
144 .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
145 .length = SZ_128K,
146 .type = MT_DEVICE,
147 }, {
148 .virtual = (unsigned long)S5P_VA_COREPERI_BASE,
149 .pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
150 .length = SZ_8K,
151 .type = MT_DEVICE,
152 }, {
153 .virtual = (unsigned long)S5P_VA_L2CC,
154 .pfn = __phys_to_pfn(EXYNOS4_PA_L2CC),
155 .length = SZ_4K,
156 .type = MT_DEVICE,
157 }, {
158 .virtual = (unsigned long)S5P_VA_GPIO1,
159 .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO1),
160 .length = SZ_4K,
161 .type = MT_DEVICE,
162 }, {
163 .virtual = (unsigned long)S5P_VA_GPIO2,
164 .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO2),
165 .length = SZ_4K,
166 .type = MT_DEVICE,
167 }, {
168 .virtual = (unsigned long)S5P_VA_GPIO3,
169 .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO3),
170 .length = SZ_256,
171 .type = MT_DEVICE,
172 }, {
173 .virtual = (unsigned long)S5P_VA_DMC0,
174 .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
175 .length = SZ_4K,
176 .type = MT_DEVICE,
177 }, {
178 .virtual = (unsigned long)S3C_VA_USB_HSPHY,
179 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
180 .length = SZ_4K,
181 .type = MT_DEVICE,
182 },
183};
184
185static struct map_desc exynos4_iodesc0[] __initdata = {
186 {
187 .virtual = (unsigned long)S5P_VA_SYSRAM,
188 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
189 .length = SZ_4K,
190 .type = MT_DEVICE,
191 },
192};
193
194static struct map_desc exynos4_iodesc1[] __initdata = {
195 {
196 .virtual = (unsigned long)S5P_VA_SYSRAM,
197 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
198 .length = SZ_4K,
199 .type = MT_DEVICE,
200 },
201};
202
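/*
 * Default idle routine, installed as pm_idle from exynos_init(). The core
 * idle loop calls this with interrupts disabled, so interrupts must be
 * re-enabled before returning.
 */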
203static void exynos_idle(void)
204{
205 if (!need_resched())
206 cpu_do_idle();
207
208 local_irq_enable();
209}
210
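/*
 * Machine restart hook: writing 1 to the PMU software-reset register resets
 * the whole SoC. The reboot mode and command string are not used here.
 */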
211void exynos4_restart(char mode, const char *cmd)
212{
213 __raw_writel(0x1, S5P_SWRESET);
214}
215
216/*
217 * exynos_init_io
218 *
219 * register the standard CPU I/O areas
220 */
221
222void __init exynos_init_io(struct map_desc *mach_desc, int size)
223{
224 /* initialize the io descriptors we need for initialization */
225 iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
226 if (mach_desc)
227 iotable_init(mach_desc, size);
228
229 /* detect cpu id and rev. */
230 s5p_init_cpu(S5P_VA_CHIPID);
231
232 s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
233}
234
235void __init exynos4_map_io(void)
236{
237 iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
238
239 if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
240 iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
241 else
242 iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));
243
244 /* initialize device information early */
245 exynos4_default_sdhci0();
246 exynos4_default_sdhci1();
247 exynos4_default_sdhci2();
248 exynos4_default_sdhci3();
249
250 s3c_adc_setname("samsung-adc-v3");
251
252 s3c_fimc_setname(0, "exynos4-fimc");
253 s3c_fimc_setname(1, "exynos4-fimc");
254 s3c_fimc_setname(2, "exynos4-fimc");
255 s3c_fimc_setname(3, "exynos4-fimc");
256
257 /* The I2C bus controllers are directly compatible with s3c2440 */
258 s3c_i2c0_setname("s3c2440-i2c");
259 s3c_i2c1_setname("s3c2440-i2c");
260 s3c_i2c2_setname("s3c2440-i2c");
261
262 s5p_fb_setname(0, "exynos4-fb");
263 s5p_hdmi_setname("exynos4-hdmi");
264}
265
266void __init exynos4_init_clocks(int xtal)
267{
268 printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
269
270 s3c24xx_register_baseclocks(xtal);
271 s5p_register_clocks(xtal);
272
273 if (soc_is_exynos4210())
274 exynos4210_register_clocks();
275 else if (soc_is_exynos4212() || soc_is_exynos4412())
276 exynos4212_register_clocks();
277
278 exynos4_register_clocks();
279 exynos4_setup_clocks();
280}
281
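/*
 * External interrupt combiner support. Each combiner group multiplexes up to
 * eight interrupt sources onto a single GIC SPI: COMBINER_ENABLE_SET/CLEAR
 * gate the individual sources, COMBINER_INT_STATUS reports which of them are
 * pending, and the chained handler below translates the first pending source
 * back into its per-source Linux IRQ.
 */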
282#define COMBINER_ENABLE_SET 0x0
283#define COMBINER_ENABLE_CLEAR 0x4
284#define COMBINER_INT_STATUS 0xC
285
286static DEFINE_SPINLOCK(irq_controller_lock);
287
288struct combiner_chip_data {
289 unsigned int irq_offset;
290 unsigned int irq_mask;
291 void __iomem *base;
292};
293
294static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
295
296static inline void __iomem *combiner_base(struct irq_data *data)
297{
298 struct combiner_chip_data *combiner_data =
299 irq_data_get_irq_chip_data(data);
300
301 return combiner_data->base;
302}
303
304static void combiner_mask_irq(struct irq_data *data)
305{
306 u32 mask = 1 << (data->irq % 32);
307
308 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
309}
310
311static void combiner_unmask_irq(struct irq_data *data)
312{
313 u32 mask = 1 << (data->irq % 32);
314
315 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
316}
317
318static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
319{
320 struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
321 struct irq_chip *chip = irq_get_chip(irq);
322 unsigned int cascade_irq, combiner_irq;
323 unsigned long status;
324
325 chained_irq_enter(chip, desc);
326
327 spin_lock(&irq_controller_lock);
328 status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
329 spin_unlock(&irq_controller_lock);
330 status &= chip_data->irq_mask;
331
332 if (status == 0)
333 goto out;
334
335 combiner_irq = __ffs(status);
336
337 cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
338 if (unlikely(cascade_irq >= NR_IRQS))
339 do_bad_IRQ(cascade_irq, desc);
340 else
341 generic_handle_irq(cascade_irq);
342
343 out:
344 chained_irq_exit(chip, desc);
345}
346
347static struct irq_chip combiner_chip = {
348 .name = "COMBINER",
349 .irq_mask = combiner_mask_irq,
350 .irq_unmask = combiner_unmask_irq,
351};
352
353static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
354{
355 if (combiner_nr >= MAX_COMBINER_NR)
356 BUG();
357 if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
358 BUG();
359 irq_set_chained_handler(irq, combiner_handle_cascade_irq);
360}
361
362static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
363 unsigned int irq_start)
364{
365 unsigned int i;
366
367 if (combiner_nr >= MAX_COMBINER_NR)
368 BUG();
369
370 combiner_data[combiner_nr].base = base;
371 combiner_data[combiner_nr].irq_offset = irq_start;
372 combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
373
374 /* Disable all interrupts */
375
376 __raw_writel(combiner_data[combiner_nr].irq_mask,
377 base + COMBINER_ENABLE_CLEAR);
378
379 /* Setup the Linux IRQ subsystem */
380
381 for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
382 + MAX_IRQ_IN_COMBINER; i++) {
383 irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
384 irq_set_chip_data(i, &combiner_data[combiner_nr]);
385 set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
386 }
387}
388
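/*
 * The GIC CPU and distributor interfaces on EXYNOS4 are banked per CPU at
 * gic_bank_offset strides. This gic_arch_extn hook recomputes the base
 * addresses for the CPU currently handling the interrupt before the
 * mask/unmask/eoi operations touch the hardware.
 */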
389static void exynos4_gic_irq_fix_base(struct irq_data *d)
390{
391 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
392
393 gic_data->cpu_base = S5P_VA_GIC_CPU +
394 (gic_bank_offset * smp_processor_id());
395
396 gic_data->dist_base = S5P_VA_GIC_DIST +
397 (gic_bank_offset * smp_processor_id());
398}
399
400void __init exynos4_init_irq(void)
401{
402 int irq;
403
404 gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
405
406 gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
407 gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
408 gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
409 gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;
410
411 for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
412
413 combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
414 COMBINER_IRQ(irq, 0));
415 combiner_cascade_irq(irq, IRQ_SPI(irq));
416 }
417
418 /*
419 * The parameters of s5p_init_irq() are for VIC init.
420 * These parameters should be NULL and 0 because EXYNOS4
421 * uses GIC instead of VIC.
422 */
423 s5p_init_irq(NULL, 0);
424}
425
426struct sysdev_class exynos4_sysclass = {
427 .name = "exynos4-core",
428};
429
430static struct sys_device exynos4_sysdev = {
431 .cls = &exynos4_sysclass,
432};
433
434static int __init exynos4_core_init(void)
435{
436 return sysdev_class_register(&exynos4_sysclass);
437}
438core_initcall(exynos4_core_init);
439
440#ifdef CONFIG_CACHE_L2X0
441static int __init exynos4_l2x0_cache_init(void)
442{
443 /* TAG, Data Latency Control: 2cycle */
444 __raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
445
446 if (soc_is_exynos4210())
447 __raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
448 else if (soc_is_exynos4212() || soc_is_exynos4412())
449 __raw_writel(0x120, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
450
451 /* L2X0 Prefetch Control */
452 __raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);
453
454 /* L2X0 Power Control */
455 __raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
456 S5P_VA_L2CC + L2X0_POWER_CTRL);
457
458 l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);
459
460 return 0;
461}
462
463early_initcall(exynos4_l2x0_cache_init);
464#endif
465
466int __init exynos_init(void)
467{
468 printk(KERN_INFO "EXYNOS: Initializing architecture\n");
469
470 /* set idle function */
471 pm_idle = exynos_idle;
472
473 return sysdev_register(&exynos4_sysdev);
474}
475
476static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = {
477 [0] = {
478 .name = "uclk1",
479 .divisor = 1,
480 .min_baud = 0,
481 .max_baud = 0,
482 },
483};
484
485/* uart registration process */
486
487void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
488{
489 struct s3c2410_uartcfg *tcfg = cfg;
490 u32 ucnt;
491
492 for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
493 if (!tcfg->clocks) {
494 tcfg->has_fracval = 1;
495 tcfg->clocks = exynos4_serial_clocks;
496 tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
497 }
498 tcfg->flags |= NO_NEED_CHECK_CLKSRC;
499 }
500
501 s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
502}
503
504static DEFINE_SPINLOCK(eint_lock);
505
506static unsigned int eint0_15_data[16];
507
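/*
 * EINT0..EINT15 each have a dedicated interrupt line, grouped as
 * IRQ_EINT0..3, IRQ_EINT4..7 and IRQ_EINT8..15 (hence the mapping below),
 * while EINT16..EINT31 share the combined IRQ_EINT16_31 line that is
 * demultiplexed further down.
 */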
508static unsigned int exynos4_get_irq_nr(unsigned int number)
509{
510 u32 ret = 0;
511
512 switch (number) {
513 case 0 ... 3:
514 ret = (number + IRQ_EINT0);
515 break;
516 case 4 ... 7:
517 ret = (number + (IRQ_EINT4 - 4));
518 break;
519 case 8 ... 15:
520 ret = (number + (IRQ_EINT8 - 8));
521 break;
522 default:
523 printk(KERN_ERR "number available : %d\n", number);
524 }
525
526 return ret;
527}
528
529static inline void exynos4_irq_eint_mask(struct irq_data *data)
530{
531 u32 mask;
532
533 spin_lock(&eint_lock);
534 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
535 mask |= eint_irq_to_bit(data->irq);
536 __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
537 spin_unlock(&eint_lock);
538}
539
540static void exynos4_irq_eint_unmask(struct irq_data *data)
541{
542 u32 mask;
543
544 spin_lock(&eint_lock);
545 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
546 mask &= ~(eint_irq_to_bit(data->irq));
547 __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
548 spin_unlock(&eint_lock);
549}
550
551static inline void exynos4_irq_eint_ack(struct irq_data *data)
552{
553 __raw_writel(eint_irq_to_bit(data->irq),
554 S5P_EINT_PEND(EINT_REG_NR(data->irq)));
555}
556
557static void exynos4_irq_eint_maskack(struct irq_data *data)
558{
559 exynos4_irq_eint_mask(data);
560 exynos4_irq_eint_ack(data);
561}
562
563static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
564{
565 int offs = EINT_OFFSET(data->irq);
566 int shift;
567 u32 ctrl, mask;
568 u32 newvalue = 0;
569
570 switch (type) {
571 case IRQ_TYPE_EDGE_RISING:
572 newvalue = S5P_IRQ_TYPE_EDGE_RISING;
573 break;
574
575 case IRQ_TYPE_EDGE_FALLING:
576 newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
577 break;
578
579 case IRQ_TYPE_EDGE_BOTH:
580 newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
581 break;
582
583 case IRQ_TYPE_LEVEL_LOW:
584 newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
585 break;
586
587 case IRQ_TYPE_LEVEL_HIGH:
588 newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
589 break;
590
591 default:
592 printk(KERN_ERR "No such irq type %d", type);
593 return -EINVAL;
594 }
595
596 shift = (offs & 0x7) * 4;
597 mask = 0x7 << shift;
598
599 spin_lock(&eint_lock);
600 ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
601 ctrl &= ~mask;
602 ctrl |= newvalue << shift;
603 __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
604 spin_unlock(&eint_lock);
605
606 switch (offs) {
607 case 0 ... 7:
608 s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
609 break;
610 case 8 ... 15:
611 s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
612 break;
613 case 16 ... 23:
614 s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
615 break;
616 case 24 ... 31:
617 s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
618 break;
619 default:
620 printk(KERN_ERR "No such irq number %d", offs);
621 }
622
623 return 0;
624}
625
626static struct irq_chip exynos4_irq_eint = {
627 .name = "exynos4-eint",
628 .irq_mask = exynos4_irq_eint_mask,
629 .irq_unmask = exynos4_irq_eint_unmask,
630 .irq_mask_ack = exynos4_irq_eint_maskack,
631 .irq_ack = exynos4_irq_eint_ack,
632 .irq_set_type = exynos4_irq_eint_set_type,
633#ifdef CONFIG_PM
634 .irq_set_wake = s3c_irqext_wake,
635#endif
636};
637
638/*
639 * exynos4_irq_demux_eint
640 *
641 * This function demuxes the IRQ from EINTs 16 to 31.
642 * It is designed to be inlined into the specific handler
643 * s5p_irq_demux_eintX_Y.
644 *
645 * Each EINT pend/mask register handles eight of them.
646 */
647static inline void exynos4_irq_demux_eint(unsigned int start)
648{
649 unsigned int irq;
650
651 u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
652 u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
653
654 status &= ~mask;
655 status &= 0xff;
656
657 while (status) {
658 irq = fls(status) - 1;
659 generic_handle_irq(irq + start);
660 status &= ~(1 << irq);
661 }
662}
663
664static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
665{
666 struct irq_chip *chip = irq_get_chip(irq);
667 chained_irq_enter(chip, desc);
668 exynos4_irq_demux_eint(IRQ_EINT(16));
669 exynos4_irq_demux_eint(IRQ_EINT(24));
670 chained_irq_exit(chip, desc);
671}
672
673static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
674{
675 u32 *irq_data = irq_get_handler_data(irq);
676 struct irq_chip *chip = irq_get_chip(irq);
677
678 chained_irq_enter(chip, desc);
679 chip->irq_mask(&desc->irq_data);
680
681 if (chip->irq_ack)
682 chip->irq_ack(&desc->irq_data);
683
684 generic_handle_irq(*irq_data);
685
686 chip->irq_unmask(&desc->irq_data);
687 chained_irq_exit(chip, desc);
688}
689
690int __init exynos4_init_irq_eint(void)
691{
692 int irq;
693
694 for (irq = 0 ; irq <= 31 ; irq++) {
695 irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
696 handle_level_irq);
697 set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
698 }
699
700 irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
701
702 for (irq = 0 ; irq <= 15 ; irq++) {
703 eint0_15_data[irq] = IRQ_EINT(irq);
704
705 irq_set_handler_data(exynos4_get_irq_nr(irq),
706 &eint0_15_data[irq]);
707 irq_set_chained_handler(exynos4_get_irq_nr(irq),
708 exynos4_irq_eint0_15);
709 }
710
711 return 0;
712}
713arch_initcall(exynos4_init_irq_eint);