Diffstat (limited to 'drivers')
27 files changed, 2337 insertions, 118 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 7fdcbd3f4da5..7d978c1bd528 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,3 +1,6 @@
+config CLKSRC_OF
+	bool
+
 config CLKSRC_I8253
 	bool
 
@@ -25,6 +28,9 @@ config ARMADA_370_XP_TIMER
 config SUNXI_TIMER
 	bool
 
+config VT8500_TIMER
+	bool
+
 config CLKSRC_NOMADIK_MTU
 	bool
 	depends on (ARCH_NOMADIK || ARCH_U8500)
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f93453d01673..596c45c2f192 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_CLKSRC_OF)	+= clksrc-of.o
 obj-$(CONFIG_ATMEL_TCB_CLKSRC)	+= tcb_clksrc.o
 obj-$(CONFIG_X86_CYCLONE_TIMER)	+= cyclone.o
 obj-$(CONFIG_X86_PM_TIMER)	+= acpi_pm.o
@@ -16,5 +17,7 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU)	+= clksrc-dbx500-prcmu.o
 obj-$(CONFIG_ARMADA_370_XP_TIMER)	+= time-armada-370-xp.o
 obj-$(CONFIG_ARCH_BCM2835)	+= bcm2835_timer.o
 obj-$(CONFIG_SUNXI_TIMER)	+= sunxi_timer.o
+obj-$(CONFIG_ARCH_TEGRA)	+= tegra20_timer.o
+obj-$(CONFIG_VT8500_TIMER)	+= vt8500_timer.o
 
 obj-$(CONFIG_CLKSRC_ARM_GENERIC)	+= arm_generic.o
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index bc19f12c20ce..50c68fef944b 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -16,7 +16,6 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#include <linux/bcm2835_timer.h>
 #include <linux/bitops.h>
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
@@ -101,7 +100,7 @@ static struct of_device_id bcm2835_time_match[] __initconst = {
 	{}
 };
 
-static void __init bcm2835_time_init(void)
+static void __init bcm2835_timer_init(void)
 {
 	struct device_node *node;
 	void __iomem *base;
@@ -155,7 +154,5 @@ static void __init bcm2835_time_init(void)
 
 	pr_info("bcm2835: system timer (irq = %d)\n", irq);
 }
-
-struct sys_timer bcm2835_timer = {
-	.init = bcm2835_time_init,
-};
+CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
+		       bcm2835_timer_init);
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
new file mode 100644
index 000000000000..bdabdaa8d00f
--- /dev/null
+++ b/drivers/clocksource/clksrc-of.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+
+extern struct of_device_id __clksrc_of_table[];
+
+static const struct of_device_id __clksrc_of_table_sentinel
+	__used __section(__clksrc_of_table_end);
+
+void __init clocksource_of_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *match;
+	void (*init_func)(void);
+
+	for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+		init_func = match->data;
+		init_func();
+	}
+}
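The new clksrc-of.c walks a linker-built table of of_device_id entries and calls each driver's init function for every matching device-tree node. Drivers populate that table with CLOCKSOURCE_OF_DECLARE(), as bcm2835_timer.c does above; the macro itself lives outside this drivers-only diffstat (in include/linux/clocksource.h), so the sketch below is illustrative rather than a quote of the exact upstream definition, and my_machine_init_time() is a hypothetical caller:

#define CLOCKSOURCE_OF_DECLARE(name, compat, fn)			\
	static const struct of_device_id __clksrc_of_table_##name	\
		__used __section(__clksrc_of_table)			\
		= { .compatible = compat, .data = fn };

/* A machine's timer init hook can then simply do: */
static void __init my_machine_init_time(void)
{
	clocksource_of_init();	/* probe every matching DT timer node */
}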
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index d9279385304d..ea210482dd20 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -100,7 +100,6 @@ static struct clock_event_device cs5535_clockevent = {
 	.set_mode = mfgpt_set_mode,
 	.set_next_event = mfgpt_next_event,
 	.rating = 250,
-	.shift = 32
 };
 
 static irqreturn_t mfgpt_tick(int irq, void *dev_id)
@@ -169,17 +168,11 @@ static int __init cs5535_mfgpt_init(void)
 	cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
 
 	/* Set up the clock event */
-	cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
-			cs5535_clockevent.shift);
-	cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
-			&cs5535_clockevent);
-	cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
-			&cs5535_clockevent);
-
 	printk(KERN_INFO DRV_NAME
 		": Registering MFGPT timer as a clock event, using IRQ %d\n",
 		timer_irq);
-	clockevents_register_device(&cs5535_clockevent);
+	clockevents_config_and_register(&cs5535_clockevent, MFGPT_HZ,
+					0xF, 0xFFFE);
 
 	return 0;
 
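The same conversion is repeated for the sunxi, tcb and tegra clockevent devices below: instead of open-coding the mult/shift math, the driver now passes the timer frequency plus the minimum and maximum delta in raw ticks, and the clockevents core derives the scaling factors and the ns bounds itself. Schematically (a sketch of the equivalence, not the core's code):

	/* before: driver-computed scaling */
	cs5535_clockevent.shift = 32;
	cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, 32);
	cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF, &cs5535_clockevent);
	cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE, &cs5535_clockevent);
	clockevents_register_device(&cs5535_clockevent);

	/* after: one call, same configuration */
	clockevents_config_and_register(&cs5535_clockevent, MFGPT_HZ, 0xF, 0xFFFE);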
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index f7dba5b79b44..ab09ed3742ee 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -107,7 +107,7 @@ static const struct of_device_id osctimer_ids[] __initconst = {
 	{},
 };
 
-static void __init timer_init(void)
+void __init dw_apb_timer_init(void)
 {
 	struct device_node *event_timer, *source_timer;
 
@@ -125,7 +125,3 @@ static void __init timer_init(void)
 
 	init_sched_clock();
 }
-
-struct sys_timer dw_apb_timer = {
-	.init = timer_init,
-};
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 8914c3c1c88b..025afc6dd324 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -134,12 +134,32 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
 	}
 }
 
+void nmdk_clksrc_reset(void)
+{
+	/* Disable */
+	writel(0, mtu_base + MTU_CR(0));
+
+	/* ClockSource: configure load and background-load, and fire it up */
+	writel(nmdk_cycle, mtu_base + MTU_LR(0));
+	writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
+
+	writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
+	       mtu_base + MTU_CR(0));
+}
+
+static void nmdk_clkevt_resume(struct clock_event_device *cedev)
+{
+	nmdk_clkevt_reset();
+	nmdk_clksrc_reset();
+}
+
 static struct clock_event_device nmdk_clkevt = {
 	.name		= "mtu_1",
 	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
 	.rating		= 200,
 	.set_mode	= nmdk_clkevt_mode,
 	.set_next_event	= nmdk_clkevt_next,
+	.resume		= nmdk_clkevt_resume,
 };
 
 /*
@@ -161,19 +181,6 @@ static struct irqaction nmdk_timer_irq = {
 	.dev_id		= &nmdk_clkevt,
 };
 
-void nmdk_clksrc_reset(void)
-{
-	/* Disable */
-	writel(0, mtu_base + MTU_CR(0));
-
-	/* ClockSource: configure load and background-load, and fire it up */
-	writel(nmdk_cycle, mtu_base + MTU_LR(0));
-	writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
-
-	writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
-	       mtu_base + MTU_CR(0));
-}
-
 void __init nmdk_timer_init(void __iomem *base, int irq)
 {
 	unsigned long rate;
diff --git a/drivers/clocksource/sunxi_timer.c b/drivers/clocksource/sunxi_timer.c
index 93d09d0e009f..4086b9167159 100644
--- a/drivers/clocksource/sunxi_timer.c
+++ b/drivers/clocksource/sunxi_timer.c
@@ -74,7 +74,6 @@ static int sunxi_clkevt_next_event(unsigned long evt,
 
 static struct clock_event_device sunxi_clockevent = {
 	.name = "sunxi_tick",
-	.shift = 32,
 	.rating = 300,
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_mode = sunxi_clkevt_mode,
@@ -104,7 +103,7 @@ static struct of_device_id sunxi_timer_dt_ids[] = {
 	{ }
 };
 
-static void __init sunxi_timer_init(void)
+void __init sunxi_timer_init(void)
 {
 	struct device_node *node;
 	unsigned long rate = 0;
@@ -154,18 +153,8 @@ static void __init sunxi_timer_init(void)
 	val = readl(timer_base + TIMER_CTL_REG);
 	writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);
 
-	sunxi_clockevent.mult = div_sc(rate / TIMER_SCAL,
-				NSEC_PER_SEC,
-				sunxi_clockevent.shift);
-	sunxi_clockevent.max_delta_ns = clockevent_delta2ns(0xff,
-							    &sunxi_clockevent);
-	sunxi_clockevent.min_delta_ns = clockevent_delta2ns(0x1,
-							    &sunxi_clockevent);
 	sunxi_clockevent.cpumask = cpumask_of(0);
 
-	clockevents_register_device(&sunxi_clockevent);
+	clockevents_config_and_register(&sunxi_clockevent, rate / TIMER_SCAL,
+					0x1, 0xff);
 }
-
-struct sys_timer sunxi_timer = {
-	.init = sunxi_timer_init,
-};
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 32cb929b8eb6..8a6187225dd0 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -157,7 +157,6 @@ static struct tc_clkevt_device clkevt = {
 		.name		= "tc_clkevt",
 		.features	= CLOCK_EVT_FEAT_PERIODIC
 					| CLOCK_EVT_FEAT_ONESHOT,
-		.shift		= 32,
 		/* Should be lower than at91rm9200's system timer */
 		.rating		= 125,
 		.set_next_event	= tc_next_event,
@@ -196,13 +195,9 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 
 	timer_clock = clk32k_divisor_idx;
 
-	clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
-	clkevt.clkevt.max_delta_ns
-		= clockevent_delta2ns(0xffff, &clkevt.clkevt);
-	clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
 	clkevt.clkevt.cpumask = cpumask_of(0);
 
-	clockevents_register_device(&clkevt.clkevt);
+	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
 
 	setup_irq(irq, &tc_irqaction);
 }
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
new file mode 100644
index 000000000000..0bde03feb095
--- /dev/null
+++ b/drivers/clocksource/tegra20_timer.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/time.h>
+#include <asm/smp_twd.h>
+#include <asm/sched_clock.h>
+
+#define RTC_SECONDS		0x08
+#define RTC_SHADOW_SECONDS	0x0c
+#define RTC_MILLISECONDS	0x10
+
+#define TIMERUS_CNTR_1US	0x10
+#define TIMERUS_USEC_CFG	0x14
+#define TIMERUS_CNTR_FREEZE	0x4c
+
+#define TIMER1_BASE		0x0
+#define TIMER2_BASE		0x8
+#define TIMER3_BASE		0x50
+#define TIMER4_BASE		0x58
+
+#define TIMER_PTV		0x0
+#define TIMER_PCR		0x4
+
+static void __iomem *timer_reg_base;
+static void __iomem *rtc_base;
+
+static struct timespec persistent_ts;
+static u64 persistent_ms, last_persistent_ms;
+
+#define timer_writel(value, reg) \
+	__raw_writel(value, timer_reg_base + (reg))
+#define timer_readl(reg) \
+	__raw_readl(timer_reg_base + (reg))
+
+static int tegra_timer_set_next_event(unsigned long cycles,
+					 struct clock_event_device *evt)
+{
+	u32 reg;
+
+	reg = 0x80000000 | ((cycles > 1) ? (cycles-1) : 0);
+	timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+
+	return 0;
+}
+
+static void tegra_timer_set_mode(enum clock_event_mode mode,
+				    struct clock_event_device *evt)
+{
+	u32 reg;
+
+	timer_writel(0, TIMER3_BASE + TIMER_PTV);
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		reg = 0xC0000000 | ((1000000/HZ)-1);
+		timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_RESUME:
+		break;
+	}
+}
+
+static struct clock_event_device tegra_clockevent = {
+	.name		= "timer0",
+	.rating		= 300,
+	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+	.set_next_event	= tegra_timer_set_next_event,
+	.set_mode	= tegra_timer_set_mode,
+};
+
+static u32 notrace tegra_read_sched_clock(void)
+{
+	return timer_readl(TIMERUS_CNTR_1US);
+}
+
+/*
+ * tegra_rtc_read - Reads the Tegra RTC registers
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+static u64 tegra_rtc_read_ms(void)
+{
+	u32 ms = readl(rtc_base + RTC_MILLISECONDS);
+	u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+	return (u64)s * MSEC_PER_SEC + ms;
+}
+
+/*
+ * tegra_read_persistent_clock -  Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32k sync timer.  Convert the cycles elapsed since last read into
+ * nsecs and adds to a monotonically increasing timespec.
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+static void tegra_read_persistent_clock(struct timespec *ts)
+{
+	u64 delta;
+	struct timespec *tsp = &persistent_ts;
+
+	last_persistent_ms = persistent_ms;
+	persistent_ms = tegra_rtc_read_ms();
+	delta = persistent_ms - last_persistent_ms;
+
+	timespec_add_ns(tsp, delta * NSEC_PER_MSEC);
+	*ts = *tsp;
+}
+
+static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+	timer_writel(1<<30, TIMER3_BASE + TIMER_PCR);
+	evt->event_handler(evt);
+	return IRQ_HANDLED;
+}
+
+static struct irqaction tegra_timer_irq = {
+	.name		= "timer0",
+	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH,
+	.handler	= tegra_timer_interrupt,
+	.dev_id		= &tegra_clockevent,
+};
+
+static const struct of_device_id timer_match[] __initconst = {
+	{ .compatible = "nvidia,tegra20-timer" },
+	{}
+};
+
+static const struct of_device_id rtc_match[] __initconst = {
+	{ .compatible = "nvidia,tegra20-rtc" },
+	{}
+};
+
+static void __init tegra20_init_timer(void)
+{
+	struct device_node *np;
+	struct clk *clk;
+	unsigned long rate;
+	int ret;
+
+	np = of_find_matching_node(NULL, timer_match);
+	if (!np) {
+		pr_err("Failed to find timer DT node\n");
+		BUG();
+	}
+
+	timer_reg_base = of_iomap(np, 0);
+	if (!timer_reg_base) {
+		pr_err("Can't map timer registers\n");
+		BUG();
+	}
+
+	tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
+	if (tegra_timer_irq.irq <= 0) {
+		pr_err("Failed to map timer IRQ\n");
+		BUG();
+	}
+
+	clk = clk_get_sys("timer", NULL);
+	if (IS_ERR(clk)) {
+		pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
+		rate = 12000000;
+	} else {
+		clk_prepare_enable(clk);
+		rate = clk_get_rate(clk);
+	}
+
+	of_node_put(np);
+
+	np = of_find_matching_node(NULL, rtc_match);
+	if (!np) {
+		pr_err("Failed to find RTC DT node\n");
+		BUG();
+	}
+
+	rtc_base = of_iomap(np, 0);
+	if (!rtc_base) {
+		pr_err("Can't map RTC registers");
+		BUG();
+	}
+
+	/*
+	 * rtc registers are used by read_persistent_clock, keep the rtc clock
+	 * enabled
+	 */
+	clk = clk_get_sys("rtc-tegra", NULL);
+	if (IS_ERR(clk))
+		pr_warn("Unable to get rtc-tegra clock\n");
+	else
+		clk_prepare_enable(clk);
+
+	of_node_put(np);
+
+	switch (rate) {
+	case 12000000:
+		timer_writel(0x000b, TIMERUS_USEC_CFG);
+		break;
+	case 13000000:
+		timer_writel(0x000c, TIMERUS_USEC_CFG);
+		break;
+	case 19200000:
+		timer_writel(0x045f, TIMERUS_USEC_CFG);
+		break;
+	case 26000000:
+		timer_writel(0x0019, TIMERUS_USEC_CFG);
+		break;
+	default:
+		WARN(1, "Unknown clock rate");
+	}
+
+	setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
+
+	if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+		"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+		pr_err("Failed to register clocksource\n");
+		BUG();
+	}
+
+	ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
+	if (ret) {
+		pr_err("Failed to register timer IRQ: %d\n", ret);
+		BUG();
+	}
+
+	tegra_clockevent.cpumask = cpu_all_mask;
+	tegra_clockevent.irq = tegra_timer_irq.irq;
+	clockevents_config_and_register(&tegra_clockevent, 1000000,
+					0x1, 0x1fffffff);
+#ifdef CONFIG_HAVE_ARM_TWD
+	twd_local_timer_of_register();
+#endif
+	register_persistent_clock(NULL, tegra_read_persistent_clock);
+}
+CLOCKSOURCE_OF_DECLARE(tegra20, "nvidia,tegra20-timer", tegra20_init_timer);
+
+#ifdef CONFIG_PM
+static u32 usec_config;
+
+void tegra_timer_suspend(void)
+{
+	usec_config = timer_readl(TIMERUS_USEC_CFG);
+}
+
+void tegra_timer_resume(void)
+{
+	timer_writel(usec_config, TIMERUS_USEC_CFG);
+}
+#endif
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
new file mode 100644
index 000000000000..8efc86b5b5dd
--- /dev/null
+++ b/drivers/clocksource/vt8500_timer.c
@@ -0,0 +1,180 @@
+/*
+ *  arch/arm/mach-vt8500/timer.c
+ *
+ *  Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ *  Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * This file is copied and modified from the original timer.c provided by
+ * Alexey Charkov. Minor changes have been made for Device Tree Support.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <asm/mach/time.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define VT8500_TIMER_OFFSET	0x0100
+#define VT8500_TIMER_HZ		3000000
+#define TIMER_MATCH_VAL		0x0000
+#define TIMER_COUNT_VAL		0x0010
+#define TIMER_STATUS_VAL	0x0014
+#define TIMER_IER_VAL		0x001c		/* interrupt enable */
+#define TIMER_CTRL_VAL		0x0020
+#define TIMER_AS_VAL		0x0024		/* access status */
+#define TIMER_COUNT_R_ACTIVE	(1 << 5)	/* not ready for read */
+#define TIMER_COUNT_W_ACTIVE	(1 << 4)	/* not ready for write */
+#define TIMER_MATCH_W_ACTIVE	(1 << 0)	/* not ready for write */
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+static void __iomem *regbase;
+
+static cycle_t vt8500_timer_read(struct clocksource *cs)
+{
+	int loops = msecs_to_loops(10);
+	writel(3, regbase + TIMER_CTRL_VAL);
+	while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE)
+						&& --loops)
+		cpu_relax();
+	return readl(regbase + TIMER_COUNT_VAL);
+}
+
+static struct clocksource clocksource = {
+	.name           = "vt8500_timer",
+	.rating         = 200,
+	.read           = vt8500_timer_read,
+	.mask           = CLOCKSOURCE_MASK(32),
+	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int vt8500_timer_set_next_event(unsigned long cycles,
+				    struct clock_event_device *evt)
+{
+	int loops = msecs_to_loops(10);
+	cycle_t alarm = clocksource.read(&clocksource) + cycles;
+	while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
+						&& --loops)
+		cpu_relax();
+	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+
+	if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+		return -ETIME;
+
+	writel(1, regbase + TIMER_IER_VAL);
+
+	return 0;
+}
+
+static void vt8500_timer_set_mode(enum clock_event_mode mode,
+			      struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_RESUME:
+	case CLOCK_EVT_MODE_PERIODIC:
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		writel(readl(regbase + TIMER_CTRL_VAL) | 1,
+			regbase + TIMER_CTRL_VAL);
+		writel(0, regbase + TIMER_IER_VAL);
+		break;
+	}
+}
+
+static struct clock_event_device clockevent = {
+	.name           = "vt8500_timer",
+	.features       = CLOCK_EVT_FEAT_ONESHOT,
+	.rating         = 200,
+	.set_next_event = vt8500_timer_set_next_event,
+	.set_mode       = vt8500_timer_set_mode,
+};
+
+static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+	writel(0xf, regbase + TIMER_STATUS_VAL);
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq = {
+	.name    = "vt8500_timer",
+	.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+	.handler = vt8500_timer_interrupt,
+	.dev_id  = &clockevent,
+};
+
+static struct of_device_id vt8500_timer_ids[] = {
+	{ .compatible = "via,vt8500-timer" },
+	{ }
+};
+
+static void __init vt8500_timer_init(void)
+{
+	struct device_node *np;
+	int timer_irq;
+
+	np = of_find_matching_node(NULL, vt8500_timer_ids);
+	if (!np) {
+		pr_err("%s: Timer description missing from Device Tree\n",
+								__func__);
+		return;
+	}
+	regbase = of_iomap(np, 0);
+	if (!regbase) {
+		pr_err("%s: Missing iobase description in Device Tree\n",
+								__func__);
+		of_node_put(np);
+		return;
+	}
+	timer_irq = irq_of_parse_and_map(np, 0);
+	if (!timer_irq) {
+		pr_err("%s: Missing irq description in Device Tree\n",
+								__func__);
+		of_node_put(np);
+		return;
+	}
+
+	writel(1, regbase + TIMER_CTRL_VAL);
+	writel(0xf, regbase + TIMER_STATUS_VAL);
+	writel(~0, regbase + TIMER_MATCH_VAL);
+
+	if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+		pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
+					__func__, clocksource.name);
+
+	clockevent.cpumask = cpumask_of(0);
+
+	if (setup_irq(timer_irq, &irq))
+		pr_err("%s: setup_irq failed for %s\n", __func__,
+							clockevent.name);
+	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+					4, 0xf0000000);
+}
+
+CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init)
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index 79a84860ea56..48a1988149d8 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
-#include <mach/id.h>
 
 static struct cpufreq_frequency_table *freq_table;
 static struct clk *armss_clk;
@@ -165,9 +164,6 @@ static struct platform_driver db8500_cpufreq_plat_driver = {
 
 static int __init db8500_cpufreq_register(void)
 {
-	if (!cpu_is_u8500_family())
-		return -ENODEV;
-
 	pr_info("cpufreq for DB8500 started\n");
 	return platform_driver_register(&db8500_cpufreq_plat_driver);
 }
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 69b676dd3358..78057a357ddb 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -18,10 +18,10 @@
 #include <linux/cpufreq.h>
 #include <linux/suspend.h>
 
-#include <mach/cpufreq.h>
-
 #include <plat/cpu.h>
 
+#include "exynos-cpufreq.h"
+
 static struct exynos_dvfs_info *exynos_info;
 
 static struct regulator *arm_regulator;
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
new file mode 100644
index 000000000000..92b852ee5ddc
--- /dev/null
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * EXYNOS - CPUFreq support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+enum cpufreq_level_index {
+	L0, L1, L2, L3, L4,
+	L5, L6, L7, L8, L9,
+	L10, L11, L12, L13, L14,
+	L15, L16, L17, L18, L19,
+	L20,
+};
+
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+	{ \
+		.freq = (f) * 1000, \
+		.clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+			(a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+		.clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+		.mps = ((m) << 16 | (p) << 8 | (s)), \
+	}
+
+struct apll_freq {
+	unsigned int freq;
+	u32 clk_div_cpu0;
+	u32 clk_div_cpu1;
+	u32 mps;
+};
+
+struct exynos_dvfs_info {
+	unsigned long	mpll_freq_khz;
+	unsigned int	pll_safe_idx;
+	struct clk	*cpu_clk;
+	unsigned int	*volt_table;
+	struct cpufreq_frequency_table	*freq_table;
+	void (*set_freq)(unsigned int, unsigned int);
+	bool (*need_apll_change)(unsigned int, unsigned int);
+};
+
+extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
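The header gives the SoC-specific files a shared contract: each exynos*_cpufreq_init() fills in an exynos_dvfs_info and the common exynos-cpufreq.c then drives the hardware through set_freq()/need_apll_change(). A minimal sketch of what such an init is expected to do; the helper and table names below are illustrative, only the struct fields and the init prototype come from the header above:

int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
	/* hypothetical tables/helpers defined elsewhere in the SoC file */
	info->mpll_freq_khz	= 800000;
	info->pll_safe_idx	= L2;
	info->cpu_clk		= cpu_clk;
	info->volt_table	= exynos4210_volt_table;
	info->freq_table	= exynos4210_freq_table;
	info->set_freq		= exynos4210_set_frequency;
	info->need_apll_change	= exynos4210_pms_change;
	return 0;
}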
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index de91755e2556..add7fbec4fc9 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -18,7 +18,8 @@
 #include <linux/cpufreq.h>
 
 #include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
+
+#include "exynos-cpufreq.h"
 
 static struct clk *cpu_clk;
 static struct clk *moutcore;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 0661039e5d4a..08b7477b0aa2 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -18,7 +18,8 @@
 #include <linux/cpufreq.h>
 
 #include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
+
+#include "exynos-cpufreq.h"
 
 static struct clk *cpu_clk;
 static struct clk *moutcore;
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index b9344869f822..9fae466d7746 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -19,7 +19,8 @@
 
 #include <mach/map.h>
 #include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
+
+#include "exynos-cpufreq.h"
 
 static struct clk *cpu_clk;
 static struct clk *moutcore;
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index b2016ed941ac..b3643ff007e4 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -38,7 +38,6 @@
 #include <plat/gpio-core.h>
 #include <plat/gpio-cfg.h>
 #include <plat/gpio-cfg-helpers.h>
-#include <plat/gpio-fns.h>
 #include <plat/pm.h>
 
 int samsung_gpio_setpull_updown(struct samsung_gpio_chip *chip,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 62ca575701d3..a350969e5efe 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1,3 +1,30 @@
+config IRQCHIP
+	def_bool y
+	depends on OF_IRQ
+
+config ARM_GIC
+	bool
+	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
+
+config GIC_NON_BANKED
+	bool
+
+config ARM_VIC
+	bool
+	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
+
+config ARM_VIC_NR
+	int
+	default 4 if ARCH_S5PV210
+	default 3 if ARCH_S5PC100
+	default 2
+	depends on ARM_VIC
+	help
+	  The maximum number of VICs available in the system, for
+	  power management.
+
 config VERSATILE_FPGA_IRQ
 	bool
 	select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index bf4609a5bd9d..e65fbf2cdf71 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,4 +1,9 @@
+obj-$(CONFIG_IRQCHIP)			+= irqchip.o
+
 obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2835.o
+obj-$(CONFIG_ARCH_EXYNOS)		+= exynos-combiner.o
 obj-$(CONFIG_ARCH_SUNXI)		+= irq-sunxi.o
-obj-$(CONFIG_VERSATILE_FPGA_IRQ)	+= irq-versatile-fpga.o
 obj-$(CONFIG_ARCH_SPEAR3XX)		+= spear-shirq.o
+obj-$(CONFIG_ARM_GIC)			+= irq-gic.o
+obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
+obj-$(CONFIG_VERSATILE_FPGA_IRQ)	+= irq-versatile-fpga.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
new file mode 100644
index 000000000000..04d86a9803f4
--- /dev/null
+++ b/drivers/irqchip/exynos-combiner.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Combiner irqchip for EXYNOS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/mach/irq.h>
+
+#include <plat/cpu.h>
+
+#include "irqchip.h"
+
+#define COMBINER_ENABLE_SET	0x0
+#define COMBINER_ENABLE_CLEAR	0x4
+#define COMBINER_INT_STATUS	0xC
+
+static DEFINE_SPINLOCK(irq_controller_lock);
+
+struct combiner_chip_data {
+	unsigned int irq_offset;
+	unsigned int irq_mask;
+	void __iomem *base;
+};
+
+static struct irq_domain *combiner_irq_domain;
+static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
+
+static inline void __iomem *combiner_base(struct irq_data *data)
+{
+	struct combiner_chip_data *combiner_data =
+		irq_data_get_irq_chip_data(data);
+
+	return combiner_data->base;
+}
+
+static void combiner_mask_irq(struct irq_data *data)
+{
+	u32 mask = 1 << (data->hwirq % 32);
+
+	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+}
+
+static void combiner_unmask_irq(struct irq_data *data)
+{
+	u32 mask = 1 << (data->hwirq % 32);
+
+	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+}
+
+static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_get_chip(irq);
+	unsigned int cascade_irq, combiner_irq;
+	unsigned long status;
+
+	chained_irq_enter(chip, desc);
+
+	spin_lock(&irq_controller_lock);
+	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
+	spin_unlock(&irq_controller_lock);
+	status &= chip_data->irq_mask;
+
+	if (status == 0)
+		goto out;
+
+	combiner_irq = __ffs(status);
+
+	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
+	if (unlikely(cascade_irq >= NR_IRQS))
+		do_bad_IRQ(cascade_irq, desc);
+	else
+		generic_handle_irq(cascade_irq);
+
+ out:
+	chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip combiner_chip = {
+	.name		= "COMBINER",
+	.irq_mask	= combiner_mask_irq,
+	.irq_unmask	= combiner_unmask_irq,
+};
+
+static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
+{
+	unsigned int max_nr;
+
+	if (soc_is_exynos5250())
+		max_nr = EXYNOS5_MAX_COMBINER_NR;
+	else
+		max_nr = EXYNOS4_MAX_COMBINER_NR;
+
+	if (combiner_nr >= max_nr)
+		BUG();
+	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
+		BUG();
+	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+}
+
+static void __init combiner_init_one(unsigned int combiner_nr,
+				     void __iomem *base)
+{
+	combiner_data[combiner_nr].base = base;
+	combiner_data[combiner_nr].irq_offset = irq_find_mapping(
+		combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
+	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
+
+	/* Disable all interrupts */
+	__raw_writel(combiner_data[combiner_nr].irq_mask,
+		     base + COMBINER_ENABLE_CLEAR);
+}
+
+#ifdef CONFIG_OF
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+				     struct device_node *controller,
+				     const u32 *intspec, unsigned int intsize,
+				     unsigned long *out_hwirq,
+				     unsigned int *out_type)
+{
+	if (d->of_node != controller)
+		return -EINVAL;
+
+	if (intsize < 2)
+		return -EINVAL;
+
+	*out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+	*out_type = 0;
+
+	return 0;
+}
+#else
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+				     struct device_node *controller,
+				     const u32 *intspec, unsigned int intsize,
+				     unsigned long *out_hwirq,
+				     unsigned int *out_type)
+{
+	return -EINVAL;
+}
+#endif
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				   irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+	return 0;
+}
+
+static struct irq_domain_ops combiner_irq_domain_ops = {
+	.xlate	= combiner_irq_domain_xlate,
+	.map	= combiner_irq_domain_map,
+};
+
+void __init combiner_init(void __iomem *combiner_base,
+			  struct device_node *np)
+{
+	int i, irq, irq_base;
+	unsigned int max_nr, nr_irq;
+
+	if (np) {
+		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+			pr_warning("%s: number of combiners not specified, "
+				"setting default as %d.\n",
+				__func__, EXYNOS4_MAX_COMBINER_NR);
+			max_nr = EXYNOS4_MAX_COMBINER_NR;
+		}
+	} else {
+		max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
+						EXYNOS4_MAX_COMBINER_NR;
+	}
+	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
+
+	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
+	if (IS_ERR_VALUE(irq_base)) {
+		irq_base = COMBINER_IRQ(0, 0);
+		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+	}
+
+	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
+				&combiner_irq_domain_ops, &combiner_data);
+	if (WARN_ON(!combiner_irq_domain)) {
+		pr_warning("%s: irq domain init failed\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < max_nr; i++) {
+		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
+		irq = IRQ_SPI(i);
+#ifdef CONFIG_OF
+		if (np)
+			irq = irq_of_parse_and_map(np, i);
+#endif
+		combiner_cascade_irq(i, irq);
+	}
+}
+
+#ifdef CONFIG_OF
+static int __init combiner_of_init(struct device_node *np,
+				   struct device_node *parent)
+{
+	void __iomem *combiner_base;
+
+	combiner_base = of_iomap(np, 0);
+	if (!combiner_base) {
+		pr_err("%s: failed to map combiner registers\n", __func__);
+		return -ENXIO;
+	}
+
+	combiner_init(combiner_base, np);
+
+	return 0;
+}
+IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
+		combiner_of_init);
+#endif
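IRQCHIP_DECLARE() and the irqchip.o object added to the Makefile follow the same pattern as CLOCKSOURCE_OF_DECLARE(): each driver drops an of_device_id into a dedicated linker section and a small core walks it at boot via of_irq_init(). The files that implement this (drivers/irqchip/irqchip.h and irqchip.c) are part of the series but not among the hunks quoted here, so the following is only a rough sketch of the mechanism, not the upstream source:

#define IRQCHIP_DECLARE(name, compat, fn)				\
	static const struct of_device_id irqchip_of_match_##name	\
		__used __section(__irqchip_of_table)			\
		= { .compatible = compat, .data = fn };

void __init irqchip_init(void)
{
	/* of_irq_init() orders parents before children and calls each
	 * declared init function with (node, parent) */
	of_irq_init(__irqchip_begin);
}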
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c new file mode 100644 index 000000000000..644d72468423 --- /dev/null +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -0,0 +1,845 @@ | |||
| 1 | /* | ||
| 2 | * linux/arch/arm/common/gic.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * Interrupt architecture for the GIC: | ||
| 11 | * | ||
| 12 | * o There is one Interrupt Distributor, which receives interrupts | ||
| 13 | * from system devices and sends them to the Interrupt Controllers. | ||
| 14 | * | ||
| 15 | * o There is one CPU Interface per CPU, which sends interrupts sent | ||
| 16 | * by the Distributor, and interrupts generated locally, to the | ||
| 17 | * associated CPU. The base address of the CPU interface is usually | ||
| 18 | * aliased so that the same address points to different chips depending | ||
| 19 | * on the CPU it is accessed from. | ||
| 20 | * | ||
| 21 | * Note that IRQs 0-31 are special - they are local to each CPU. | ||
| 22 | * As such, the enable set/clear, pending set/clear and active bit | ||
| 23 | * registers are banked per-cpu for these sources. | ||
| 24 | */ | ||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/kernel.h> | ||
| 27 | #include <linux/err.h> | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/list.h> | ||
| 30 | #include <linux/smp.h> | ||
| 31 | #include <linux/cpu_pm.h> | ||
| 32 | #include <linux/cpumask.h> | ||
| 33 | #include <linux/io.h> | ||
| 34 | #include <linux/of.h> | ||
| 35 | #include <linux/of_address.h> | ||
| 36 | #include <linux/of_irq.h> | ||
| 37 | #include <linux/irqdomain.h> | ||
| 38 | #include <linux/interrupt.h> | ||
| 39 | #include <linux/percpu.h> | ||
| 40 | #include <linux/slab.h> | ||
| 41 | #include <linux/irqchip/arm-gic.h> | ||
| 42 | |||
| 43 | #include <asm/irq.h> | ||
| 44 | #include <asm/exception.h> | ||
| 45 | #include <asm/smp_plat.h> | ||
| 46 | #include <asm/mach/irq.h> | ||
| 47 | |||
| 48 | #include "irqchip.h" | ||
| 49 | |||
| 50 | union gic_base { | ||
| 51 | void __iomem *common_base; | ||
| 52 | void __percpu __iomem **percpu_base; | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct gic_chip_data { | ||
| 56 | union gic_base dist_base; | ||
| 57 | union gic_base cpu_base; | ||
| 58 | #ifdef CONFIG_CPU_PM | ||
| 59 | u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; | ||
| 60 | u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; | ||
| 61 | u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; | ||
| 62 | u32 __percpu *saved_ppi_enable; | ||
| 63 | u32 __percpu *saved_ppi_conf; | ||
| 64 | #endif | ||
| 65 | struct irq_domain *domain; | ||
| 66 | unsigned int gic_irqs; | ||
| 67 | #ifdef CONFIG_GIC_NON_BANKED | ||
| 68 | void __iomem *(*get_base)(union gic_base *); | ||
| 69 | #endif | ||
| 70 | }; | ||
| 71 | |||
| 72 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); | ||
| 73 | |||
| 74 | /* | ||
| 75 | * The GIC mapping of CPU interfaces does not necessarily match | ||
| 76 | * the logical CPU numbering. Let's use a mapping as returned | ||
| 77 | * by the GIC itself. | ||
| 78 | */ | ||
| 79 | #define NR_GIC_CPU_IF 8 | ||
| 80 | static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; | ||
| 81 | |||
| 82 | /* | ||
| 83 | * Supported arch specific GIC irq extension. | ||
| 84 | * Default make them NULL. | ||
| 85 | */ | ||
| 86 | struct irq_chip gic_arch_extn = { | ||
| 87 | .irq_eoi = NULL, | ||
| 88 | .irq_mask = NULL, | ||
| 89 | .irq_unmask = NULL, | ||
| 90 | .irq_retrigger = NULL, | ||
| 91 | .irq_set_type = NULL, | ||
| 92 | .irq_set_wake = NULL, | ||
| 93 | }; | ||
| 94 | |||
| 95 | #ifndef MAX_GIC_NR | ||
| 96 | #define MAX_GIC_NR 1 | ||
| 97 | #endif | ||
| 98 | |||
| 99 | static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; | ||
| 100 | |||
| 101 | #ifdef CONFIG_GIC_NON_BANKED | ||
| 102 | static void __iomem *gic_get_percpu_base(union gic_base *base) | ||
| 103 | { | ||
| 104 | return *__this_cpu_ptr(base->percpu_base); | ||
| 105 | } | ||
| 106 | |||
| 107 | static void __iomem *gic_get_common_base(union gic_base *base) | ||
| 108 | { | ||
| 109 | return base->common_base; | ||
| 110 | } | ||
| 111 | |||
| 112 | static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data) | ||
| 113 | { | ||
| 114 | return data->get_base(&data->dist_base); | ||
| 115 | } | ||
| 116 | |||
| 117 | static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data) | ||
| 118 | { | ||
| 119 | return data->get_base(&data->cpu_base); | ||
| 120 | } | ||
| 121 | |||
| 122 | static inline void gic_set_base_accessor(struct gic_chip_data *data, | ||
| 123 | void __iomem *(*f)(union gic_base *)) | ||
| 124 | { | ||
| 125 | data->get_base = f; | ||
| 126 | } | ||
| 127 | #else | ||
| 128 | #define gic_data_dist_base(d) ((d)->dist_base.common_base) | ||
| 129 | #define gic_data_cpu_base(d) ((d)->cpu_base.common_base) | ||
| 130 | #define gic_set_base_accessor(d,f) | ||
| 131 | #endif | ||
| 132 | |||
| 133 | static inline void __iomem *gic_dist_base(struct irq_data *d) | ||
| 134 | { | ||
| 135 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); | ||
| 136 | return gic_data_dist_base(gic_data); | ||
| 137 | } | ||
| 138 | |||
| 139 | static inline void __iomem *gic_cpu_base(struct irq_data *d) | ||
| 140 | { | ||
| 141 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); | ||
| 142 | return gic_data_cpu_base(gic_data); | ||
| 143 | } | ||
| 144 | |||
| 145 | static inline unsigned int gic_irq(struct irq_data *d) | ||
| 146 | { | ||
| 147 | return d->hwirq; | ||
| 148 | } | ||
| 149 | |||
| 150 | /* | ||
| 151 | * Routines to acknowledge, disable and enable interrupts | ||
| 152 | */ | ||
| 153 | static void gic_mask_irq(struct irq_data *d) | ||
| 154 | { | ||
| 155 | u32 mask = 1 << (gic_irq(d) % 32); | ||
| 156 | |||
| 157 | raw_spin_lock(&irq_controller_lock); | ||
| 158 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | ||
| 159 | if (gic_arch_extn.irq_mask) | ||
| 160 | gic_arch_extn.irq_mask(d); | ||
| 161 | raw_spin_unlock(&irq_controller_lock); | ||
| 162 | } | ||
| 163 | |||
| 164 | static void gic_unmask_irq(struct irq_data *d) | ||
| 165 | { | ||
| 166 | u32 mask = 1 << (gic_irq(d) % 32); | ||
| 167 | |||
| 168 | raw_spin_lock(&irq_controller_lock); | ||
| 169 | if (gic_arch_extn.irq_unmask) | ||
| 170 | gic_arch_extn.irq_unmask(d); | ||
| 171 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | ||
| 172 | raw_spin_unlock(&irq_controller_lock); | ||
| 173 | } | ||
| 174 | |||
| 175 | static void gic_eoi_irq(struct irq_data *d) | ||
| 176 | { | ||
| 177 | if (gic_arch_extn.irq_eoi) { | ||
| 178 | raw_spin_lock(&irq_controller_lock); | ||
| 179 | gic_arch_extn.irq_eoi(d); | ||
| 180 | raw_spin_unlock(&irq_controller_lock); | ||
| 181 | } | ||
| 182 | |||
| 183 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); | ||
| 184 | } | ||
| 185 | |||
| 186 | static int gic_set_type(struct irq_data *d, unsigned int type) | ||
| 187 | { | ||
| 188 | void __iomem *base = gic_dist_base(d); | ||
| 189 | unsigned int gicirq = gic_irq(d); | ||
| 190 | u32 enablemask = 1 << (gicirq % 32); | ||
| 191 | u32 enableoff = (gicirq / 32) * 4; | ||
| 192 | u32 confmask = 0x2 << ((gicirq % 16) * 2); | ||
| 193 | u32 confoff = (gicirq / 16) * 4; | ||
| 194 | bool enabled = false; | ||
| 195 | u32 val; | ||
| 196 | |||
| 197 | /* Interrupt configuration for SGIs can't be changed */ | ||
| 198 | if (gicirq < 16) | ||
| 199 | return -EINVAL; | ||
| 200 | |||
| 201 | if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) | ||
| 202 | return -EINVAL; | ||
| 203 | |||
| 204 | raw_spin_lock(&irq_controller_lock); | ||
| 205 | |||
| 206 | if (gic_arch_extn.irq_set_type) | ||
| 207 | gic_arch_extn.irq_set_type(d, type); | ||
| 208 | |||
| 209 | val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); | ||
| 210 | if (type == IRQ_TYPE_LEVEL_HIGH) | ||
| 211 | val &= ~confmask; | ||
| 212 | else if (type == IRQ_TYPE_EDGE_RISING) | ||
| 213 | val |= confmask; | ||
| 214 | |||
| 215 | /* | ||
| 216 | * As recommended by the spec, disable the interrupt before changing | ||
| 217 | * the configuration | ||
| 218 | */ | ||
| 219 | if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { | ||
| 220 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); | ||
| 221 | enabled = true; | ||
| 222 | } | ||
| 223 | |||
| 224 | writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); | ||
| 225 | |||
| 226 | if (enabled) | ||
| 227 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); | ||
| 228 | |||
| 229 | raw_spin_unlock(&irq_controller_lock); | ||
| 230 | |||
| 231 | return 0; | ||
| 232 | } | ||
| 233 | |||
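For reference, the GIC packs two configuration bits per interrupt (sixteen per 32-bit word) and one enable bit per interrupt (thirty-two per word). A minimal standalone sketch of the offset arithmetic used in gic_set_type() above, not part of the patch, with the hwirq value invented for the example:

#include <stdio.h>

/* Mirrors the enable/config offset math in gic_set_type(). */
static void gic_cfg_offsets(unsigned int gicirq)
{
	unsigned int enablemask = 1u << (gicirq % 32);
	unsigned int enableoff  = (gicirq / 32) * 4;            /* 32 irqs per enable word */
	unsigned int confmask   = 0x2u << ((gicirq % 16) * 2);  /* 2 config bits per irq */
	unsigned int confoff    = (gicirq / 16) * 4;            /* 16 irqs per config word */

	printf("irq %u: enable +0x%x mask 0x%08x, config +0x%x mask 0x%08x\n",
	       gicirq, enableoff, enablemask, confoff, confmask);
}

int main(void)
{
	gic_cfg_offsets(45);	/* enable +0x4 mask 0x00002000, config +0x8 mask 0x08000000 */
	return 0;
}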
| 234 | static int gic_retrigger(struct irq_data *d) | ||
| 235 | { | ||
| 236 | if (gic_arch_extn.irq_retrigger) | ||
| 237 | return gic_arch_extn.irq_retrigger(d); | ||
| 238 | |||
| 239 | return -ENXIO; | ||
| 240 | } | ||
| 241 | |||
| 242 | #ifdef CONFIG_SMP | ||
| 243 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | ||
| 244 | bool force) | ||
| 245 | { | ||
| 246 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | ||
| 247 | unsigned int shift = (gic_irq(d) % 4) * 8; | ||
| 248 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | ||
| 249 | u32 val, mask, bit; | ||
| 250 | |||
| 251 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) | ||
| 252 | return -EINVAL; | ||
| 253 | |||
| 254 | mask = 0xff << shift; | ||
| 255 | bit = gic_cpu_map[cpu] << shift; | ||
| 256 | |||
| 257 | raw_spin_lock(&irq_controller_lock); | ||
| 258 | val = readl_relaxed(reg) & ~mask; | ||
| 259 | writel_relaxed(val | bit, reg); | ||
| 260 | raw_spin_unlock(&irq_controller_lock); | ||
| 261 | |||
| 262 | return IRQ_SET_MASK_OK; | ||
| 263 | } | ||
| 264 | #endif | ||
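GIC_DIST_TARGET holds one target byte per interrupt, four interrupts per 32-bit register, each byte being a bitmap of CPU interfaces. A standalone sketch of the read-modify-write done in gic_set_affinity() above, with all values invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int hwirq  = 38;                  /* example SPI */
	unsigned int shift  = (hwirq % 4) * 8;     /* byte lane within the word */
	unsigned int regoff = hwirq & ~3;          /* word-aligned register offset */
	unsigned int cpu_if = 0x02;                /* e.g. gic_cpu_map[] entry for CPU1 */
	unsigned int val    = 0x01010101;          /* pretend current register contents */

	val = (val & ~(0xffu << shift)) | (cpu_if << shift);
	printf("GIC_DIST_TARGET + 0x%x <= 0x%08x\n", regoff, val);   /* + 0x24 <= 0x01020101 */
	return 0;
}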
| 265 | |||
| 266 | #ifdef CONFIG_PM | ||
| 267 | static int gic_set_wake(struct irq_data *d, unsigned int on) | ||
| 268 | { | ||
| 269 | int ret = -ENXIO; | ||
| 270 | |||
| 271 | if (gic_arch_extn.irq_set_wake) | ||
| 272 | ret = gic_arch_extn.irq_set_wake(d, on); | ||
| 273 | |||
| 274 | return ret; | ||
| 275 | } | ||
| 276 | |||
| 277 | #else | ||
| 278 | #define gic_set_wake NULL | ||
| 279 | #endif | ||
| 280 | |||
| 281 | static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) | ||
| 282 | { | ||
| 283 | u32 irqstat, irqnr; | ||
| 284 | struct gic_chip_data *gic = &gic_data[0]; | ||
| 285 | void __iomem *cpu_base = gic_data_cpu_base(gic); | ||
| 286 | |||
| 287 | do { | ||
| 288 | irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); | ||
| 289 | irqnr = irqstat & ~0x1c00; | ||
| 290 | |||
| 291 | if (likely(irqnr > 15 && irqnr < 1021)) { | ||
| 292 | irqnr = irq_find_mapping(gic->domain, irqnr); | ||
| 293 | handle_IRQ(irqnr, regs); | ||
| 294 | continue; | ||
| 295 | } | ||
| 296 | if (irqnr < 16) { | ||
| 297 | writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); | ||
| 298 | #ifdef CONFIG_SMP | ||
| 299 | handle_IPI(irqnr, regs); | ||
| 300 | #endif | ||
| 301 | continue; | ||
| 302 | } | ||
| 303 | break; | ||
| 304 | } while (1); | ||
| 305 | } | ||
| 306 | |||
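The acknowledge register read in gic_handle_irq() above returns the interrupt ID in bits [9:0] and, for SGIs, the requesting CPU in bits [12:10]; masking with ~0x1c00 strips the source-CPU field. A small standalone decode, with the register value invented:

#include <stdio.h>

int main(void)
{
	unsigned int irqstat = 0x0c05;             /* pretend GIC_CPU_INTACK value */
	unsigned int irqnr   = irqstat & ~0x1c00;  /* interrupt ID, bits [9:0] */
	unsigned int src_cpu = (irqstat >> 10) & 0x7;

	if (irqnr < 16)
		printf("SGI %u from CPU %u\n", irqnr, src_cpu);   /* SGI 5 from CPU 3 */
	else if (irqnr < 1021)
		printf("hwirq %u\n", irqnr);
	else
		printf("spurious (0x%x)\n", irqnr);
	return 0;
}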
| 307 | static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | ||
| 308 | { | ||
| 309 | struct gic_chip_data *chip_data = irq_get_handler_data(irq); | ||
| 310 | struct irq_chip *chip = irq_get_chip(irq); | ||
| 311 | unsigned int cascade_irq, gic_irq; | ||
| 312 | unsigned long status; | ||
| 313 | |||
| 314 | chained_irq_enter(chip, desc); | ||
| 315 | |||
| 316 | raw_spin_lock(&irq_controller_lock); | ||
| 317 | status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK); | ||
| 318 | raw_spin_unlock(&irq_controller_lock); | ||
| 319 | |||
| 320 | gic_irq = (status & 0x3ff); | ||
| 321 | if (gic_irq == 1023) | ||
| 322 | goto out; | ||
| 323 | |||
| 324 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); | ||
| 325 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) | ||
| 326 | do_bad_IRQ(cascade_irq, desc); | ||
| 327 | else | ||
| 328 | generic_handle_irq(cascade_irq); | ||
| 329 | |||
| 330 | out: | ||
| 331 | chained_irq_exit(chip, desc); | ||
| 332 | } | ||
| 333 | |||
| 334 | static struct irq_chip gic_chip = { | ||
| 335 | .name = "GIC", | ||
| 336 | .irq_mask = gic_mask_irq, | ||
| 337 | .irq_unmask = gic_unmask_irq, | ||
| 338 | .irq_eoi = gic_eoi_irq, | ||
| 339 | .irq_set_type = gic_set_type, | ||
| 340 | .irq_retrigger = gic_retrigger, | ||
| 341 | #ifdef CONFIG_SMP | ||
| 342 | .irq_set_affinity = gic_set_affinity, | ||
| 343 | #endif | ||
| 344 | .irq_set_wake = gic_set_wake, | ||
| 345 | }; | ||
| 346 | |||
| 347 | void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) | ||
| 348 | { | ||
| 349 | if (gic_nr >= MAX_GIC_NR) | ||
| 350 | BUG(); | ||
| 351 | if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) | ||
| 352 | BUG(); | ||
| 353 | irq_set_chained_handler(irq, gic_handle_cascade_irq); | ||
| 354 | } | ||
| 355 | |||
| 356 | static u8 gic_get_cpumask(struct gic_chip_data *gic) | ||
| 357 | { | ||
| 358 | void __iomem *base = gic_data_dist_base(gic); | ||
| 359 | u32 mask, i; | ||
| 360 | |||
| 361 | for (i = mask = 0; i < 32; i += 4) { | ||
| 362 | mask = readl_relaxed(base + GIC_DIST_TARGET + i); | ||
| 363 | mask |= mask >> 16; | ||
| 364 | mask |= mask >> 8; | ||
| 365 | if (mask) | ||
| 366 | break; | ||
| 367 | } | ||
| 368 | |||
| 369 | if (!mask) | ||
| 370 | pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); | ||
| 371 | |||
| 372 | return mask; | ||
| 373 | } | ||
| 374 | |||
| 375 | static void __init gic_dist_init(struct gic_chip_data *gic) | ||
| 376 | { | ||
| 377 | unsigned int i; | ||
| 378 | u32 cpumask; | ||
| 379 | unsigned int gic_irqs = gic->gic_irqs; | ||
| 380 | void __iomem *base = gic_data_dist_base(gic); | ||
| 381 | |||
| 382 | writel_relaxed(0, base + GIC_DIST_CTRL); | ||
| 383 | |||
| 384 | /* | ||
| 385 | * Set all global interrupts to be level triggered, active low. | ||
| 386 | */ | ||
| 387 | for (i = 32; i < gic_irqs; i += 16) | ||
| 388 | writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16); | ||
| 389 | |||
| 390 | /* | ||
| 391 | * Set all global interrupts to this CPU only. | ||
| 392 | */ | ||
| 393 | cpumask = gic_get_cpumask(gic); | ||
| 394 | cpumask |= cpumask << 8; | ||
| 395 | cpumask |= cpumask << 16; | ||
| 396 | for (i = 32; i < gic_irqs; i += 4) | ||
| 397 | writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); | ||
| 398 | |||
| 399 | /* | ||
| 400 | * Set priority on all global interrupts. | ||
| 401 | */ | ||
| 402 | for (i = 32; i < gic_irqs; i += 4) | ||
| 403 | writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); | ||
| 404 | |||
| 405 | /* | ||
| 406 | * Disable all interrupts. Leave the PPI and SGIs alone | ||
| 407 | * as these enables are banked registers. | ||
| 408 | */ | ||
| 409 | for (i = 32; i < gic_irqs; i += 32) | ||
| 410 | writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); | ||
| 411 | |||
| 412 | writel_relaxed(1, base + GIC_DIST_CTRL); | ||
| 413 | } | ||
| 414 | |||
| 415 | static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) | ||
| 416 | { | ||
| 417 | void __iomem *dist_base = gic_data_dist_base(gic); | ||
| 418 | void __iomem *base = gic_data_cpu_base(gic); | ||
| 419 | unsigned int cpu_mask, cpu = smp_processor_id(); | ||
| 420 | int i; | ||
| 421 | |||
| 422 | /* | ||
| 423 | * Get what the GIC says our CPU mask is. | ||
| 424 | */ | ||
| 425 | BUG_ON(cpu >= NR_GIC_CPU_IF); | ||
| 426 | cpu_mask = gic_get_cpumask(gic); | ||
| 427 | gic_cpu_map[cpu] = cpu_mask; | ||
| 428 | |||
| 429 | /* | ||
| 430 | * Clear our mask from the other map entries in case they're | ||
| 431 | * still undefined. | ||
| 432 | */ | ||
| 433 | for (i = 0; i < NR_GIC_CPU_IF; i++) | ||
| 434 | if (i != cpu) | ||
| 435 | gic_cpu_map[i] &= ~cpu_mask; | ||
| 436 | |||
| 437 | /* | ||
| 438 | * Deal with the banked PPI and SGI interrupts - disable all | ||
| 439 | * PPI interrupts, ensure all SGI interrupts are enabled. | ||
| 440 | */ | ||
| 441 | writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); | ||
| 442 | writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); | ||
| 443 | |||
| 444 | /* | ||
| 445 | * Set priority on PPI and SGI interrupts | ||
| 446 | */ | ||
| 447 | for (i = 0; i < 32; i += 4) | ||
| 448 | writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); | ||
| 449 | |||
| 450 | writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); | ||
| 451 | writel_relaxed(1, base + GIC_CPU_CTRL); | ||
| 452 | } | ||
| 453 | |||
| 454 | #ifdef CONFIG_CPU_PM | ||
| 455 | /* | ||
| 456 | * Saves the GIC distributor registers during suspend or idle. Must be called | ||
| 457 | * with interrupts disabled but before powering down the GIC. After calling | ||
| 458 | * this function, no interrupts will be delivered by the GIC, and another | ||
| 459 | * platform-specific wakeup source must be enabled. | ||
| 460 | */ | ||
| 461 | static void gic_dist_save(unsigned int gic_nr) | ||
| 462 | { | ||
| 463 | unsigned int gic_irqs; | ||
| 464 | void __iomem *dist_base; | ||
| 465 | int i; | ||
| 466 | |||
| 467 | if (gic_nr >= MAX_GIC_NR) | ||
| 468 | BUG(); | ||
| 469 | |||
| 470 | gic_irqs = gic_data[gic_nr].gic_irqs; | ||
| 471 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | ||
| 472 | |||
| 473 | if (!dist_base) | ||
| 474 | return; | ||
| 475 | |||
| 476 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | ||
| 477 | gic_data[gic_nr].saved_spi_conf[i] = | ||
| 478 | readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | ||
| 479 | |||
| 480 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
| 481 | gic_data[gic_nr].saved_spi_target[i] = | ||
| 482 | readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); | ||
| 483 | |||
| 484 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | ||
| 485 | gic_data[gic_nr].saved_spi_enable[i] = | ||
| 486 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
| 487 | } | ||
| 488 | |||
| 489 | /* | ||
| 490 | * Restores the GIC distributor registers during resume or when coming out of | ||
| 491 | * idle. Must be called before enabling interrupts. If a level interrupt | ||
| 492 | * that occurred while the GIC was suspended is still present, it will be | ||
| 493 | * handled normally, but any edge interrupts that occurred will not be seen by | ||
| 494 | * the GIC and need to be handled by the platform-specific wakeup source. | ||
| 495 | */ | ||
| 496 | static void gic_dist_restore(unsigned int gic_nr) | ||
| 497 | { | ||
| 498 | unsigned int gic_irqs; | ||
| 499 | unsigned int i; | ||
| 500 | void __iomem *dist_base; | ||
| 501 | |||
| 502 | if (gic_nr >= MAX_GIC_NR) | ||
| 503 | BUG(); | ||
| 504 | |||
| 505 | gic_irqs = gic_data[gic_nr].gic_irqs; | ||
| 506 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | ||
| 507 | |||
| 508 | if (!dist_base) | ||
| 509 | return; | ||
| 510 | |||
| 511 | writel_relaxed(0, dist_base + GIC_DIST_CTRL); | ||
| 512 | |||
| 513 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | ||
| 514 | writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], | ||
| 515 | dist_base + GIC_DIST_CONFIG + i * 4); | ||
| 516 | |||
| 517 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
| 518 | writel_relaxed(0xa0a0a0a0, | ||
| 519 | dist_base + GIC_DIST_PRI + i * 4); | ||
| 520 | |||
| 521 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
| 522 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], | ||
| 523 | dist_base + GIC_DIST_TARGET + i * 4); | ||
| 524 | |||
| 525 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | ||
| 526 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], | ||
| 527 | dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
| 528 | |||
| 529 | writel_relaxed(1, dist_base + GIC_DIST_CTRL); | ||
| 530 | } | ||
| 531 | |||
| 532 | static void gic_cpu_save(unsigned int gic_nr) | ||
| 533 | { | ||
| 534 | int i; | ||
| 535 | u32 *ptr; | ||
| 536 | void __iomem *dist_base; | ||
| 537 | void __iomem *cpu_base; | ||
| 538 | |||
| 539 | if (gic_nr >= MAX_GIC_NR) | ||
| 540 | BUG(); | ||
| 541 | |||
| 542 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | ||
| 543 | cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); | ||
| 544 | |||
| 545 | if (!dist_base || !cpu_base) | ||
| 546 | return; | ||
| 547 | |||
| 548 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | ||
| 549 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | ||
| 550 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
| 551 | |||
| 552 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | ||
| 553 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | ||
| 554 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | ||
| 555 | |||
| 556 | } | ||
| 557 | |||
| 558 | static void gic_cpu_restore(unsigned int gic_nr) | ||
| 559 | { | ||
| 560 | int i; | ||
| 561 | u32 *ptr; | ||
| 562 | void __iomem *dist_base; | ||
| 563 | void __iomem *cpu_base; | ||
| 564 | |||
| 565 | if (gic_nr >= MAX_GIC_NR) | ||
| 566 | BUG(); | ||
| 567 | |||
| 568 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | ||
| 569 | cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); | ||
| 570 | |||
| 571 | if (!dist_base || !cpu_base) | ||
| 572 | return; | ||
| 573 | |||
| 574 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | ||
| 575 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | ||
| 576 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
| 577 | |||
| 578 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | ||
| 579 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | ||
| 580 | writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); | ||
| 581 | |||
| 582 | for (i = 0; i < DIV_ROUND_UP(32, 4); i++) | ||
| 583 | writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4); | ||
| 584 | |||
| 585 | writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK); | ||
| 586 | writel_relaxed(1, cpu_base + GIC_CPU_CTRL); | ||
| 587 | } | ||
| 588 | |||
| 589 | static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) | ||
| 590 | { | ||
| 591 | int i; | ||
| 592 | |||
| 593 | for (i = 0; i < MAX_GIC_NR; i++) { | ||
| 594 | #ifdef CONFIG_GIC_NON_BANKED | ||
| 595 | /* Skip over unused GICs */ | ||
| 596 | if (!gic_data[i].get_base) | ||
| 597 | continue; | ||
| 598 | #endif | ||
| 599 | switch (cmd) { | ||
| 600 | case CPU_PM_ENTER: | ||
| 601 | gic_cpu_save(i); | ||
| 602 | break; | ||
| 603 | case CPU_PM_ENTER_FAILED: | ||
| 604 | case CPU_PM_EXIT: | ||
| 605 | gic_cpu_restore(i); | ||
| 606 | break; | ||
| 607 | case CPU_CLUSTER_PM_ENTER: | ||
| 608 | gic_dist_save(i); | ||
| 609 | break; | ||
| 610 | case CPU_CLUSTER_PM_ENTER_FAILED: | ||
| 611 | case CPU_CLUSTER_PM_EXIT: | ||
| 612 | gic_dist_restore(i); | ||
| 613 | break; | ||
| 614 | } | ||
| 615 | } | ||
| 616 | |||
| 617 | return NOTIFY_OK; | ||
| 618 | } | ||
| 619 | |||
| 620 | static struct notifier_block gic_notifier_block = { | ||
| 621 | .notifier_call = gic_notifier, | ||
| 622 | }; | ||
| 623 | |||
| 624 | static void __init gic_pm_init(struct gic_chip_data *gic) | ||
| 625 | { | ||
| 626 | gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, | ||
| 627 | sizeof(u32)); | ||
| 628 | BUG_ON(!gic->saved_ppi_enable); | ||
| 629 | |||
| 630 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, | ||
| 631 | sizeof(u32)); | ||
| 632 | BUG_ON(!gic->saved_ppi_conf); | ||
| 633 | |||
| 634 | if (gic == &gic_data[0]) | ||
| 635 | cpu_pm_register_notifier(&gic_notifier_block); | ||
| 636 | } | ||
| 637 | #else | ||
| 638 | static void __init gic_pm_init(struct gic_chip_data *gic) | ||
| 639 | { | ||
| 640 | } | ||
| 641 | #endif | ||
| 642 | |||
| 643 | #ifdef CONFIG_SMP | ||
| 644 | void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | ||
| 645 | { | ||
| 646 | int cpu; | ||
| 647 | unsigned long map = 0; | ||
| 648 | |||
| 649 | /* Convert our logical CPU mask into a physical one. */ | ||
| 650 | for_each_cpu(cpu, mask) | ||
| 651 | map |= 1 << cpu_logical_map(cpu); | ||
| 652 | |||
| 653 | /* | ||
| 654 | * Ensure that stores to Normal memory are visible to the | ||
| 655 | * other CPUs before issuing the IPI. | ||
| 656 | */ | ||
| 657 | dsb(); | ||
| 658 | |||
| 659 | /* this always happens on GIC0 */ | ||
| 660 | writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); | ||
| 661 | } | ||
| 662 | #endif | ||
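The value written to GIC_DIST_SOFTINT above carries the target CPU list in bits [23:16] and the SGI number in bits [3:0]. A standalone restatement of that encoding, with the CPU map and SGI number chosen only for the example:

#include <stdio.h>

int main(void)
{
	unsigned long map = (1u << 0) | (1u << 2);   /* physical CPUs 0 and 2 */
	unsigned int  sgi = 1;                       /* e.g. an IPI number */

	printf("GIC_DIST_SOFTINT <= 0x%08lx\n", (map << 16) | sgi);  /* 0x00050001 */
	return 0;
}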
| 663 | |||
| 664 | static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, | ||
| 665 | irq_hw_number_t hw) | ||
| 666 | { | ||
| 667 | if (hw < 32) { | ||
| 668 | irq_set_percpu_devid(irq); | ||
| 669 | irq_set_chip_and_handler(irq, &gic_chip, | ||
| 670 | handle_percpu_devid_irq); | ||
| 671 | set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); | ||
| 672 | } else { | ||
| 673 | irq_set_chip_and_handler(irq, &gic_chip, | ||
| 674 | handle_fasteoi_irq); | ||
| 675 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | ||
| 676 | } | ||
| 677 | irq_set_chip_data(irq, d->host_data); | ||
| 678 | return 0; | ||
| 679 | } | ||
| 680 | |||
| 681 | static int gic_irq_domain_xlate(struct irq_domain *d, | ||
| 682 | struct device_node *controller, | ||
| 683 | const u32 *intspec, unsigned int intsize, | ||
| 684 | unsigned long *out_hwirq, unsigned int *out_type) | ||
| 685 | { | ||
| 686 | if (d->of_node != controller) | ||
| 687 | return -EINVAL; | ||
| 688 | if (intsize < 3) | ||
| 689 | return -EINVAL; | ||
| 690 | |||
| 691 | /* Get the interrupt number and add 16 to skip over SGIs */ | ||
| 692 | *out_hwirq = intspec[1] + 16; | ||
| 693 | |||
| 694 | /* For SPIs, we need to add 16 more to get the GIC irq ID number */ | ||
| 695 | if (!intspec[0]) | ||
| 696 | *out_hwirq += 16; | ||
| 697 | |||
| 698 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | ||
| 699 | return 0; | ||
| 700 | } | ||
| 701 | |||
| 702 | const struct irq_domain_ops gic_irq_domain_ops = { | ||
| 703 | .map = gic_irq_domain_map, | ||
| 704 | .xlate = gic_irq_domain_xlate, | ||
| 705 | }; | ||
| 706 | |||
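With the translation above, a three-cell specifier such as interrupts = <0 29 4> (an SPI) maps to hwirq 29 + 16 + 16 = 61 with IRQ_TYPE_LEVEL_HIGH, while <1 13 8> (a PPI) maps to hwirq 13 + 16 = 29. A standalone restatement of that arithmetic, illustrative only:

#include <stdio.h>

/* Mirrors gic_irq_domain_xlate() for a { type, number, flags } specifier. */
static unsigned long gic_xlate(unsigned int is_ppi, unsigned int nr)
{
	unsigned long hwirq = nr + 16;   /* skip the 16 SGIs */

	if (!is_ppi)                     /* SPIs also skip the 16 PPIs */
		hwirq += 16;
	return hwirq;
}

int main(void)
{
	printf("SPI 29 -> hwirq %lu\n", gic_xlate(0, 29));   /* 61 */
	printf("PPI 13 -> hwirq %lu\n", gic_xlate(1, 13));   /* 29 */
	return 0;
}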
| 707 | void __init gic_init_bases(unsigned int gic_nr, int irq_start, | ||
| 708 | void __iomem *dist_base, void __iomem *cpu_base, | ||
| 709 | u32 percpu_offset, struct device_node *node) | ||
| 710 | { | ||
| 711 | irq_hw_number_t hwirq_base; | ||
| 712 | struct gic_chip_data *gic; | ||
| 713 | int gic_irqs, irq_base, i; | ||
| 714 | |||
| 715 | BUG_ON(gic_nr >= MAX_GIC_NR); | ||
| 716 | |||
| 717 | gic = &gic_data[gic_nr]; | ||
| 718 | #ifdef CONFIG_GIC_NON_BANKED | ||
| 719 | if (percpu_offset) { /* Frankein-GIC without banked registers... */ | ||
| 720 | unsigned int cpu; | ||
| 721 | |||
| 722 | gic->dist_base.percpu_base = alloc_percpu(void __iomem *); | ||
| 723 | gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); | ||
| 724 | if (WARN_ON(!gic->dist_base.percpu_base || | ||
| 725 | !gic->cpu_base.percpu_base)) { | ||
| 726 | free_percpu(gic->dist_base.percpu_base); | ||
| 727 | free_percpu(gic->cpu_base.percpu_base); | ||
| 728 | return; | ||
| 729 | } | ||
| 730 | |||
| 731 | for_each_possible_cpu(cpu) { | ||
| 732 | unsigned long offset = percpu_offset * cpu_logical_map(cpu); | ||
| 733 | *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; | ||
| 734 | *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; | ||
| 735 | } | ||
| 736 | |||
| 737 | gic_set_base_accessor(gic, gic_get_percpu_base); | ||
| 738 | } else | ||
| 739 | #endif | ||
| 740 | { /* Normal, sane GIC... */ | ||
| 741 | WARN(percpu_offset, | ||
| 742 | "GIC_NON_BANKED not enabled, ignoring %08x offset!", | ||
| 743 | percpu_offset); | ||
| 744 | gic->dist_base.common_base = dist_base; | ||
| 745 | gic->cpu_base.common_base = cpu_base; | ||
| 746 | gic_set_base_accessor(gic, gic_get_common_base); | ||
| 747 | } | ||
| 748 | |||
| 749 | /* | ||
| 750 | * Initialize the CPU interface map to all CPUs. | ||
| 751 | * It will be refined as each CPU probes its ID. | ||
| 752 | */ | ||
| 753 | for (i = 0; i < NR_GIC_CPU_IF; i++) | ||
| 754 | gic_cpu_map[i] = 0xff; | ||
| 755 | |||
| 756 | /* | ||
| 757 | * For primary GICs, skip over SGIs. | ||
| 758 | * For secondary GICs, skip over PPIs, too. | ||
| 759 | */ | ||
| 760 | if (gic_nr == 0 && (irq_start & 31) > 0) { | ||
| 761 | hwirq_base = 16; | ||
| 762 | if (irq_start != -1) | ||
| 763 | irq_start = (irq_start & ~31) + 16; | ||
| 764 | } else { | ||
| 765 | hwirq_base = 32; | ||
| 766 | } | ||
| 767 | |||
| 768 | /* | ||
| 769 | * Find out how many interrupts are supported. | ||
| 770 | * The GIC only supports up to 1020 interrupt sources. | ||
| 771 | */ | ||
| 772 | gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f; | ||
| 773 | gic_irqs = (gic_irqs + 1) * 32; | ||
| 774 | if (gic_irqs > 1020) | ||
| 775 | gic_irqs = 1020; | ||
| 776 | gic->gic_irqs = gic_irqs; | ||
| 777 | |||
| 778 | gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ | ||
| 779 | irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); | ||
| 780 | if (IS_ERR_VALUE(irq_base)) { | ||
| 781 | WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", | ||
| 782 | irq_start); | ||
| 783 | irq_base = irq_start; | ||
| 784 | } | ||
| 785 | gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, | ||
| 786 | hwirq_base, &gic_irq_domain_ops, gic); | ||
| 787 | if (WARN_ON(!gic->domain)) | ||
| 788 | return; | ||
| 789 | |||
| 790 | #ifdef CONFIG_SMP | ||
| 791 | set_smp_cross_call(gic_raise_softirq); | ||
| 792 | #endif | ||
| 793 | |||
| 794 | set_handle_irq(gic_handle_irq); | ||
| 795 | |||
| 796 | gic_chip.flags |= gic_arch_extn.flags; | ||
| 797 | gic_dist_init(gic); | ||
| 798 | gic_cpu_init(gic); | ||
| 799 | gic_pm_init(gic); | ||
| 800 | } | ||
| 801 | |||
| 802 | void __cpuinit gic_secondary_init(unsigned int gic_nr) | ||
| 803 | { | ||
| 804 | BUG_ON(gic_nr >= MAX_GIC_NR); | ||
| 805 | |||
| 806 | gic_cpu_init(&gic_data[gic_nr]); | ||
| 807 | } | ||
| 808 | |||
| 809 | #ifdef CONFIG_OF | ||
| 810 | static int gic_cnt __initdata = 0; | ||
| 811 | |||
| 812 | int __init gic_of_init(struct device_node *node, struct device_node *parent) | ||
| 813 | { | ||
| 814 | void __iomem *cpu_base; | ||
| 815 | void __iomem *dist_base; | ||
| 816 | u32 percpu_offset; | ||
| 817 | int irq; | ||
| 818 | |||
| 819 | if (WARN_ON(!node)) | ||
| 820 | return -ENODEV; | ||
| 821 | |||
| 822 | dist_base = of_iomap(node, 0); | ||
| 823 | WARN(!dist_base, "unable to map gic dist registers\n"); | ||
| 824 | |||
| 825 | cpu_base = of_iomap(node, 1); | ||
| 826 | WARN(!cpu_base, "unable to map gic cpu registers\n"); | ||
| 827 | |||
| 828 | if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) | ||
| 829 | percpu_offset = 0; | ||
| 830 | |||
| 831 | gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node); | ||
| 832 | |||
| 833 | if (parent) { | ||
| 834 | irq = irq_of_parse_and_map(node, 0); | ||
| 835 | gic_cascade_irq(gic_cnt, irq); | ||
| 836 | } | ||
| 837 | gic_cnt++; | ||
| 838 | return 0; | ||
| 839 | } | ||
| 840 | IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); | ||
| 841 | IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); | ||
| 842 | IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); | ||
| 843 | IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); | ||
| 844 | |||
| 845 | #endif | ||
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c new file mode 100644 index 000000000000..3cf97aaebe40 --- /dev/null +++ b/drivers/irqchip/irq-vic.c | |||
| @@ -0,0 +1,489 @@ | |||
| 1 | /* | ||
| 2 | * linux/arch/arm/common/vic.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 1999 - 2003 ARM Limited | ||
| 5 | * Copyright (C) 2000 Deep Blue Solutions Ltd | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/export.h> | ||
| 23 | #include <linux/init.h> | ||
| 24 | #include <linux/list.h> | ||
| 25 | #include <linux/io.h> | ||
| 26 | #include <linux/irqdomain.h> | ||
| 27 | #include <linux/of.h> | ||
| 28 | #include <linux/of_address.h> | ||
| 29 | #include <linux/of_irq.h> | ||
| 30 | #include <linux/syscore_ops.h> | ||
| 31 | #include <linux/device.h> | ||
| 32 | #include <linux/amba/bus.h> | ||
| 33 | #include <linux/irqchip/arm-vic.h> | ||
| 34 | |||
| 35 | #include <asm/exception.h> | ||
| 36 | #include <asm/mach/irq.h> | ||
| 37 | |||
| 38 | #include "irqchip.h" | ||
| 39 | |||
| 40 | #define VIC_IRQ_STATUS 0x00 | ||
| 41 | #define VIC_FIQ_STATUS 0x04 | ||
| 42 | #define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */ | ||
| 43 | #define VIC_INT_SOFT 0x18 | ||
| 44 | #define VIC_INT_SOFT_CLEAR 0x1c | ||
| 45 | #define VIC_PROTECT 0x20 | ||
| 46 | #define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */ | ||
| 47 | #define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */ | ||
| 48 | |||
| 49 | #define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */ | ||
| 50 | #define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */ | ||
| 51 | #define VIC_ITCR 0x300 /* VIC test control register */ | ||
| 52 | |||
| 53 | #define VIC_VECT_CNTL_ENABLE (1 << 5) | ||
| 54 | |||
| 55 | #define VIC_PL192_VECT_ADDR 0xF00 | ||
| 56 | |||
| 57 | /** | ||
| 58 | * struct vic_device - VIC PM device | ||
| 59 | * @irq: The IRQ number for the base of the VIC. | ||
| 60 | * @base: The register base for the VIC. | ||
| 61 | * @valid_sources: A bitmask of valid interrupts | ||
| 62 | * @resume_sources: A bitmask of interrupts for resume. | ||
| 63 | * @resume_irqs: The IRQs enabled for resume. | ||
| 64 | * @int_select: Save for VIC_INT_SELECT. | ||
| 65 | * @int_enable: Save for VIC_INT_ENABLE. | ||
| 66 | * @soft_int: Save for VIC_INT_SOFT. | ||
| 67 | * @protect: Save for VIC_PROTECT. | ||
| 68 | * @domain: The IRQ domain for the VIC. | ||
| 69 | */ | ||
| 70 | struct vic_device { | ||
| 71 | void __iomem *base; | ||
| 72 | int irq; | ||
| 73 | u32 valid_sources; | ||
| 74 | u32 resume_sources; | ||
| 75 | u32 resume_irqs; | ||
| 76 | u32 int_select; | ||
| 77 | u32 int_enable; | ||
| 78 | u32 soft_int; | ||
| 79 | u32 protect; | ||
| 80 | struct irq_domain *domain; | ||
| 81 | }; | ||
| 82 | |||
| 83 | /* we cannot allocate memory when VICs are initially registered */ | ||
| 84 | static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; | ||
| 85 | |||
| 86 | static int vic_id; | ||
| 87 | |||
| 88 | static void vic_handle_irq(struct pt_regs *regs); | ||
| 89 | |||
| 90 | /** | ||
| 91 | * vic_init2 - common initialisation code | ||
| 92 | * @base: Base of the VIC. | ||
| 93 | * | ||
| 94 | * Common initialisation code for registration | ||
| 95 | * and resume. | ||
| 96 | */ | ||
| 97 | static void vic_init2(void __iomem *base) | ||
| 98 | { | ||
| 99 | int i; | ||
| 100 | |||
| 101 | for (i = 0; i < 16; i++) { | ||
| 102 | void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4); | ||
| 103 | writel(VIC_VECT_CNTL_ENABLE | i, reg); | ||
| 104 | } | ||
| 105 | |||
| 106 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); | ||
| 107 | } | ||
| 108 | |||
| 109 | #ifdef CONFIG_PM | ||
| 110 | static void resume_one_vic(struct vic_device *vic) | ||
| 111 | { | ||
| 112 | void __iomem *base = vic->base; | ||
| 113 | |||
| 114 | printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); | ||
| 115 | |||
| 116 | /* re-initialise static settings */ | ||
| 117 | vic_init2(base); | ||
| 118 | |||
| 119 | writel(vic->int_select, base + VIC_INT_SELECT); | ||
| 120 | writel(vic->protect, base + VIC_PROTECT); | ||
| 121 | |||
| 122 | /* set the enabled ints and then clear the non-enabled */ | ||
| 123 | writel(vic->int_enable, base + VIC_INT_ENABLE); | ||
| 124 | writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR); | ||
| 125 | |||
| 126 | /* and the same for the soft-int register */ | ||
| 127 | |||
| 128 | writel(vic->soft_int, base + VIC_INT_SOFT); | ||
| 129 | writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); | ||
| 130 | } | ||
| 131 | |||
| 132 | static void vic_resume(void) | ||
| 133 | { | ||
| 134 | int id; | ||
| 135 | |||
| 136 | for (id = vic_id - 1; id >= 0; id--) | ||
| 137 | resume_one_vic(vic_devices + id); | ||
| 138 | } | ||
| 139 | |||
| 140 | static void suspend_one_vic(struct vic_device *vic) | ||
| 141 | { | ||
| 142 | void __iomem *base = vic->base; | ||
| 143 | |||
| 144 | printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); | ||
| 145 | |||
| 146 | vic->int_select = readl(base + VIC_INT_SELECT); | ||
| 147 | vic->int_enable = readl(base + VIC_INT_ENABLE); | ||
| 148 | vic->soft_int = readl(base + VIC_INT_SOFT); | ||
| 149 | vic->protect = readl(base + VIC_PROTECT); | ||
| 150 | |||
| 151 | /* set the interrupts (if any) that are used for | ||
| 152 | * resuming the system */ | ||
| 153 | |||
| 154 | writel(vic->resume_irqs, base + VIC_INT_ENABLE); | ||
| 155 | writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); | ||
| 156 | } | ||
| 157 | |||
| 158 | static int vic_suspend(void) | ||
| 159 | { | ||
| 160 | int id; | ||
| 161 | |||
| 162 | for (id = 0; id < vic_id; id++) | ||
| 163 | suspend_one_vic(vic_devices + id); | ||
| 164 | |||
| 165 | return 0; | ||
| 166 | } | ||
| 167 | |||
| 168 | struct syscore_ops vic_syscore_ops = { | ||
| 169 | .suspend = vic_suspend, | ||
| 170 | .resume = vic_resume, | ||
| 171 | }; | ||
| 172 | |||
| 173 | /** | ||
| 174 | * vic_pm_init - initcall to register VIC pm | ||
| 175 | * | ||
| 176 | * This is called via late_initcall() to register | ||
| 177 | * the resources for the VICs due to the early | ||
| 178 | * nature of the VIC's registration. | ||
| 179 | */ | ||
| 180 | static int __init vic_pm_init(void) | ||
| 181 | { | ||
| 182 | if (vic_id > 0) | ||
| 183 | register_syscore_ops(&vic_syscore_ops); | ||
| 184 | |||
| 185 | return 0; | ||
| 186 | } | ||
| 187 | late_initcall(vic_pm_init); | ||
| 188 | #endif /* CONFIG_PM */ | ||
| 189 | |||
| 190 | static struct irq_chip vic_chip; | ||
| 191 | |||
| 192 | static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq, | ||
| 193 | irq_hw_number_t hwirq) | ||
| 194 | { | ||
| 195 | struct vic_device *v = d->host_data; | ||
| 196 | |||
| 197 | /* Skip invalid IRQs, only register handlers for the real ones */ | ||
| 198 | if (!(v->valid_sources & (1 << hwirq))) | ||
| 199 | return -ENOTSUPP; | ||
| 200 | irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); | ||
| 201 | irq_set_chip_data(irq, v->base); | ||
| 202 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | ||
| 203 | return 0; | ||
| 204 | } | ||
| 205 | |||
| 206 | /* | ||
| 207 | * Handle each interrupt in a single VIC. Returns non-zero if we've | ||
| 208 | * handled at least one interrupt. This reads the status register | ||
| 209 | * before handling each interrupt, which is necessary given that | ||
| 210 | * handle_IRQ may briefly re-enable interrupts for soft IRQ handling. | ||
| 211 | */ | ||
| 212 | static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) | ||
| 213 | { | ||
| 214 | u32 stat, irq; | ||
| 215 | int handled = 0; | ||
| 216 | |||
| 217 | while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { | ||
| 218 | irq = ffs(stat) - 1; | ||
| 219 | handle_IRQ(irq_find_mapping(vic->domain, irq), regs); | ||
| 220 | handled = 1; | ||
| 221 | } | ||
| 222 | |||
| 223 | return handled; | ||
| 224 | } | ||
| 225 | |||
| 226 | /* | ||
| 227 | * Keep iterating over all registered VIC's until there are no pending | ||
| 228 | * interrupts. | ||
| 229 | */ | ||
| 230 | static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs) | ||
| 231 | { | ||
| 232 | int i, handled; | ||
| 233 | |||
| 234 | do { | ||
| 235 | for (i = 0, handled = 0; i < vic_id; ++i) | ||
| 236 | handled |= handle_one_vic(&vic_devices[i], regs); | ||
| 237 | } while (handled); | ||
| 238 | } | ||
| 239 | |||
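handle_one_vic() above peels pending sources off VIC_IRQ_STATUS lowest bit first via ffs(). A small standalone illustration of that decode, with the status value invented (the real loop re-reads the register after each dispatch):

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
	unsigned int stat = 0x00000012;   /* pretend VIC_IRQ_STATUS: irqs 1 and 4 pending */

	while (stat) {
		unsigned int irq = ffs(stat) - 1;
		printf("dispatch hwirq %u\n", irq);
		stat &= ~(1u << irq);
	}
	return 0;
}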
| 240 | static struct irq_domain_ops vic_irqdomain_ops = { | ||
| 241 | .map = vic_irqdomain_map, | ||
| 242 | .xlate = irq_domain_xlate_onetwocell, | ||
| 243 | }; | ||
| 244 | |||
| 245 | /** | ||
| 246 | * vic_register() - Register a VIC. | ||
| 247 | * @base: The base address of the VIC. | ||
| 248 | * @irq: The base IRQ for the VIC. | ||
| 249 | * @valid_sources: bitmask of valid interrupts | ||
| 250 | * @resume_sources: bitmask of interrupts allowed for resume sources. | ||
| 251 | * @node: The device tree node associated with the VIC. | ||
| 252 | * | ||
| 253 | * Register the VIC with the system device tree so that it can be notified | ||
| 254 | * of suspend and resume requests and ensure that the correct actions are | ||
| 255 | * taken to re-instate the settings on resume. | ||
| 256 | * | ||
| 257 | * This also configures the IRQ domain for the VIC. | ||
| 258 | */ | ||
| 259 | static void __init vic_register(void __iomem *base, unsigned int irq, | ||
| 260 | u32 valid_sources, u32 resume_sources, | ||
| 261 | struct device_node *node) | ||
| 262 | { | ||
| 263 | struct vic_device *v; | ||
| 264 | int i; | ||
| 265 | |||
| 266 | if (vic_id >= ARRAY_SIZE(vic_devices)) { | ||
| 267 | printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__); | ||
| 268 | return; | ||
| 269 | } | ||
| 270 | |||
| 271 | v = &vic_devices[vic_id]; | ||
| 272 | v->base = base; | ||
| 273 | v->valid_sources = valid_sources; | ||
| 274 | v->resume_sources = resume_sources; | ||
| 275 | v->irq = irq; | ||
| 276 | set_handle_irq(vic_handle_irq); | ||
| 277 | vic_id++; | ||
| 278 | v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, | ||
| 279 | &vic_irqdomain_ops, v); | ||
| 280 | /* create an IRQ mapping for each valid IRQ */ | ||
| 281 | for (i = 0; i < fls(valid_sources); i++) | ||
| 282 | if (valid_sources & (1 << i)) | ||
| 283 | irq_create_mapping(v->domain, i); | ||
| 284 | } | ||
| 285 | |||
| 286 | static void vic_ack_irq(struct irq_data *d) | ||
| 287 | { | ||
| 288 | void __iomem *base = irq_data_get_irq_chip_data(d); | ||
| 289 | unsigned int irq = d->hwirq; | ||
| 290 | writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); | ||
| 291 | /* moreover, clear the soft-triggered, in case it was the reason */ | ||
| 292 | writel(1 << irq, base + VIC_INT_SOFT_CLEAR); | ||
| 293 | } | ||
| 294 | |||
| 295 | static void vic_mask_irq(struct irq_data *d) | ||
| 296 | { | ||
| 297 | void __iomem *base = irq_data_get_irq_chip_data(d); | ||
| 298 | unsigned int irq = d->hwirq; | ||
| 299 | writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); | ||
| 300 | } | ||
| 301 | |||
| 302 | static void vic_unmask_irq(struct irq_data *d) | ||
| 303 | { | ||
| 304 | void __iomem *base = irq_data_get_irq_chip_data(d); | ||
| 305 | unsigned int irq = d->hwirq; | ||
| 306 | writel(1 << irq, base + VIC_INT_ENABLE); | ||
| 307 | } | ||
| 308 | |||
| 309 | #if defined(CONFIG_PM) | ||
| 310 | static struct vic_device *vic_from_irq(unsigned int irq) | ||
| 311 | { | ||
| 312 | struct vic_device *v = vic_devices; | ||
| 313 | unsigned int base_irq = irq & ~31; | ||
| 314 | int id; | ||
| 315 | |||
| 316 | for (id = 0; id < vic_id; id++, v++) { | ||
| 317 | if (v->irq == base_irq) | ||
| 318 | return v; | ||
| 319 | } | ||
| 320 | |||
| 321 | return NULL; | ||
| 322 | } | ||
| 323 | |||
| 324 | static int vic_set_wake(struct irq_data *d, unsigned int on) | ||
| 325 | { | ||
| 326 | struct vic_device *v = vic_from_irq(d->irq); | ||
| 327 | unsigned int off = d->hwirq; | ||
| 328 | u32 bit = 1 << off; | ||
| 329 | |||
| 330 | if (!v) | ||
| 331 | return -EINVAL; | ||
| 332 | |||
| 333 | if (!(bit & v->resume_sources)) | ||
| 334 | return -EINVAL; | ||
| 335 | |||
| 336 | if (on) | ||
| 337 | v->resume_irqs |= bit; | ||
| 338 | else | ||
| 339 | v->resume_irqs &= ~bit; | ||
| 340 | |||
| 341 | return 0; | ||
| 342 | } | ||
| 343 | #else | ||
| 344 | #define vic_set_wake NULL | ||
| 345 | #endif /* CONFIG_PM */ | ||
| 346 | |||
| 347 | static struct irq_chip vic_chip = { | ||
| 348 | .name = "VIC", | ||
| 349 | .irq_ack = vic_ack_irq, | ||
| 350 | .irq_mask = vic_mask_irq, | ||
| 351 | .irq_unmask = vic_unmask_irq, | ||
| 352 | .irq_set_wake = vic_set_wake, | ||
| 353 | }; | ||
| 354 | |||
| 355 | static void __init vic_disable(void __iomem *base) | ||
| 356 | { | ||
| 357 | writel(0, base + VIC_INT_SELECT); | ||
| 358 | writel(0, base + VIC_INT_ENABLE); | ||
| 359 | writel(~0, base + VIC_INT_ENABLE_CLEAR); | ||
| 360 | writel(0, base + VIC_ITCR); | ||
| 361 | writel(~0, base + VIC_INT_SOFT_CLEAR); | ||
| 362 | } | ||
| 363 | |||
| 364 | static void __init vic_clear_interrupts(void __iomem *base) | ||
| 365 | { | ||
| 366 | unsigned int i; | ||
| 367 | |||
| 368 | writel(0, base + VIC_PL190_VECT_ADDR); | ||
| 369 | for (i = 0; i < 19; i++) { | ||
| 370 | unsigned int value; | ||
| 371 | |||
| 372 | value = readl(base + VIC_PL190_VECT_ADDR); | ||
| 373 | writel(value, base + VIC_PL190_VECT_ADDR); | ||
| 374 | } | ||
| 375 | } | ||
| 376 | |||
| 377 | /* | ||
| 378 | * The PL190 cell from ARM has been modified by ST to handle 64 interrupts. | ||
| 379 | * The original cell has 32 interrupts, while the modified one has 64, | ||
| 380 | * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case | ||
| 381 | * the probe function is called twice, with base set to offset 000 | ||
| 382 | * and 020 within the page. We call this "second block". | ||
| 383 | */ | ||
| 384 | static void __init vic_init_st(void __iomem *base, unsigned int irq_start, | ||
| 385 | u32 vic_sources, struct device_node *node) | ||
| 386 | { | ||
| 387 | unsigned int i; | ||
| 388 | int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0; | ||
| 389 | |||
| 390 | /* Disable all interrupts initially. */ | ||
| 391 | vic_disable(base); | ||
| 392 | |||
| 393 | /* | ||
| 394 | * Make sure we clear all existing interrupts. The vector registers | ||
| 395 | * in this cell are after the second block of general registers, | ||
| 396 | * so we can address them using standard offsets, but only from | ||
| 397 | * the second base address, which is 0x20 in the page | ||
| 398 | */ | ||
| 399 | if (vic_2nd_block) { | ||
| 400 | vic_clear_interrupts(base); | ||
| 401 | |||
| 402 | /* ST has 16 vectors as well, but we don't enable them for now */ | ||
| 403 | for (i = 0; i < 16; i++) { | ||
| 404 | void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4); | ||
| 405 | writel(0, reg); | ||
| 406 | } | ||
| 407 | |||
| 408 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); | ||
| 409 | } | ||
| 410 | |||
| 411 | vic_register(base, irq_start, vic_sources, 0, node); | ||
| 412 | } | ||
| 413 | |||
| 414 | void __init __vic_init(void __iomem *base, int irq_start, | ||
| 415 | u32 vic_sources, u32 resume_sources, | ||
| 416 | struct device_node *node) | ||
| 417 | { | ||
| 418 | unsigned int i; | ||
| 419 | u32 cellid = 0; | ||
| 420 | enum amba_vendor vendor; | ||
| 421 | |||
| 422 | /* Identify which VIC cell this one is, by reading the ID */ | ||
| 423 | for (i = 0; i < 4; i++) { | ||
| 424 | void __iomem *addr; | ||
| 425 | addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4); | ||
| 426 | cellid |= (readl(addr) & 0xff) << (8 * i); | ||
| 427 | } | ||
| 428 | vendor = (cellid >> 12) & 0xff; | ||
| 429 | printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n", | ||
| 430 | base, cellid, vendor); | ||
| 431 | |||
| 432 | switch(vendor) { | ||
| 433 | case AMBA_VENDOR_ST: | ||
| 434 | vic_init_st(base, irq_start, vic_sources, node); | ||
| 435 | return; | ||
| 436 | default: | ||
| 437 | printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n"); | ||
| 438 | /* fall through */ | ||
| 439 | case AMBA_VENDOR_ARM: | ||
| 440 | break; | ||
| 441 | } | ||
| 442 | |||
| 443 | /* Disable all interrupts initially. */ | ||
| 444 | vic_disable(base); | ||
| 445 | |||
| 446 | /* Make sure we clear all existing interrupts */ | ||
| 447 | vic_clear_interrupts(base); | ||
| 448 | |||
| 449 | vic_init2(base); | ||
| 450 | |||
| 451 | vic_register(base, irq_start, vic_sources, resume_sources, node); | ||
| 452 | } | ||
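The ID loop above assembles a 32-bit cell ID from the four byte-wide registers at page offset 0xfe0 and takes bits [19:12] as the vendor code (0x41 for ARM, 0x80 for ST, matching the amba/bus.h constants). A standalone sketch of that assembly with made-up register contents:

#include <stdio.h>

int main(void)
{
	/* Pretend contents of the four ID registers at 0xfe0, 0xfe4, 0xfe8, 0xfec. */
	unsigned int id_regs[4] = { 0x92, 0x11, 0x04, 0x00 };
	unsigned int cellid = 0, i;

	for (i = 0; i < 4; i++)
		cellid |= (id_regs[i] & 0xff) << (8 * i);

	printf("id 0x%08x, vendor 0x%02x\n", cellid, (cellid >> 12) & 0xff);  /* vendor 0x41 */
	return 0;
}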
| 453 | |||
| 454 | /** | ||
| 455 | * vic_init() - initialise a vectored interrupt controller | ||
| 456 | * @base: iomem base address | ||
| 457 | * @irq_start: starting interrupt number, must be a multiple of 32 | ||
| 458 | * @vic_sources: bitmask of interrupt sources to allow | ||
| 459 | * @resume_sources: bitmask of interrupt sources to allow for resume | ||
| 460 | */ | ||
| 461 | void __init vic_init(void __iomem *base, unsigned int irq_start, | ||
| 462 | u32 vic_sources, u32 resume_sources) | ||
| 463 | { | ||
| 464 | __vic_init(base, irq_start, vic_sources, resume_sources, NULL); | ||
| 465 | } | ||
| 466 | |||
| 467 | #ifdef CONFIG_OF | ||
| 468 | int __init vic_of_init(struct device_node *node, struct device_node *parent) | ||
| 469 | { | ||
| 470 | void __iomem *regs; | ||
| 471 | |||
| 472 | if (WARN(parent, "non-root VICs are not supported")) | ||
| 473 | return -EINVAL; | ||
| 474 | |||
| 475 | regs = of_iomap(node, 0); | ||
| 476 | if (WARN_ON(!regs)) | ||
| 477 | return -EIO; | ||
| 478 | |||
| 479 | /* | ||
| 480 | * Passing 0 as first IRQ makes the simple domain allocate descriptors | ||
| 481 | */ | ||
| 482 | __vic_init(regs, 0, ~0, ~0, node); | ||
| 483 | |||
| 484 | return 0; | ||
| 485 | } | ||
| 486 | IRQCHIP_DECLARE(arm_pl190_vic, "arm,pl190-vic", vic_of_init); | ||
| 487 | IRQCHIP_DECLARE(arm_pl192_vic, "arm,pl192-vic", vic_of_init); | ||
| 488 | IRQCHIP_DECLARE(arm_versatile_vic, "arm,versatile-vic", vic_of_init); | ||
| 489 | #endif /* CONFIG_OF */ | ||
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c new file mode 100644 index 000000000000..f496afce29de --- /dev/null +++ b/drivers/irqchip/irqchip.c | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2012 Thomas Petazzoni | ||
| 3 | * | ||
| 4 | * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> | ||
| 5 | * | ||
| 6 | * This file is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2. This program is licensed "as is" without any | ||
| 8 | * warranty of any kind, whether express or implied. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/init.h> | ||
| 12 | #include <linux/of_irq.h> | ||
| 13 | |||
| 14 | #include "irqchip.h" | ||
| 15 | |||
| 16 | /* | ||
| 17 | * This special of_device_id is the sentinel at the end of the | ||
| 18 | * of_device_id[] array of all irqchips. It is automatically placed at | ||
| 19 | * the end of the array by the linker, thanks to being part of a | ||
| 20 | * special section. | ||
| 21 | */ | ||
| 22 | static const struct of_device_id | ||
| 23 | irqchip_of_match_end __used __section(__irqchip_of_end); | ||
| 24 | |||
| 25 | extern struct of_device_id __irqchip_begin[]; | ||
| 26 | |||
| 27 | void __init irqchip_init(void) | ||
| 28 | { | ||
| 29 | of_irq_init(__irqchip_begin); | ||
| 30 | } | ||
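of_irq_init() walks the of_device_id table that the linker collects between __irqchip_begin and the sentinel above, matching entries against the device tree and calling each init function parents-first. On ARM a DT platform would typically just point its machine descriptor at this helper; a hedged sketch, with the header path, board name and compatible string assumed rather than taken from this patch:

/* Illustrative only: wiring irqchip_init() into an ARM machine descriptor. */
#include <linux/irqchip.h>      /* assumed to declare irqchip_init() */
#include <asm/mach/arch.h>

static const char * const example_dt_compat[] = {
	"vendor,example-board",  /* made-up compatible string */
	NULL,
};

DT_MACHINE_START(EXAMPLE_DT, "Example DT board")
	.init_irq	= irqchip_init,
	.dt_compat	= example_dt_compat,
MACHINE_END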
diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h new file mode 100644 index 000000000000..e445ba2d6add --- /dev/null +++ b/drivers/irqchip/irqchip.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2012 Thomas Petazzoni | ||
| 3 | * | ||
| 4 | * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> | ||
| 5 | * | ||
| 6 | * This file is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2. This program is licensed "as is" without any | ||
| 8 | * warranty of any kind, whether express or implied. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _IRQCHIP_H | ||
| 12 | #define _IRQCHIP_H | ||
| 13 | |||
| 14 | /* | ||
| 15 | * This macro must be used by the different irqchip drivers to declare | ||
| 16 | * the association between their DT compatible string and their | ||
| 17 | * initialization function. | ||
| 18 | * | ||
| 19 | * @name: name that must be unique across all IRQCHIP_DECLARE() uses in | ||
| 20 | * the same file. | ||
| 21 | * @compstr: compatible string of the irqchip driver | ||
| 22 | * @fn: initialization function | ||
| 23 | */ | ||
| 24 | #define IRQCHIP_DECLARE(name,compstr,fn) \ | ||
| 25 | static const struct of_device_id irqchip_of_match_##name \ | ||
| 26 | __used __section(__irqchip_of_table) \ | ||
| 27 | = { .compatible = compstr, .data = fn } | ||
| 28 | |||
| 29 | #endif | ||
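IRQCHIP_DECLARE() only works if the linker script gathers every __irqchip_of_table entry between the __irqchip_begin symbol used by irqchip.c and the __irqchip_of_end sentinel. A rough sketch of the kind of vmlinux.lds.h helper this relies on (simplified, and assumed rather than quoted from the patch):

#define IRQCHIP_OF_MATCH_TABLE()			\
	. = ALIGN(8);					\
	VMLINUX_SYMBOL(__irqchip_begin) = .;		\
	*(__irqchip_of_table)				\
	*(__irqchip_of_end)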
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 80e1d2fd9d4c..8527743b5cef 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | #include <linux/of_irq.h> | 25 | #include <linux/of_irq.h> |
| 26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
| 27 | 27 | ||
| 28 | #include "irqchip.h" | ||
| 29 | |||
| 28 | static DEFINE_SPINLOCK(lock); | 30 | static DEFINE_SPINLOCK(lock); |
| 29 | 31 | ||
| 30 | /* spear300 shared irq registers offsets and masks */ | 32 | /* spear300 shared irq registers offsets and masks */ |
| @@ -300,6 +302,7 @@ int __init spear300_shirq_of_init(struct device_node *np, | |||
| 300 | return shirq_init(spear300_shirq_blocks, | 302 | return shirq_init(spear300_shirq_blocks, |
| 301 | ARRAY_SIZE(spear300_shirq_blocks), np); | 303 | ARRAY_SIZE(spear300_shirq_blocks), np); |
| 302 | } | 304 | } |
| 305 | IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init); | ||
| 303 | 306 | ||
| 304 | int __init spear310_shirq_of_init(struct device_node *np, | 307 | int __init spear310_shirq_of_init(struct device_node *np, |
| 305 | struct device_node *parent) | 308 | struct device_node *parent) |
| @@ -307,6 +310,7 @@ int __init spear310_shirq_of_init(struct device_node *np, | |||
| 307 | return shirq_init(spear310_shirq_blocks, | 310 | return shirq_init(spear310_shirq_blocks, |
| 308 | ARRAY_SIZE(spear310_shirq_blocks), np); | 311 | ARRAY_SIZE(spear310_shirq_blocks), np); |
| 309 | } | 312 | } |
| 313 | IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init); | ||
| 310 | 314 | ||
| 311 | int __init spear320_shirq_of_init(struct device_node *np, | 315 | int __init spear320_shirq_of_init(struct device_node *np, |
| 312 | struct device_node *parent) | 316 | struct device_node *parent) |
| @@ -314,3 +318,4 @@ int __init spear320_shirq_of_init(struct device_node *np, | |||
| 314 | return shirq_init(spear320_shirq_blocks, | 318 | return shirq_init(spear320_shirq_blocks, |
| 315 | ARRAY_SIZE(spear320_shirq_blocks), np); | 319 | ARRAY_SIZE(spear320_shirq_blocks), np); |
| 316 | } | 320 | } |
| 321 | IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init); | ||
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 268f45d42394..1192518e1aca 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c | |||
| @@ -26,22 +26,18 @@ | |||
| 26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
| 27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 28 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
| 29 | #include <linux/irqchip/arm-gic.h> | ||
| 29 | #include <linux/mfd/core.h> | 30 | #include <linux/mfd/core.h> |
| 30 | #include <linux/mfd/dbx500-prcmu.h> | 31 | #include <linux/mfd/dbx500-prcmu.h> |
| 31 | #include <linux/mfd/abx500/ab8500.h> | 32 | #include <linux/mfd/abx500/ab8500.h> |
| 32 | #include <linux/regulator/db8500-prcmu.h> | 33 | #include <linux/regulator/db8500-prcmu.h> |
| 33 | #include <linux/regulator/machine.h> | 34 | #include <linux/regulator/machine.h> |
| 34 | #include <linux/cpufreq.h> | 35 | #include <linux/cpufreq.h> |
| 35 | #include <asm/hardware/gic.h> | ||
| 36 | #include <mach/hardware.h> | 36 | #include <mach/hardware.h> |
| 37 | #include <mach/irqs.h> | 37 | #include <mach/irqs.h> |
| 38 | #include <mach/db8500-regs.h> | 38 | #include <mach/db8500-regs.h> |
| 39 | #include <mach/id.h> | ||
| 40 | #include "dbx500-prcmu-regs.h" | 39 | #include "dbx500-prcmu-regs.h" |
| 41 | 40 | ||
| 42 | /* Offset for the firmware version within the TCPM */ | ||
| 43 | #define PRCMU_FW_VERSION_OFFSET 0xA4 | ||
| 44 | |||
| 45 | /* Index of different voltages to be used when accessing AVSData */ | 41 | /* Index of different voltages to be used when accessing AVSData */ |
| 46 | #define PRCM_AVS_BASE 0x2FC | 42 | #define PRCM_AVS_BASE 0x2FC |
| 47 | #define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0) | 43 | #define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0) |
| @@ -216,10 +212,8 @@ | |||
| 216 | #define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1) | 212 | #define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1) |
| 217 | #define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2) | 213 | #define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2) |
| 218 | #define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3) | 214 | #define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3) |
| 219 | #define PRCMU_I2C_WRITE(slave) \ | 215 | #define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6)) |
| 220 | (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0)) | 216 | #define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6)) |
| 221 | #define PRCMU_I2C_READ(slave) \ | ||
| 222 | (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0)) | ||
| 223 | #define PRCMU_I2C_STOP_EN BIT(3) | 217 | #define PRCMU_I2C_STOP_EN BIT(3) |
| 224 | 218 | ||
| 225 | /* Mailbox 5 ACKs */ | 219 | /* Mailbox 5 ACKs */ |
| @@ -1049,12 +1043,13 @@ int db8500_prcmu_get_ddr_opp(void) | |||
| 1049 | * | 1043 | * |
| 1050 | * This function sets the operating point of the DDR. | 1044 | * This function sets the operating point of the DDR. |
| 1051 | */ | 1045 | */ |
| 1046 | static bool enable_set_ddr_opp; | ||
| 1052 | int db8500_prcmu_set_ddr_opp(u8 opp) | 1047 | int db8500_prcmu_set_ddr_opp(u8 opp) |
| 1053 | { | 1048 | { |
| 1054 | if (opp < DDR_100_OPP || opp > DDR_25_OPP) | 1049 | if (opp < DDR_100_OPP || opp > DDR_25_OPP) |
| 1055 | return -EINVAL; | 1050 | return -EINVAL; |
| 1056 | /* Changing the DDR OPP can hang the hardware pre-v21 */ | 1051 | /* Changing the DDR OPP can hang the hardware pre-v21 */ |
| 1057 | if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20()) | 1052 | if (enable_set_ddr_opp) |
| 1058 | writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW); | 1053 | writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW); |
| 1059 | 1054 | ||
| 1060 | return 0; | 1055 | return 0; |
| @@ -2706,21 +2701,43 @@ static struct irq_chip prcmu_irq_chip = { | |||
| 2706 | .irq_unmask = prcmu_irq_unmask, | 2701 | .irq_unmask = prcmu_irq_unmask, |
| 2707 | }; | 2702 | }; |
| 2708 | 2703 | ||
| 2709 | static char *fw_project_name(u8 project) | 2704 | static __init char *fw_project_name(u32 project) |
| 2710 | { | 2705 | { |
| 2711 | switch (project) { | 2706 | switch (project) { |
| 2712 | case PRCMU_FW_PROJECT_U8500: | 2707 | case PRCMU_FW_PROJECT_U8500: |
| 2713 | return "U8500"; | 2708 | return "U8500"; |
| 2714 | case PRCMU_FW_PROJECT_U8500_C2: | 2709 | case PRCMU_FW_PROJECT_U8400: |
| 2715 | return "U8500 C2"; | 2710 | return "U8400"; |
| 2716 | case PRCMU_FW_PROJECT_U9500: | 2711 | case PRCMU_FW_PROJECT_U9500: |
| 2717 | return "U9500"; | 2712 | return "U9500"; |
| 2718 | case PRCMU_FW_PROJECT_U9500_C2: | 2713 | case PRCMU_FW_PROJECT_U8500_MBB: |
| 2719 | return "U9500 C2"; | 2714 | return "U8500 MBB"; |
| 2715 | case PRCMU_FW_PROJECT_U8500_C1: | ||
| 2716 | return "U8500 C1"; | ||
| 2717 | case PRCMU_FW_PROJECT_U8500_C2: | ||
| 2718 | return "U8500 C2"; | ||
| 2719 | case PRCMU_FW_PROJECT_U8500_C3: | ||
| 2720 | return "U8500 C3"; | ||
| 2721 | case PRCMU_FW_PROJECT_U8500_C4: | ||
| 2722 | return "U8500 C4"; | ||
| 2723 | case PRCMU_FW_PROJECT_U9500_MBL: | ||
| 2724 | return "U9500 MBL"; | ||
| 2725 | case PRCMU_FW_PROJECT_U8500_MBL: | ||
| 2726 | return "U8500 MBL"; | ||
| 2727 | case PRCMU_FW_PROJECT_U8500_MBL2: | ||
| 2728 | return "U8500 MBL2"; | ||
| 2720 | case PRCMU_FW_PROJECT_U8520: | 2729 | case PRCMU_FW_PROJECT_U8520: |
| 2721 | return "U8520"; | 2730 | return "U8520 MBL"; |
| 2722 | case PRCMU_FW_PROJECT_U8420: | 2731 | case PRCMU_FW_PROJECT_U8420: |
| 2723 | return "U8420"; | 2732 | return "U8420"; |
| 2733 | case PRCMU_FW_PROJECT_U9540: | ||
| 2734 | return "U9540"; | ||
| 2735 | case PRCMU_FW_PROJECT_A9420: | ||
| 2736 | return "A9420"; | ||
| 2737 | case PRCMU_FW_PROJECT_L8540: | ||
| 2738 | return "L8540"; | ||
| 2739 | case PRCMU_FW_PROJECT_L8580: | ||
| 2740 | return "L8580"; | ||
| 2724 | default: | 2741 | default: |
| 2725 | return "Unknown"; | 2742 | return "Unknown"; |
| 2726 | } | 2743 | } |
| @@ -2766,36 +2783,44 @@ static int db8500_irq_init(struct device_node *np) | |||
| 2766 | return 0; | 2783 | return 0; |
| 2767 | } | 2784 | } |
| 2768 | 2785 | ||
| 2769 | void __init db8500_prcmu_early_init(void) | 2786 | static void dbx500_fw_version_init(struct platform_device *pdev, |
| 2787 | u32 version_offset) | ||
| 2770 | { | 2788 | { |
| 2771 | if (cpu_is_u8500v2() || cpu_is_u9540()) { | 2789 | struct resource *res; |
| 2772 | void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K); | 2790 | void __iomem *tcpm_base; |
| 2773 | |||
| 2774 | if (tcpm_base != NULL) { | ||
| 2775 | u32 version; | ||
| 2776 | version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET); | ||
| 2777 | fw_info.version.project = version & 0xFF; | ||
| 2778 | fw_info.version.api_version = (version >> 8) & 0xFF; | ||
| 2779 | fw_info.version.func_version = (version >> 16) & 0xFF; | ||
| 2780 | fw_info.version.errata = (version >> 24) & 0xFF; | ||
| 2781 | fw_info.valid = true; | ||
| 2782 | pr_info("PRCMU firmware: %s, version %d.%d.%d\n", | ||
| 2783 | fw_project_name(fw_info.version.project), | ||
| 2784 | (version >> 8) & 0xFF, (version >> 16) & 0xFF, | ||
| 2785 | (version >> 24) & 0xFF); | ||
| 2786 | iounmap(tcpm_base); | ||
| 2787 | } | ||
| 2788 | 2791 | ||
| 2789 | if (cpu_is_u9540()) | 2792 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
| 2790 | tcdm_base = ioremap_nocache(U8500_PRCMU_TCDM_BASE, | 2793 | "prcmu-tcpm"); |
| 2791 | SZ_4K + SZ_8K) + SZ_8K; | 2794 | if (!res) { |
| 2792 | else | 2795 | dev_err(&pdev->dev, |
| 2793 | tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE); | 2796 | "Error: no prcmu tcpm memory region provided\n"); |
| 2794 | } else { | 2797 | return; |
| 2795 | pr_err("prcmu: Unsupported chip version\n"); | ||
| 2796 | BUG(); | ||
| 2797 | } | 2798 | } |
| 2799 | tcpm_base = ioremap(res->start, resource_size(res)); | ||
| 2800 | if (tcpm_base != NULL) { | ||
| 2801 | u32 version; | ||
| 2802 | |||
| 2803 | version = readl(tcpm_base + version_offset); | ||
| 2804 | fw_info.version.project = (version & 0xFF); | ||
| 2805 | fw_info.version.api_version = (version >> 8) & 0xFF; | ||
| 2806 | fw_info.version.func_version = (version >> 16) & 0xFF; | ||
| 2807 | fw_info.version.errata = (version >> 24) & 0xFF; | ||
| 2808 | strncpy(fw_info.version.project_name, | ||
| 2809 | fw_project_name(fw_info.version.project), | ||
| 2810 | PRCMU_FW_PROJECT_NAME_LEN); | ||
| 2811 | fw_info.valid = true; | ||
| 2812 | pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n", | ||
| 2813 | fw_info.version.project_name, | ||
| 2814 | fw_info.version.project, | ||
| 2815 | fw_info.version.api_version, | ||
| 2816 | fw_info.version.func_version, | ||
| 2817 | fw_info.version.errata); | ||
| 2818 | iounmap(tcpm_base); | ||
| 2819 | } | ||
| 2820 | } | ||
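The TCPM version word read above packs four byte-wide fields: project in bits [7:0], API version in [15:8], function version in [23:16] and errata level in [31:24]. A standalone decode with an invented value, illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned int version = 0x04080203;   /* pretend TCPM firmware version word */

	printf("project %u, api %u, func %u, errata %u\n",
	       version & 0xff, (version >> 8) & 0xff,
	       (version >> 16) & 0xff, (version >> 24) & 0xff);
	return 0;
}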
| 2798 | 2821 | ||
| 2822 | void __init db8500_prcmu_early_init(void) | ||
| 2823 | { | ||
| 2799 | spin_lock_init(&mb0_transfer.lock); | 2824 | spin_lock_init(&mb0_transfer.lock); |
| 2800 | spin_lock_init(&mb0_transfer.dbb_irqs_lock); | 2825 | spin_lock_init(&mb0_transfer.dbb_irqs_lock); |
| 2801 | mutex_init(&mb0_transfer.ac_wake_lock); | 2826 | mutex_init(&mb0_transfer.ac_wake_lock); |
| @@ -3105,23 +3130,30 @@ static void db8500_prcmu_update_cpufreq(void) | |||
| 3105 | */ | 3130 | */ |
| 3106 | static int db8500_prcmu_probe(struct platform_device *pdev) | 3131 | static int db8500_prcmu_probe(struct platform_device *pdev) |
| 3107 | { | 3132 | { |
| 3108 | struct ab8500_platform_data *ab8500_platdata = pdev->dev.platform_data; | ||
| 3109 | struct device_node *np = pdev->dev.of_node; | 3133 | struct device_node *np = pdev->dev.of_node; |
| 3134 | struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev); | ||
| 3110 | int irq = 0, err = 0, i; | 3135 | int irq = 0, err = 0, i; |
| 3111 | 3136 | struct resource *res; | |
| 3112 | if (ux500_is_svp()) | ||
| 3113 | return -ENODEV; | ||
| 3114 | 3137 | ||
| 3115 | init_prcm_registers(); | 3138 | init_prcm_registers(); |
| 3116 | 3139 | ||
| 3140 | dbx500_fw_version_init(pdev, pdata->version_offset); | ||
| 3141 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm"); | ||
| 3142 | if (!res) { | ||
| 3143 | dev_err(&pdev->dev, "no prcmu tcdm region provided\n"); | ||
| 3144 | return -ENOENT; | ||
| 3145 | } | ||
| 3146 | tcdm_base = devm_ioremap(&pdev->dev, res->start, | ||
| 3147 | resource_size(res)); | ||
| 3148 | |||
| 3117 | /* Clean up the mailbox interrupts after pre-kernel code. */ | 3149 | /* Clean up the mailbox interrupts after pre-kernel code. */ |
| 3118 | writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); | 3150 | writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); |
| 3119 | 3151 | ||
| 3120 | if (np) | 3152 | irq = platform_get_irq(pdev, 0); |
| 3121 | irq = platform_get_irq(pdev, 0); | 3153 | if (irq <= 0) { |
| 3122 | 3154 | dev_err(&pdev->dev, "no prcmu irq provided\n"); | |
| 3123 | if (!np || irq <= 0) | 3155 | return -ENOENT; |
| 3124 | irq = IRQ_DB8500_PRCMU1; | 3156 | } |
| 3125 | 3157 | ||
| 3126 | err = request_threaded_irq(irq, prcmu_irq_handler, | 3158 | err = request_threaded_irq(irq, prcmu_irq_handler, |
| 3127 | prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); | 3159 | prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); |
| @@ -3135,13 +3167,12 @@ static int db8500_prcmu_probe(struct platform_device *pdev) | |||
| 3135 | 3167 | ||
| 3136 | for (i = 0; i < ARRAY_SIZE(db8500_prcmu_devs); i++) { | 3168 | for (i = 0; i < ARRAY_SIZE(db8500_prcmu_devs); i++) { |
| 3137 | if (!strcmp(db8500_prcmu_devs[i].name, "ab8500-core")) { | 3169 | if (!strcmp(db8500_prcmu_devs[i].name, "ab8500-core")) { |
| 3138 | db8500_prcmu_devs[i].platform_data = ab8500_platdata; | 3170 | db8500_prcmu_devs[i].platform_data = pdata->ab_platdata; |
| 3139 | db8500_prcmu_devs[i].pdata_size = sizeof(struct ab8500_platform_data); | 3171 | db8500_prcmu_devs[i].pdata_size = sizeof(struct ab8500_platform_data); |
| 3140 | } | 3172 | } |
| 3141 | } | 3173 | } |
| 3142 | 3174 | ||
| 3143 | if (cpu_is_u8500v20_or_later()) | 3175 | prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); |
| 3144 | prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); | ||
| 3145 | 3176 | ||
| 3146 | db8500_prcmu_update_cpufreq(); | 3177 | db8500_prcmu_update_cpufreq(); |
| 3147 | 3178 | ||
