diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/arm/common/gic.c | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/arm/common/gic.c')
-rw-r--r-- | arch/arm/common/gic.c | 300 |
1 files changed, 196 insertions, 104 deletions
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 7dfa9a85bc0c..4ddd0a6ac7ff 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -35,83 +35,92 @@ | |||
35 | 35 | ||
36 | static DEFINE_SPINLOCK(irq_controller_lock); | 36 | static DEFINE_SPINLOCK(irq_controller_lock); |
37 | 37 | ||
38 | /* Address of GIC 0 CPU interface */ | ||
39 | void __iomem *gic_cpu_base_addr __read_mostly; | ||
40 | |||
38 | struct gic_chip_data { | 41 | struct gic_chip_data { |
39 | unsigned int irq_offset; | 42 | unsigned int irq_offset; |
40 | void __iomem *dist_base; | 43 | void __iomem *dist_base; |
41 | void __iomem *cpu_base; | 44 | void __iomem *cpu_base; |
42 | }; | 45 | }; |
43 | 46 | ||
47 | /* | ||
48 | * Supported arch specific GIC irq extension. | ||
49 | * Default make them NULL. | ||
50 | */ | ||
51 | struct irq_chip gic_arch_extn = { | ||
52 | .irq_eoi = NULL, | ||
53 | .irq_mask = NULL, | ||
54 | .irq_unmask = NULL, | ||
55 | .irq_retrigger = NULL, | ||
56 | .irq_set_type = NULL, | ||
57 | .irq_set_wake = NULL, | ||
58 | }; | ||
59 | |||
44 | #ifndef MAX_GIC_NR | 60 | #ifndef MAX_GIC_NR |
45 | #define MAX_GIC_NR 1 | 61 | #define MAX_GIC_NR 1 |
46 | #endif | 62 | #endif |
47 | 63 | ||
48 | static struct gic_chip_data gic_data[MAX_GIC_NR]; | 64 | static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; |
49 | 65 | ||
50 | static inline void __iomem *gic_dist_base(unsigned int irq) | 66 | static inline void __iomem *gic_dist_base(struct irq_data *d) |
51 | { | 67 | { |
52 | struct gic_chip_data *gic_data = get_irq_chip_data(irq); | 68 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
53 | return gic_data->dist_base; | 69 | return gic_data->dist_base; |
54 | } | 70 | } |
55 | 71 | ||
56 | static inline void __iomem *gic_cpu_base(unsigned int irq) | 72 | static inline void __iomem *gic_cpu_base(struct irq_data *d) |
57 | { | 73 | { |
58 | struct gic_chip_data *gic_data = get_irq_chip_data(irq); | 74 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
59 | return gic_data->cpu_base; | 75 | return gic_data->cpu_base; |
60 | } | 76 | } |
61 | 77 | ||
62 | static inline unsigned int gic_irq(unsigned int irq) | 78 | static inline unsigned int gic_irq(struct irq_data *d) |
63 | { | 79 | { |
64 | struct gic_chip_data *gic_data = get_irq_chip_data(irq); | 80 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
65 | return irq - gic_data->irq_offset; | 81 | return d->irq - gic_data->irq_offset; |
66 | } | 82 | } |
67 | 83 | ||
68 | /* | 84 | /* |
69 | * Routines to acknowledge, disable and enable interrupts | 85 | * Routines to acknowledge, disable and enable interrupts |
70 | * | ||
71 | * Linux assumes that when we're done with an interrupt we need to | ||
72 | * unmask it, in the same way we need to unmask an interrupt when | ||
73 | * we first enable it. | ||
74 | * | ||
75 | * The GIC has a separate notion of "end of interrupt" to re-enable | ||
76 | * an interrupt after handling, in order to support hardware | ||
77 | * prioritisation. | ||
78 | * | ||
79 | * We can make the GIC behave in the way that Linux expects by making | ||
80 | * our "acknowledge" routine disable the interrupt, then mark it as | ||
81 | * complete. | ||
82 | */ | 86 | */ |
83 | static void gic_ack_irq(unsigned int irq) | 87 | static void gic_mask_irq(struct irq_data *d) |
84 | { | 88 | { |
85 | u32 mask = 1 << (irq % 32); | 89 | u32 mask = 1 << (d->irq % 32); |
86 | 90 | ||
87 | spin_lock(&irq_controller_lock); | 91 | spin_lock(&irq_controller_lock); |
88 | writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4); | 92 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); |
89 | writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI); | 93 | if (gic_arch_extn.irq_mask) |
94 | gic_arch_extn.irq_mask(d); | ||
90 | spin_unlock(&irq_controller_lock); | 95 | spin_unlock(&irq_controller_lock); |
91 | } | 96 | } |
92 | 97 | ||
93 | static void gic_mask_irq(unsigned int irq) | 98 | static void gic_unmask_irq(struct irq_data *d) |
94 | { | 99 | { |
95 | u32 mask = 1 << (irq % 32); | 100 | u32 mask = 1 << (d->irq % 32); |
96 | 101 | ||
97 | spin_lock(&irq_controller_lock); | 102 | spin_lock(&irq_controller_lock); |
98 | writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4); | 103 | if (gic_arch_extn.irq_unmask) |
104 | gic_arch_extn.irq_unmask(d); | ||
105 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | ||
99 | spin_unlock(&irq_controller_lock); | 106 | spin_unlock(&irq_controller_lock); |
100 | } | 107 | } |
101 | 108 | ||
102 | static void gic_unmask_irq(unsigned int irq) | 109 | static void gic_eoi_irq(struct irq_data *d) |
103 | { | 110 | { |
104 | u32 mask = 1 << (irq % 32); | 111 | if (gic_arch_extn.irq_eoi) { |
112 | spin_lock(&irq_controller_lock); | ||
113 | gic_arch_extn.irq_eoi(d); | ||
114 | spin_unlock(&irq_controller_lock); | ||
115 | } | ||
105 | 116 | ||
106 | spin_lock(&irq_controller_lock); | 117 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); |
107 | writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4); | ||
108 | spin_unlock(&irq_controller_lock); | ||
109 | } | 118 | } |
110 | 119 | ||
111 | static int gic_set_type(unsigned int irq, unsigned int type) | 120 | static int gic_set_type(struct irq_data *d, unsigned int type) |
112 | { | 121 | { |
113 | void __iomem *base = gic_dist_base(irq); | 122 | void __iomem *base = gic_dist_base(d); |
114 | unsigned int gicirq = gic_irq(irq); | 123 | unsigned int gicirq = gic_irq(d); |
115 | u32 enablemask = 1 << (gicirq % 32); | 124 | u32 enablemask = 1 << (gicirq % 32); |
116 | u32 enableoff = (gicirq / 32) * 4; | 125 | u32 enableoff = (gicirq / 32) * 4; |
117 | u32 confmask = 0x2 << ((gicirq % 16) * 2); | 126 | u32 confmask = 0x2 << ((gicirq % 16) * 2); |
@@ -128,7 +137,10 @@ static int gic_set_type(unsigned int irq, unsigned int type) | |||
128 | 137 | ||
129 | spin_lock(&irq_controller_lock); | 138 | spin_lock(&irq_controller_lock); |
130 | 139 | ||
131 | val = readl(base + GIC_DIST_CONFIG + confoff); | 140 | if (gic_arch_extn.irq_set_type) |
141 | gic_arch_extn.irq_set_type(d, type); | ||
142 | |||
143 | val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); | ||
132 | if (type == IRQ_TYPE_LEVEL_HIGH) | 144 | if (type == IRQ_TYPE_LEVEL_HIGH) |
133 | val &= ~confmask; | 145 | val &= ~confmask; |
134 | else if (type == IRQ_TYPE_EDGE_RISING) | 146 | else if (type == IRQ_TYPE_EDGE_RISING) |
@@ -138,52 +150,80 @@ static int gic_set_type(unsigned int irq, unsigned int type) | |||
138 | * As recommended by the spec, disable the interrupt before changing | 150 | * As recommended by the spec, disable the interrupt before changing |
139 | * the configuration | 151 | * the configuration |
140 | */ | 152 | */ |
141 | if (readl(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { | 153 | if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { |
142 | writel(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); | 154 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); |
143 | enabled = true; | 155 | enabled = true; |
144 | } | 156 | } |
145 | 157 | ||
146 | writel(val, base + GIC_DIST_CONFIG + confoff); | 158 | writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); |
147 | 159 | ||
148 | if (enabled) | 160 | if (enabled) |
149 | writel(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); | 161 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); |
150 | 162 | ||
151 | spin_unlock(&irq_controller_lock); | 163 | spin_unlock(&irq_controller_lock); |
152 | 164 | ||
153 | return 0; | 165 | return 0; |
154 | } | 166 | } |
155 | 167 | ||
168 | static int gic_retrigger(struct irq_data *d) | ||
169 | { | ||
170 | if (gic_arch_extn.irq_retrigger) | ||
171 | return gic_arch_extn.irq_retrigger(d); | ||
172 | |||
173 | return -ENXIO; | ||
174 | } | ||
175 | |||
156 | #ifdef CONFIG_SMP | 176 | #ifdef CONFIG_SMP |
157 | static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val) | 177 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
178 | bool force) | ||
158 | { | 179 | { |
159 | void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3); | 180 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
160 | unsigned int shift = (irq % 4) * 8; | 181 | unsigned int shift = (d->irq % 4) * 8; |
161 | unsigned int cpu = cpumask_first(mask_val); | 182 | unsigned int cpu = cpumask_first(mask_val); |
162 | u32 val; | 183 | u32 val, mask, bit; |
184 | |||
185 | if (cpu >= 8) | ||
186 | return -EINVAL; | ||
187 | |||
188 | mask = 0xff << shift; | ||
189 | bit = 1 << (cpu + shift); | ||
163 | 190 | ||
164 | spin_lock(&irq_controller_lock); | 191 | spin_lock(&irq_controller_lock); |
165 | irq_desc[irq].node = cpu; | 192 | d->node = cpu; |
166 | val = readl(reg) & ~(0xff << shift); | 193 | val = readl_relaxed(reg) & ~mask; |
167 | val |= 1 << (cpu + shift); | 194 | writel_relaxed(val | bit, reg); |
168 | writel(val, reg); | ||
169 | spin_unlock(&irq_controller_lock); | 195 | spin_unlock(&irq_controller_lock); |
170 | 196 | ||
171 | return 0; | 197 | return 0; |
172 | } | 198 | } |
173 | #endif | 199 | #endif |
174 | 200 | ||
201 | #ifdef CONFIG_PM | ||
202 | static int gic_set_wake(struct irq_data *d, unsigned int on) | ||
203 | { | ||
204 | int ret = -ENXIO; | ||
205 | |||
206 | if (gic_arch_extn.irq_set_wake) | ||
207 | ret = gic_arch_extn.irq_set_wake(d, on); | ||
208 | |||
209 | return ret; | ||
210 | } | ||
211 | |||
212 | #else | ||
213 | #define gic_set_wake NULL | ||
214 | #endif | ||
215 | |||
175 | static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | 216 | static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) |
176 | { | 217 | { |
177 | struct gic_chip_data *chip_data = get_irq_data(irq); | 218 | struct gic_chip_data *chip_data = irq_get_handler_data(irq); |
178 | struct irq_chip *chip = get_irq_chip(irq); | 219 | struct irq_chip *chip = irq_get_chip(irq); |
179 | unsigned int cascade_irq, gic_irq; | 220 | unsigned int cascade_irq, gic_irq; |
180 | unsigned long status; | 221 | unsigned long status; |
181 | 222 | ||
182 | /* primary controller ack'ing */ | 223 | chained_irq_enter(chip, desc); |
183 | chip->ack(irq); | ||
184 | 224 | ||
185 | spin_lock(&irq_controller_lock); | 225 | spin_lock(&irq_controller_lock); |
186 | status = readl(chip_data->cpu_base + GIC_CPU_INTACK); | 226 | status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK); |
187 | spin_unlock(&irq_controller_lock); | 227 | spin_unlock(&irq_controller_lock); |
188 | 228 | ||
189 | gic_irq = (status & 0x3ff); | 229 | gic_irq = (status & 0x3ff); |
@@ -197,107 +237,153 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | |||
197 | generic_handle_irq(cascade_irq); | 237 | generic_handle_irq(cascade_irq); |
198 | 238 | ||
199 | out: | 239 | out: |
200 | /* primary controller unmasking */ | 240 | chained_irq_exit(chip, desc); |
201 | chip->unmask(irq); | ||
202 | } | 241 | } |
203 | 242 | ||
204 | static struct irq_chip gic_chip = { | 243 | static struct irq_chip gic_chip = { |
205 | .name = "GIC", | 244 | .name = "GIC", |
206 | .ack = gic_ack_irq, | 245 | .irq_mask = gic_mask_irq, |
207 | .mask = gic_mask_irq, | 246 | .irq_unmask = gic_unmask_irq, |
208 | .unmask = gic_unmask_irq, | 247 | .irq_eoi = gic_eoi_irq, |
209 | .set_type = gic_set_type, | 248 | .irq_set_type = gic_set_type, |
249 | .irq_retrigger = gic_retrigger, | ||
210 | #ifdef CONFIG_SMP | 250 | #ifdef CONFIG_SMP |
211 | .set_affinity = gic_set_cpu, | 251 | .irq_set_affinity = gic_set_affinity, |
212 | #endif | 252 | #endif |
253 | .irq_set_wake = gic_set_wake, | ||
213 | }; | 254 | }; |
214 | 255 | ||
215 | void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) | 256 | void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) |
216 | { | 257 | { |
217 | if (gic_nr >= MAX_GIC_NR) | 258 | if (gic_nr >= MAX_GIC_NR) |
218 | BUG(); | 259 | BUG(); |
219 | if (set_irq_data(irq, &gic_data[gic_nr]) != 0) | 260 | if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) |
220 | BUG(); | 261 | BUG(); |
221 | set_irq_chained_handler(irq, gic_handle_cascade_irq); | 262 | irq_set_chained_handler(irq, gic_handle_cascade_irq); |
222 | } | 263 | } |
223 | 264 | ||
224 | void __init gic_dist_init(unsigned int gic_nr, void __iomem *base, | 265 | static void __init gic_dist_init(struct gic_chip_data *gic, |
225 | unsigned int irq_start) | 266 | unsigned int irq_start) |
226 | { | 267 | { |
227 | unsigned int max_irq, i; | 268 | unsigned int gic_irqs, irq_limit, i; |
269 | void __iomem *base = gic->dist_base; | ||
228 | u32 cpumask = 1 << smp_processor_id(); | 270 | u32 cpumask = 1 << smp_processor_id(); |
229 | 271 | ||
230 | if (gic_nr >= MAX_GIC_NR) | ||
231 | BUG(); | ||
232 | |||
233 | cpumask |= cpumask << 8; | 272 | cpumask |= cpumask << 8; |
234 | cpumask |= cpumask << 16; | 273 | cpumask |= cpumask << 16; |
235 | 274 | ||
236 | gic_data[gic_nr].dist_base = base; | 275 | writel_relaxed(0, base + GIC_DIST_CTRL); |
237 | gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31; | ||
238 | |||
239 | writel(0, base + GIC_DIST_CTRL); | ||
240 | 276 | ||
241 | /* | 277 | /* |
242 | * Find out how many interrupts are supported. | 278 | * Find out how many interrupts are supported. |
243 | */ | ||
244 | max_irq = readl(base + GIC_DIST_CTR) & 0x1f; | ||
245 | max_irq = (max_irq + 1) * 32; | ||
246 | |||
247 | /* | ||
248 | * The GIC only supports up to 1020 interrupt sources. | 279 | * The GIC only supports up to 1020 interrupt sources. |
249 | * Limit this to either the architected maximum, or the | ||
250 | * platform maximum. | ||
251 | */ | 280 | */ |
252 | if (max_irq > max(1020, NR_IRQS)) | 281 | gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f; |
253 | max_irq = max(1020, NR_IRQS); | 282 | gic_irqs = (gic_irqs + 1) * 32; |
283 | if (gic_irqs > 1020) | ||
284 | gic_irqs = 1020; | ||
254 | 285 | ||
255 | /* | 286 | /* |
256 | * Set all global interrupts to be level triggered, active low. | 287 | * Set all global interrupts to be level triggered, active low. |
257 | */ | 288 | */ |
258 | for (i = 32; i < max_irq; i += 16) | 289 | for (i = 32; i < gic_irqs; i += 16) |
259 | writel(0, base + GIC_DIST_CONFIG + i * 4 / 16); | 290 | writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16); |
260 | 291 | ||
261 | /* | 292 | /* |
262 | * Set all global interrupts to this CPU only. | 293 | * Set all global interrupts to this CPU only. |
263 | */ | 294 | */ |
264 | for (i = 32; i < max_irq; i += 4) | 295 | for (i = 32; i < gic_irqs; i += 4) |
265 | writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); | 296 | writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); |
297 | |||
298 | /* | ||
299 | * Set priority on all global interrupts. | ||
300 | */ | ||
301 | for (i = 32; i < gic_irqs; i += 4) | ||
302 | writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); | ||
266 | 303 | ||
267 | /* | 304 | /* |
268 | * Set priority on all interrupts. | 305 | * Disable all interrupts. Leave the PPI and SGIs alone |
306 | * as these enables are banked registers. | ||
269 | */ | 307 | */ |
270 | for (i = 0; i < max_irq; i += 4) | 308 | for (i = 32; i < gic_irqs; i += 32) |
271 | writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); | 309 | writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); |
272 | 310 | ||
273 | /* | 311 | /* |
274 | * Disable all interrupts. | 312 | * Limit number of interrupts registered to the platform maximum |
275 | */ | 313 | */ |
276 | for (i = 0; i < max_irq; i += 32) | 314 | irq_limit = gic->irq_offset + gic_irqs; |
277 | writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); | 315 | if (WARN_ON(irq_limit > NR_IRQS)) |
316 | irq_limit = NR_IRQS; | ||
278 | 317 | ||
279 | /* | 318 | /* |
280 | * Setup the Linux IRQ subsystem. | 319 | * Setup the Linux IRQ subsystem. |
281 | */ | 320 | */ |
282 | for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) { | 321 | for (i = irq_start; i < irq_limit; i++) { |
283 | set_irq_chip(i, &gic_chip); | 322 | irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq); |
284 | set_irq_chip_data(i, &gic_data[gic_nr]); | 323 | irq_set_chip_data(i, gic); |
285 | set_irq_handler(i, handle_level_irq); | ||
286 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 324 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
287 | } | 325 | } |
288 | 326 | ||
289 | writel(1, base + GIC_DIST_CTRL); | 327 | writel_relaxed(1, base + GIC_DIST_CTRL); |
290 | } | 328 | } |
291 | 329 | ||
292 | void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base) | 330 | static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) |
293 | { | 331 | { |
294 | if (gic_nr >= MAX_GIC_NR) | 332 | void __iomem *dist_base = gic->dist_base; |
295 | BUG(); | 333 | void __iomem *base = gic->cpu_base; |
334 | int i; | ||
296 | 335 | ||
297 | gic_data[gic_nr].cpu_base = base; | 336 | /* |
337 | * Deal with the banked PPI and SGI interrupts - disable all | ||
338 | * PPI interrupts, ensure all SGI interrupts are enabled. | ||
339 | */ | ||
340 | writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); | ||
341 | writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); | ||
342 | |||
343 | /* | ||
344 | * Set priority on PPI and SGI interrupts | ||
345 | */ | ||
346 | for (i = 0; i < 32; i += 4) | ||
347 | writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); | ||
298 | 348 | ||
299 | writel(0xf0, base + GIC_CPU_PRIMASK); | 349 | writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); |
300 | writel(1, base + GIC_CPU_CTRL); | 350 | writel_relaxed(1, base + GIC_CPU_CTRL); |
351 | } | ||
352 | |||
353 | void __init gic_init(unsigned int gic_nr, unsigned int irq_start, | ||
354 | void __iomem *dist_base, void __iomem *cpu_base) | ||
355 | { | ||
356 | struct gic_chip_data *gic; | ||
357 | |||
358 | BUG_ON(gic_nr >= MAX_GIC_NR); | ||
359 | |||
360 | gic = &gic_data[gic_nr]; | ||
361 | gic->dist_base = dist_base; | ||
362 | gic->cpu_base = cpu_base; | ||
363 | gic->irq_offset = (irq_start - 1) & ~31; | ||
364 | |||
365 | if (gic_nr == 0) | ||
366 | gic_cpu_base_addr = cpu_base; | ||
367 | |||
368 | gic_dist_init(gic, irq_start); | ||
369 | gic_cpu_init(gic); | ||
370 | } | ||
371 | |||
372 | void __cpuinit gic_secondary_init(unsigned int gic_nr) | ||
373 | { | ||
374 | BUG_ON(gic_nr >= MAX_GIC_NR); | ||
375 | |||
376 | gic_cpu_init(&gic_data[gic_nr]); | ||
377 | } | ||
378 | |||
379 | void __cpuinit gic_enable_ppi(unsigned int irq) | ||
380 | { | ||
381 | unsigned long flags; | ||
382 | |||
383 | local_irq_save(flags); | ||
384 | irq_set_status_flags(irq, IRQ_NOPROBE); | ||
385 | gic_unmask_irq(irq_get_irq_data(irq)); | ||
386 | local_irq_restore(flags); | ||
301 | } | 387 | } |
302 | 388 | ||
303 | #ifdef CONFIG_SMP | 389 | #ifdef CONFIG_SMP |
@@ -305,7 +391,13 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
305 | { | 391 | { |
306 | unsigned long map = *cpus_addr(*mask); | 392 | unsigned long map = *cpus_addr(*mask); |
307 | 393 | ||
394 | /* | ||
395 | * Ensure that stores to Normal memory are visible to the | ||
396 | * other CPUs before issuing the IPI. | ||
397 | */ | ||
398 | dsb(); | ||
399 | |||
308 | /* this always happens on GIC0 */ | 400 | /* this always happens on GIC0 */ |
309 | writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT); | 401 | writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT); |
310 | } | 402 | } |
311 | #endif | 403 | #endif |