| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2017-02-26 15:34:42 -0500 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2017-02-26 15:34:42 -0500 |
| commit | 8e22e1b3499a446df48c2b26667ca36c55bf864c (patch) | |
| tree | 5329f98b3eb3c95a9dcbab0fa4f9b6e62f0e788d /arch/arc/kernel/mcip.c | |
| parent | 00d3c14f14d51babd8aeafd5fa734ccf04f5ca3d (diff) | |
| parent | 64a577196d66b44e37384bc5c4d78c61f59d5b2a (diff) | |
Merge airlied/drm-next into drm-misc-next
Backmerge the main pull request to sync up with all the newly landed
drivers. Otherwise we'll have chaos even before 4.12 has started in
earnest.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'arch/arc/kernel/mcip.c')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/arc/kernel/mcip.c | 55 |

1 file changed, 23 insertions(+), 32 deletions(-)
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 9274f8ade8c7..9f6b68fd4f3b 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 
 	sprintf(smp_cpuinfo_buf,
-		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
 		mp.ver, mp.num_cores,
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
-		IS_AVAIL1(mp.llm, "LLM "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in IDU must be set manually since
+	 * in some cases the kernel will not call irq_set_affinity() by itself:
+	 * 1. When the kernel is not configured with support of SMP.
+	 * 2. When the kernel is configured with support of SMP but upper
+	 *    interrupt controllers does not support setting of the affinity
+	 *    and cannot propagate it to IDU.
+	 */
+	idu_irq_set_affinity(data, cpu_online_mask, false);
+	idu_irq_unmask(data);
+}
 
 static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_enable		= idu_irq_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= idu_irq_set_affinity,
 #endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
 			 const u32 *intspec, unsigned int intsize,
 			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-	int distri = intspec[1];
-	unsigned long flags;
-
+	/*
+	 * Ignore value of interrupt distribution mode for common interrupts in
+	 * IDU which resides in intspec[1] since setting an affinity using value
+	 * from Device Tree is deprecated in ARC.
+	 */
+	*out_hwirq = intspec[0];
 	*out_type = IRQ_TYPE_NONE;
 
-	/* XXX: validate distribution scheme again online cpu mask */
-	if (distri == 0) {
-		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	} else {
-		/*
-		 * DEST based distribution for Level Triggered intr can only
-		 * have 1 CPU, so generalize it to always contain 1 cpu
-		 */
-		int cpu = ffs(distri);
-
-		if (cpu != fls(distri))
-			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-				hwirq, cpu);
-
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, cpu);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	}
-
 	return 0;
 }
 
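For readers skimming the mcip.c hunks above: the reason a bare `.irq_enable` hook is enough to fix the default affinity is that the genirq core prefers a chip's `irq_enable` callback when an interrupt is started up and falls back to `irq_unmask` only when no `irq_enable` is provided. The following is a minimal, self-contained sketch of that dispatch pattern, not kernel code; the `fake_*` types and functions are hypothetical stand-ins introduced only for illustration.

```c
/*
 * Hypothetical sketch of the pattern the patch relies on: if a chip
 * provides .irq_enable, the core uses it at startup instead of plain
 * .irq_unmask, so an IDU-like chip can force a default "all online
 * CPUs" affinity before the line is first unmasked.
 */
#include <stdio.h>

struct fake_irq_data {
	unsigned int hwirq;
	unsigned long affinity;	/* bitmask of target CPUs */
	int masked;
};

struct fake_irq_chip {
	void (*irq_enable)(struct fake_irq_data *d);	/* optional */
	void (*irq_unmask)(struct fake_irq_data *d);
};

static const unsigned long fake_online_mask = 0xf;	/* pretend CPUs 0-3 are online */

static void idu_like_unmask(struct fake_irq_data *d)
{
	d->masked = 0;
	printf("irq %u unmasked\n", d->hwirq);
}

static void idu_like_enable(struct fake_irq_data *d)
{
	/* Mirrors idu_irq_enable(): set a sane default affinity, then unmask. */
	d->affinity = fake_online_mask;
	idu_like_unmask(d);
}

/* Simplified stand-in for the genirq startup path. */
static void fake_irq_startup(struct fake_irq_chip *chip, struct fake_irq_data *d)
{
	if (chip->irq_enable)
		chip->irq_enable(d);
	else
		chip->irq_unmask(d);
}

int main(void)
{
	struct fake_irq_chip chip = {
		.irq_enable = idu_like_enable,
		.irq_unmask = idu_like_unmask,
	};
	struct fake_irq_data irq = { .hwirq = 3, .affinity = 0, .masked = 1 };

	fake_irq_startup(&chip, &irq);
	printf("irq %u affinity mask 0x%lx\n", irq.hwirq, irq.affinity);
	return 0;
}
```

In the real driver the same ordering (set affinity, then unmask) covers both kernels built without SMP and parent interrupt controllers that cannot propagate affinity down to the IDU, which is exactly what the new comment in idu_irq_enable() spells out.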