diff options
author | David S. Miller <davem@davemloft.net> | 2017-10-22 08:36:53 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-10-22 08:39:14 -0400 |
commit | f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2 (patch) | |
tree | 0a6432aba336bae42313613f4c891bcfce02bd4e /kernel/irq/cpuhotplug.c | |
parent | bdd091bab8c631bd2801af838e344fad34566410 (diff) | |
parent | b5ac3beb5a9f0ef0ea64cd85faf94c0dc4de0e42 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
There were quite a few overlapping sets of changes here.
Daniel's bug fix for off-by-ones in the new BPF branch instructions,
along with the added allowances for "data_end > ptr + x" forms
collided with the metadata additions.
Along with those three changes came verifier test cases, which in
their final form I tried to group together properly. If I had just
trimmed GIT's conflict tags as-is, this would have split up the
meta tests unnecessarily.
In the socketmap code, a set of preemption disabling changes
overlapped with the rename of bpf_compute_data_end() to
bpf_compute_data_pointers().
Changes were made to the mv88e6060.c driver set addr method
which got removed in net-next.
The hyperv transport socket layer had a locking change in 'net'
which overlapped with a change of socket state macro usage
in 'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/irq/cpuhotplug.c')
-rw-r--r-- | kernel/irq/cpuhotplug.c | 28 |
1 files changed, 27 insertions, 1 deletions
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 638eb9c83d9f..9eb09aef0313 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c | |||
@@ -18,8 +18,34 @@ | |||
18 | static inline bool irq_needs_fixup(struct irq_data *d) | 18 | static inline bool irq_needs_fixup(struct irq_data *d) |
19 | { | 19 | { |
20 | const struct cpumask *m = irq_data_get_effective_affinity_mask(d); | 20 | const struct cpumask *m = irq_data_get_effective_affinity_mask(d); |
21 | unsigned int cpu = smp_processor_id(); | ||
21 | 22 | ||
22 | return cpumask_test_cpu(smp_processor_id(), m); | 23 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK |
24 | /* | ||
25 | * The cpumask_empty() check is a workaround for interrupt chips, | ||
26 | * which do not implement effective affinity, but the architecture has | ||
27 | * enabled the config switch. Use the general affinity mask instead. | ||
28 | */ | ||
29 | if (cpumask_empty(m)) | ||
30 | m = irq_data_get_affinity_mask(d); | ||
31 | |||
32 | /* | ||
33 | * Sanity check. If the mask is not empty when excluding the outgoing | ||
34 | * CPU then it must contain at least one online CPU. The outgoing CPU | ||
35 | * has been removed from the online mask already. | ||
36 | */ | ||
37 | if (cpumask_any_but(m, cpu) < nr_cpu_ids && | ||
38 | cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) { | ||
39 | /* | ||
40 | * If this happens then there was a missed IRQ fixup at some | ||
41 | * point. Warn about it and enforce fixup. | ||
42 | */ | ||
43 | pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", | ||
44 | cpumask_pr_args(m), d->irq, cpu); | ||
45 | return true; | ||
46 | } | ||
47 | #endif | ||
48 | return cpumask_test_cpu(cpu, m); | ||
23 | } | 49 | } |
24 | 50 | ||
25 | static bool migrate_one_irq(struct irq_desc *desc) | 51 | static bool migrate_one_irq(struct irq_desc *desc) |