about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorSuresh Siddha <suresh.b.siddha@intel.com>2012-05-21 19:58:02 -0400
committerIngo Molnar <mingo@kernel.org>2012-06-06 03:51:22 -0400
commit0b8255e660a0c229ebfe8f9fde12a8d4d34c50e0 (patch)
tree1cb345c46d2e20b857bfd04423d4686f919d2f0d
parent332afa656e76458ee9cf0f0d123016a0658539e4 (diff)
x86/x2apic/cluster: Use all the members of one cluster specified in the smp_affinity mask for the interrupt destination
If the HW implements round-robin interrupt delivery, this enables multiple cpu's (which are part of the user specified interrupt smp_affinity mask and belong to the same x2apic cluster) to service the interrupt. Also if the platform supports Power Aware Interrupt Routing, then this enables the interrupt to be routed to an idle cpu or a busy cpu depending on the perf/power bias tunable. We are now grouping all the cpu's in a cluster to one vector domain. So that will limit the total number of interrupt sources handled by Linux. Previously we supported "cpu-count * available-vectors-per-cpu" interrupt sources, but this will now reduce to "cpu-count/16 * available-vectors-per-cpu". Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Cc: yinghai@kernel.org Cc: gorcunov@openvz.org Cc: agordeev@redhat.com Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/1337644682-19854-2-git-send-email-suresh.b.siddha@intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/include/asm/x2apic.h9
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c56
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c9
3 files changed, 48 insertions, 26 deletions
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
index 92e54abf89e0..7a5a832a99b6 100644
--- a/arch/x86/include/asm/x2apic.h
+++ b/arch/x86/include/asm/x2apic.h
@@ -28,15 +28,6 @@ static int x2apic_apic_id_registered(void)
28 return 1; 28 return 1;
29} 29}
30 30
31/*
32 * For now each logical cpu is in its own vector allocation domain.
33 */
34static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
35{
36 cpumask_clear(retmask);
37 cpumask_set_cpu(cpu, retmask);
38}
39
40static void 31static void
41__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) 32__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
42{ 33{
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index ff35cff0e1a7..90d999c7f2ea 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -98,34 +98,47 @@ static void x2apic_send_IPI_all(int vector)
98 98
99static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 99static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
100{ 100{
101 /*
102 * We're using fixed IRQ delivery, can only return one logical APIC ID.
103 * May as well be the first.
104 */
105 int cpu = cpumask_first(cpumask); 101 int cpu = cpumask_first(cpumask);
102 u32 dest = 0;
103 int i;
106 104
107 if ((unsigned)cpu < nr_cpu_ids) 105 if (cpu > nr_cpu_ids)
108 return per_cpu(x86_cpu_to_logical_apicid, cpu);
109 else
110 return BAD_APICID; 106 return BAD_APICID;
107
108 for_each_cpu_and(i, cpumask, per_cpu(cpus_in_cluster, cpu))
109 dest |= per_cpu(x86_cpu_to_logical_apicid, i);
110
111 return dest;
111} 112}
112 113
113static unsigned int 114static unsigned int
114x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 115x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
115 const struct cpumask *andmask) 116 const struct cpumask *andmask)
116{ 117{
117 int cpu; 118 u32 dest = 0;
119 u16 cluster;
120 int i;
118 121
119 /* 122 for_each_cpu_and(i, cpumask, andmask) {
120 * We're using fixed IRQ delivery, can only return one logical APIC ID. 123 if (!cpumask_test_cpu(i, cpu_online_mask))
121 * May as well be the first. 124 continue;
122 */ 125 dest = per_cpu(x86_cpu_to_logical_apicid, i);
123 for_each_cpu_and(cpu, cpumask, andmask) { 126 cluster = x2apic_cluster(i);
124 if (cpumask_test_cpu(cpu, cpu_online_mask)) 127 break;
125 break;
126 } 128 }
127 129
128 return per_cpu(x86_cpu_to_logical_apicid, cpu); 130 if (!dest)
131 return BAD_APICID;
132
133 for_each_cpu_and(i, cpumask, andmask) {
134 if (!cpumask_test_cpu(i, cpu_online_mask))
135 continue;
136 if (cluster != x2apic_cluster(i))
137 continue;
138 dest |= per_cpu(x86_cpu_to_logical_apicid, i);
139 }
140
141 return dest;
129} 142}
130 143
131static void init_x2apic_ldr(void) 144static void init_x2apic_ldr(void)
@@ -208,6 +221,15 @@ static int x2apic_cluster_probe(void)
208 return 0; 221 return 0;
209} 222}
210 223
224/*
225 * Each x2apic cluster is an allocation domain.
226 */
227static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
228{
229 cpumask_clear(retmask);
230 cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
231}
232
211static struct apic apic_x2apic_cluster = { 233static struct apic apic_x2apic_cluster = {
212 234
213 .name = "cluster x2apic", 235 .name = "cluster x2apic",
@@ -225,7 +247,7 @@ static struct apic apic_x2apic_cluster = {
225 .check_apicid_used = NULL, 247 .check_apicid_used = NULL,
226 .check_apicid_present = NULL, 248 .check_apicid_present = NULL,
227 249
228 .vector_allocation_domain = x2apic_vector_allocation_domain, 250 .vector_allocation_domain = cluster_vector_allocation_domain,
229 .init_apic_ldr = init_x2apic_ldr, 251 .init_apic_ldr = init_x2apic_ldr,
230 252
231 .ioapic_phys_id_map = NULL, 253 .ioapic_phys_id_map = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index c17e982db275..93b25706f177 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -120,6 +120,15 @@ static int x2apic_phys_probe(void)
120 return apic == &apic_x2apic_phys; 120 return apic == &apic_x2apic_phys;
121} 121}
122 122
123/*
124 * Each logical cpu is in its own vector allocation domain.
125 */
126static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
127{
128 cpumask_clear(retmask);
129 cpumask_set_cpu(cpu, retmask);
130}
131
123static struct apic apic_x2apic_phys = { 132static struct apic apic_x2apic_phys = {
124 133
125 .name = "physical x2apic", 134 .name = "physical x2apic",