about summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorSuresh Siddha <suresh.b.siddha@intel.com>2012-06-25 16:38:28 -0400
committerIngo Molnar <mingo@kernel.org>2012-07-06 05:00:22 -0400
commit1ac322d0b169c95ce34d55b3ed6d40ce1a5f3a02 (patch)
tree21a911ca299c18c3c29569af1abf768230c7990a /arch/x86/kernel
parentb39f25a849d7677a7dbf183f2483fd41c201a5ce (diff)
x86/apic/x2apic: Limit the vector reservation to the user specified mask
For the x2apic cluster mode, the vector for an interrupt is currently reserved on all the cpus that are part of the x2apic cluster. But the interrupts will be routed only to the cluster (derived from the first cpu in the mask) members specified in the mask. So there is no need to reserve the vector in the unused cluster members.

Modify __assign_irq_vector() to reserve the vectors based on the user specified irq destination mask. If the new mask is a proper subset of the currently used mask, clean up the vector allocation on the unused cpu members.

Also, allow the apic driver to tune the vector domain based on the affinity mask (which in most cases is the user-specified mask).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: Alexander Gordeev <agordeev@redhat.com>
Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
Link: http://lkml.kernel.org/r/1340656709-11423-3-git-send-email-suresh.b.siddha@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/apic/apic_noop.c3
-rw-r--r--arch/x86/kernel/apic/io_apic.c31
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c6
-rw-r--r--arch/x86/kernel/vsmp_64.c3
4 files changed, 22 insertions, 21 deletions
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 08c337bc49ff..e145f28b4099 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -100,7 +100,8 @@ static unsigned long noop_check_apicid_present(int bit)
100 return physid_isset(bit, phys_cpu_present_map); 100 return physid_isset(bit, phys_cpu_present_map);
101} 101}
102 102
103static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) 103static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
104 const struct cpumask *mask)
104{ 105{
105 if (cpu != 0) 106 if (cpu != 0)
106 pr_warning("APIC: Vector allocated for non-BSP cpu\n"); 107 pr_warning("APIC: Vector allocated for non-BSP cpu\n");
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8a08f09aa505..9684f963befe 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1113,7 +1113,6 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1113 */ 1113 */
1114 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; 1114 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
1115 static int current_offset = VECTOR_OFFSET_START % 16; 1115 static int current_offset = VECTOR_OFFSET_START % 16;
1116 unsigned int old_vector;
1117 int cpu, err; 1116 int cpu, err;
1118 cpumask_var_t tmp_mask; 1117 cpumask_var_t tmp_mask;
1119 1118
@@ -1123,28 +1122,28 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1123 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) 1122 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
1124 return -ENOMEM; 1123 return -ENOMEM;
1125 1124
1126 old_vector = cfg->vector;
1127 if (old_vector) {
1128 cpumask_and(tmp_mask, mask, cpu_online_mask);
1129 if (cpumask_subset(tmp_mask, cfg->domain)) {
1130 free_cpumask_var(tmp_mask);
1131 return 0;
1132 }
1133 }
1134
1135 /* Only try and allocate irqs on cpus that are present */ 1125 /* Only try and allocate irqs on cpus that are present */
1136 err = -ENOSPC; 1126 err = -ENOSPC;
1137 cpumask_clear(cfg->old_domain); 1127 cpumask_clear(cfg->old_domain);
1138 cpu = cpumask_first_and(mask, cpu_online_mask); 1128 cpu = cpumask_first_and(mask, cpu_online_mask);
1139 while (cpu < nr_cpu_ids) { 1129 while (cpu < nr_cpu_ids) {
1140 int new_cpu; 1130 int new_cpu, vector, offset;
1141 int vector, offset;
1142 1131
1143 apic->vector_allocation_domain(cpu, tmp_mask); 1132 apic->vector_allocation_domain(cpu, tmp_mask, mask);
1144 1133
1145 if (cpumask_subset(tmp_mask, cfg->domain)) { 1134 if (cpumask_subset(tmp_mask, cfg->domain)) {
1146 free_cpumask_var(tmp_mask); 1135 err = 0;
1147 return 0; 1136 if (cpumask_equal(tmp_mask, cfg->domain))
1137 break;
1138 /*
1139 * New cpumask using the vector is a proper subset of
1140 * the current in use mask. So cleanup the vector
1141 * allocation for the members that are not used anymore.
1142 */
1143 cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
1144 cfg->move_in_progress = 1;
1145 cpumask_and(cfg->domain, cfg->domain, tmp_mask);
1146 break;
1148 } 1147 }
1149 1148
1150 vector = current_vector; 1149 vector = current_vector;
@@ -1172,7 +1171,7 @@ next:
1172 /* Found one! */ 1171 /* Found one! */
1173 current_vector = vector; 1172 current_vector = vector;
1174 current_offset = offset; 1173 current_offset = offset;
1175 if (old_vector) { 1174 if (cfg->vector) {
1176 cfg->move_in_progress = 1; 1175 cfg->move_in_progress = 1;
1177 cpumask_copy(cfg->old_domain, cfg->domain); 1176 cpumask_copy(cfg->old_domain, cfg->domain);
1178 } 1177 }
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index b5d889b5659a..bde78d0098a4 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -212,10 +212,10 @@ static int x2apic_cluster_probe(void)
212/* 212/*
213 * Each x2apic cluster is an allocation domain. 213 * Each x2apic cluster is an allocation domain.
214 */ 214 */
215static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask) 215static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
216 const struct cpumask *mask)
216{ 217{
217 cpumask_clear(retmask); 218 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
218 cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
219} 219}
220 220
221static struct apic apic_x2apic_cluster = { 221static struct apic apic_x2apic_cluster = {
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 3f0285ac00fa..992f890283e9 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -208,7 +208,8 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
208 * In vSMP, all cpus should be capable of handling interrupts, regardless of 208 * In vSMP, all cpus should be capable of handling interrupts, regardless of
209 * the APIC used. 209 * the APIC used.
210 */ 210 */
211static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask) 211static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
212 const struct cpumask *mask)
212{ 213{
213 cpumask_setall(retmask); 214 cpumask_setall(retmask);
214} 215}