diff options
-rw-r--r-- | arch/x86/include/asm/x2apic.h | 9 | ||||
-rw-r--r-- | arch/x86/kernel/apic/x2apic_cluster.c | 56 | ||||
-rw-r--r-- | arch/x86/kernel/apic/x2apic_phys.c | 9 |
3 files changed, 48 insertions, 26 deletions
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h index 92e54abf89e0..7a5a832a99b6 100644 --- a/arch/x86/include/asm/x2apic.h +++ b/arch/x86/include/asm/x2apic.h | |||
@@ -28,15 +28,6 @@ static int x2apic_apic_id_registered(void) | |||
28 | return 1; | 28 | return 1; |
29 | } | 29 | } |
30 | 30 | ||
31 | /* | ||
32 | * For now each logical cpu is in its own vector allocation domain. | ||
33 | */ | ||
34 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) | ||
35 | { | ||
36 | cpumask_clear(retmask); | ||
37 | cpumask_set_cpu(cpu, retmask); | ||
38 | } | ||
39 | |||
40 | static void | 31 | static void |
41 | __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) | 32 | __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) |
42 | { | 33 | { |
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index ff35cff0e1a7..90d999c7f2ea 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -98,34 +98,47 @@ static void x2apic_send_IPI_all(int vector) | |||
98 | 98 | ||
99 | static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) | 99 | static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) |
100 | { | 100 | { |
101 | /* | ||
102 | * We're using fixed IRQ delivery, can only return one logical APIC ID. | ||
103 | * May as well be the first. | ||
104 | */ | ||
105 | int cpu = cpumask_first(cpumask); | 101 | int cpu = cpumask_first(cpumask); |
102 | u32 dest = 0; | ||
103 | int i; | ||
106 | 104 | ||
107 | if ((unsigned)cpu < nr_cpu_ids) | 105 | if (cpu >= nr_cpu_ids)
108 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
109 | else | ||
110 | return BAD_APICID; | 106 | return BAD_APICID; |
107 | |||
108 | for_each_cpu_and(i, cpumask, per_cpu(cpus_in_cluster, cpu)) | ||
109 | dest |= per_cpu(x86_cpu_to_logical_apicid, i); | ||
110 | |||
111 | return dest; | ||
111 | } | 112 | } |
112 | 113 | ||
113 | static unsigned int | 114 | static unsigned int |
114 | x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | 115 | x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, |
115 | const struct cpumask *andmask) | 116 | const struct cpumask *andmask) |
116 | { | 117 | { |
117 | int cpu; | 118 | u32 dest = 0; |
119 | u16 cluster; | ||
120 | int i; | ||
118 | 121 | ||
119 | /* | 122 | for_each_cpu_and(i, cpumask, andmask) { |
120 | * We're using fixed IRQ delivery, can only return one logical APIC ID. | 123 | if (!cpumask_test_cpu(i, cpu_online_mask)) |
121 | * May as well be the first. | 124 | continue; |
122 | */ | 125 | dest = per_cpu(x86_cpu_to_logical_apicid, i); |
123 | for_each_cpu_and(cpu, cpumask, andmask) { | 126 | cluster = x2apic_cluster(i); |
124 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | 127 | break; |
125 | break; | ||
126 | } | 128 | } |
127 | 129 | ||
128 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | 130 | if (!dest) |
131 | return BAD_APICID; | ||
132 | |||
133 | for_each_cpu_and(i, cpumask, andmask) { | ||
134 | if (!cpumask_test_cpu(i, cpu_online_mask)) | ||
135 | continue; | ||
136 | if (cluster != x2apic_cluster(i)) | ||
137 | continue; | ||
138 | dest |= per_cpu(x86_cpu_to_logical_apicid, i); | ||
139 | } | ||
140 | |||
141 | return dest; | ||
129 | } | 142 | } |
130 | 143 | ||
131 | static void init_x2apic_ldr(void) | 144 | static void init_x2apic_ldr(void) |
@@ -208,6 +221,15 @@ static int x2apic_cluster_probe(void) | |||
208 | return 0; | 221 | return 0; |
209 | } | 222 | } |
210 | 223 | ||
224 | /* | ||
225 | * Each x2apic cluster is an allocation domain. | ||
226 | */ | ||
227 | static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask) | ||
228 | { | ||
229 | cpumask_clear(retmask); | ||
230 | cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu)); | ||
231 | } | ||
232 | |||
211 | static struct apic apic_x2apic_cluster = { | 233 | static struct apic apic_x2apic_cluster = { |
212 | 234 | ||
213 | .name = "cluster x2apic", | 235 | .name = "cluster x2apic", |
@@ -225,7 +247,7 @@ static struct apic apic_x2apic_cluster = { | |||
225 | .check_apicid_used = NULL, | 247 | .check_apicid_used = NULL, |
226 | .check_apicid_present = NULL, | 248 | .check_apicid_present = NULL, |
227 | 249 | ||
228 | .vector_allocation_domain = x2apic_vector_allocation_domain, | 250 | .vector_allocation_domain = cluster_vector_allocation_domain, |
229 | .init_apic_ldr = init_x2apic_ldr, | 251 | .init_apic_ldr = init_x2apic_ldr, |
230 | 252 | ||
231 | .ioapic_phys_id_map = NULL, | 253 | .ioapic_phys_id_map = NULL, |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index c17e982db275..93b25706f177 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -120,6 +120,15 @@ static int x2apic_phys_probe(void) | |||
120 | return apic == &apic_x2apic_phys; | 120 | return apic == &apic_x2apic_phys; |
121 | } | 121 | } |
122 | 122 | ||
123 | /* | ||
124 | * Each logical cpu is in its own vector allocation domain. | ||
125 | */ | ||
126 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) | ||
127 | { | ||
128 | cpumask_clear(retmask); | ||
129 | cpumask_set_cpu(cpu, retmask); | ||
130 | } | ||
131 | |||
123 | static struct apic apic_x2apic_phys = { | 132 | static struct apic apic_x2apic_phys = { |
124 | 133 | ||
125 | .name = "physical x2apic", | 134 | .name = "physical x2apic", |