| field | value | date |
|---|---|---|
| author | Mike Travis <travis@sgi.com> | 2008-12-16 20:33:52 -0500 |
| committer | Mike Travis <travis@sgi.com> | 2008-12-16 20:40:56 -0500 |
| commit | e7986739a76cde5079da08809d8bbc6878387ae0 | |
| tree | dd99ed6af66d459fe164f75ded7f95262dc0fb0d /arch/x86/kernel/genx2apic_cluster.c | |
| parent | 36f5101a60de8f79c0d1ca06e50660bf5129e02c | |
x86 smp: modify send_IPI_mask interface to accept cpumask_t pointers
Impact: cleanup, change parameter passing
* Change genapic interfaces to accept cpumask_t pointers where possible.
* Modify external callers to use cpumask_t pointers in function calls.
* Create a new send_IPI_mask_allbutself, which is the same as the
  send_IPI_mask functions but leaves smp_processor_id() out of the list.
  This removes another common need for a temporary cpumask_t variable
  (see the sketch after this list).
* Functions that used a temp cpumask_t variable for:
cpumask_t allbutme = cpu_online_map;
cpu_clear(smp_processor_id(), allbutme);
if (!cpus_empty(allbutme))
...
become:
if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu)))
...
* Other minor code optimizations (like using cpus_clear instead of
CPU_MASK_NONE, etc.)
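For illustration only, here is a minimal sketch of that call-site conversion, assuming the x86 genapic pointer and the usual cpumask helpers of this era; kick_all_others_old()/kick_all_others_new() are hypothetical names, not functions from the tree:

```c
/* Hypothetical call site before the change: needs a cpumask_t copy on
 * the stack just to drop the local CPU from the online map. */
static void kick_all_others_old(int vector)
{
        cpumask_t allbutme = cpu_online_map;

        cpu_clear(smp_processor_id(), allbutme);
        if (!cpus_empty(allbutme))
                genapic->send_IPI_mask(allbutme, vector);
}

/* The same call site after the change: pass a pointer to the online
 * map and let the new hook skip the sending CPU itself. */
static void kick_all_others_new(int vector)
{
        if (!cpus_equal(cpu_online_map, cpumask_of_cpu(smp_processor_id())))
                genapic->send_IPI_mask_allbutself(&cpu_online_map, vector);
}
```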
Applies to linux-2.6.tip/master.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/genx2apic_cluster.c')
 arch/x86/kernel/genx2apic_cluster.c | 60 ++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 40 insertions(+), 20 deletions(-)
```diff
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index f6a2c8eb48a6..f5fa9a91ad38 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static cpumask_t x2apic_target_cpus(void)
+static const cpumask_t *x2apic_target_cpus(void)
 {
-        return cpumask_of_cpu(0);
+        return &cpumask_of_cpu(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-        cpumask_t domain = CPU_MASK_NONE;
-        cpu_set(cpu, domain);
-        return domain;
+        cpus_clear(*retmask);
+        cpu_set(cpu, *retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -56,32 +55,52 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
+static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
 {
         unsigned long flags;
         unsigned long query_cpu;
 
         local_irq_save(flags);
-        for_each_cpu_mask(query_cpu, mask) {
-                __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                                       vector, APIC_DEST_LOGICAL);
-        }
+        for_each_cpu_mask_nr(query_cpu, *mask)
+                __x2apic_send_IPI_dest(
+                        per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                        vector, APIC_DEST_LOGICAL);
         local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_allbutself(int vector)
+static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 {
-        cpumask_t mask = cpu_online_map;
+        unsigned long flags;
+        unsigned long query_cpu;
+        unsigned long this_cpu = smp_processor_id();
 
-        cpu_clear(smp_processor_id(), mask);
+        local_irq_save(flags);
+        for_each_cpu_mask_nr(query_cpu, *mask)
+                if (query_cpu != this_cpu)
+                        __x2apic_send_IPI_dest(
+                                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                                vector, APIC_DEST_LOGICAL);
+        local_irq_restore(flags);
+}
 
-        if (!cpus_empty(mask))
-                x2apic_send_IPI_mask(mask, vector);
+static void x2apic_send_IPI_allbutself(int vector)
+{
+        unsigned long flags;
+        unsigned long query_cpu;
+        unsigned long this_cpu = smp_processor_id();
+
+        local_irq_save(flags);
+        for_each_online_cpu(query_cpu)
+                if (query_cpu != this_cpu)
+                        __x2apic_send_IPI_dest(
+                                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                                vector, APIC_DEST_LOGICAL);
+        local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
 {
-        x2apic_send_IPI_mask(cpu_online_map, vector);
+        x2apic_send_IPI_mask(&cpu_online_map, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -89,7 +108,7 @@ static int x2apic_apic_id_registered(void)
         return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
         int cpu;
 
@@ -97,8 +116,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        cpu = first_cpu(cpumask);
-        if ((unsigned)cpu < NR_CPUS)
+        cpu = first_cpu(*cpumask);
+        if ((unsigned)cpu < nr_cpu_ids)
                 return per_cpu(x86_cpu_to_logical_apicid, cpu);
         else
                 return BAD_APICID;
@@ -150,6 +169,7 @@ struct genapic apic_x2apic_cluster = {
         .send_IPI_all = x2apic_send_IPI_all,
         .send_IPI_allbutself = x2apic_send_IPI_allbutself,
         .send_IPI_mask = x2apic_send_IPI_mask,
+        .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
         .send_IPI_self = x2apic_send_IPI_self,
         .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
         .phys_pkg_id = phys_pkg_id,
```
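Similarly, the vector_allocation_domain rework in the first hunk turns a by-value cpumask_t return into caller-supplied storage. A minimal sketch of what a consumer might now look like, assuming the hook is reached through the genapic pointer; setup_vector_for_cpu() and its body are illustrative assumptions, not code from this patch:

```c
/* Hypothetical consumer: the caller owns the cpumask storage, so no
 * cpumask_t is returned by value across the genapic interface. */
static void setup_vector_for_cpu(int cpu)
{
        cpumask_t domain;

        genapic->vector_allocation_domain(cpu, &domain);
        /* ... pick a free vector that is valid for every CPU in domain ... */
}
```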