author     Mike Travis <travis@sgi.com>   2008-12-16 20:33:59 -0500
committer  Mike Travis <travis@sgi.com>   2008-12-16 20:40:57 -0500
commit     bcda016eddd7a8b374bb371473c821a91ff1d8cc
tree       9335614036937765c385479d707ef7327fca7d67 /arch/x86/kernel/genx2apic_cluster.c
parent     d7b381bb7b1ad69ff008ea063d26e988b686c8de
x86: cosmetic changes apic-related files.
This patch simply changes cpumask_t to struct cpumask and makes other
similarly trivial modernizations.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
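For readers skimming the diff below, the conversion pattern is the same throughout the series: old-style code treats a cpumask_t as a value and manipulates it through a dereferenced pointer (cpus_clear(*mask), cpu_set(cpu, *mask), for_each_cpu_mask_nr(cpu, *mask)), while the new style passes struct cpumask pointers and uses the cpumask accessors (cpumask_clear(), cpumask_set_cpu(), for_each_cpu()). The sketch below is illustrative only and not part of the patch; it assumes a kernel build context, and the helper names fill_single_cpu_mask() and count_cpus_in() are made up for the example.

#include <linux/cpumask.h>

/*
 * Illustrative helper (not from this patch): put exactly one CPU into a
 * caller-supplied mask using the pointer-based accessors that this
 * series converts to.
 */
static void fill_single_cpu_mask(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);		/* old style: cpus_clear(*retmask)   */
	cpumask_set_cpu(cpu, retmask);	/* old style: cpu_set(cpu, *retmask) */
}

/*
 * Illustrative helper: iterate a mask.  Note that for_each_cpu() takes
 * the struct cpumask pointer itself, not the dereferenced value that
 * for_each_cpu_mask_nr() expected.
 */
static unsigned int count_cpus_in(const struct cpumask *mask)
{
	unsigned int cpu, n = 0;

	for_each_cpu(cpu, mask)		/* old style: for_each_cpu_mask_nr(cpu, *mask) */
		n++;
	return n;
}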
Diffstat (limited to 'arch/x86/kernel/genx2apic_cluster.c')
-rw-r--r--  arch/x86/kernel/genx2apic_cluster.c | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index e7d16f53b9cd..4716a0c9f936 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,18 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
 {
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -55,27 +55,28 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		__x2apic_send_IPI_dest(
 			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, APIC_DEST_LOGICAL);
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 	unsigned long this_cpu = smp_processor_id();
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		if (query_cpu != this_cpu)
 			__x2apic_send_IPI_dest(
 				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
@@ -100,7 +101,7 @@ static void x2apic_send_IPI_allbutself(int vector)
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(&cpu_online_map, vector);
+	x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -108,7 +109,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -116,7 +117,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
 	else
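One behavioral detail behind the last hunk: cpumask_first() returns a value greater than or equal to nr_cpu_ids when the mask is empty, so the (unsigned)cpu < nr_cpu_ids test doubles as an empty-mask check. A minimal sketch of that pattern follows; it is illustrative rather than part of the patch (first_logical_apicid_in() is a made-up name, and the BAD_APICID fallback is an assumption about the else branch, which the hunk cuts off).

#include <linux/cpumask.h>
#include <linux/percpu.h>

/*
 * Illustrative only: return the logical APIC ID of the first CPU in 'mask'.
 * cpumask_first() returns >= nr_cpu_ids for an empty mask, so the bounds
 * check below also catches that case.  x86_cpu_to_logical_apicid is the
 * per-cpu variable used by the file above; BAD_APICID is an assumed
 * fallback, not taken from the truncated hunk.
 */
static unsigned int first_logical_apicid_in(const struct cpumask *mask)
{
	int cpu = cpumask_first(mask);	/* old style: first_cpu(*mask) */

	if ((unsigned int)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);
	return BAD_APICID;
}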