Diffstat (limited to 'arch/x86/kernel/ipi.c')
-rw-r--r--  arch/x86/kernel/ipi.c | 190
1 file changed, 0 insertions(+), 190 deletions(-)
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
deleted file mode 100644
index 285bbf8831fa..000000000000
--- a/arch/x86/kernel/ipi.c
+++ /dev/null
@@ -1,190 +0,0 @@
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
-#include <linux/cache.h>
-#include <linux/cpu.h>
-#include <linux/module.h>
-
-#include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <asm/apic.h>
-#include <asm/proto.h>
-
-#ifdef CONFIG_X86_32
-#include <mach_apic.h>
-#include <mach_ipi.h>
-
-/*
- * The following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR(unsigned int shortcut, int vector)
-{
-	unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-
-	switch (vector) {
-	default:
-		icr |= APIC_DM_FIXED | vector;
-		break;
-	case NMI_VECTOR:
-		icr |= APIC_DM_NMI;
-		break;
-	}
-	return icr;
-}
-
-static inline int __prepare_ICR2(unsigned int mask)
-{
-	return SET_APIC_DEST_FIELD(mask);
-}
-
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-	/*
-	 * Subtle. In the case of the 'never do double writes' workaround
-	 * we have to lock out interrupts to be safe. As we don't care
-	 * about the value read, we use an atomic rmw access to avoid a
-	 * costly cli/sti. Otherwise we use an even cheaper single atomic
-	 * write to the APIC.
-	 */
-	unsigned int cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	apic_wait_icr_idle();
-
-	/*
-	 * No need to touch the target chip field.
-	 */
-	cfg = __prepare_ICR(shortcut, vector);
-
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write(APIC_ICR, cfg);
-}
-
-void send_IPI_self(int vector)
-{
-	__send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned long mask, int vector)
-{
-	unsigned long cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	if (unlikely(vector == NMI_VECTOR))
-		safe_apic_wait_icr_idle();
-	else
-		apic_wait_icr_idle();
-
-	/*
-	 * Prepare the target chip field.
-	 */
-	cfg = __prepare_ICR2(mask);
-	apic_write(APIC_ICR2, cfg);
-
-	/*
-	 * Program the ICR.
-	 */
-	cfg = __prepare_ICR(0, vector);
-
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write(APIC_ICR, cfg);
-}
-
-/*
- * This is only used on smaller machines.
- */
-void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
-{
-	unsigned long mask = cpumask_bits(cpumask)[0];
-	unsigned long flags;
-
-	local_irq_save(flags);
-	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-	__send_IPI_dest_field(mask, vector);
-	local_irq_restore(flags);
-}
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so we do a unicast to each CPU instead. This
-	 * should be modified to do one message per cluster ID - mbligh
-	 */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask)
-		__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
-	local_irq_restore(flags);
-}
-
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-	unsigned int this_cpu = smp_processor_id();
-
-	/* See the Hack comment above. */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask)
-		if (query_cpu != this_cpu)
-			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-					      vector);
-	local_irq_restore(flags);
-}
-
-/* Must come after the send_IPI functions above for inlining. */
-static int convert_apicid_to_cpu(int apic_id)
-{
-	int i;
-
-	for_each_possible_cpu(i) {
-		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
-			return i;
-	}
-	return -1;
-}
-
-int safe_smp_processor_id(void)
-{
-	int apicid, cpuid;
-
-	if (!boot_cpu_has(X86_FEATURE_APIC))
-		return 0;
-
-	apicid = hard_smp_processor_id();
-	if (apicid == BAD_APICID)
-		return 0;
-
-	cpuid = convert_apicid_to_cpu(apicid);
-
-	return cpuid >= 0 ? cpuid : 0;
-}
-#endif
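
For reference, the ICR programming that the deleted __send_IPI_dest_field()
performs can be illustrated in isolation. Below is a minimal user-space
sketch, assuming the register offsets and bit values that <asm/apicdef.h>
defined in this era (APIC_ICR at 0x300, APIC_ICR2 at 0x310, APIC_DEST_LOGICAL
at bit 11, APIC_DM_NMI at 0x400, destination field in bits 56-63 of the
64-bit ICR); its apic_write() is a hypothetical stand-in that prints the
register writes instead of performing them.

/*
 * Standalone sketch of the two-write ICR sequence used by
 * __send_IPI_dest_field(). The register offsets and bit values below are
 * assumptions mirroring <asm/apicdef.h>; apic_write() is a hypothetical
 * stand-in that prints instead of touching the real MMIO registers.
 */
#include <stdio.h>

#define APIC_ICR		0x300	/* low 32 bits of the ICR */
#define APIC_ICR2		0x310	/* high 32 bits of the ICR */
#define APIC_DEST_LOGICAL	0x00800	/* logical destination mode */
#define APIC_DM_FIXED		0x00000	/* fixed delivery mode */
#define APIC_DM_NMI		0x00400	/* NMI delivery mode */
#define NMI_VECTOR		0x02
#define SET_APIC_DEST_FIELD(x)	((x) << 24)	/* dest in bits 56-63 */

static void apic_write(unsigned int reg, unsigned int val)
{
	/* Mock: the real version writes the local APIC MMIO register. */
	printf("apic_write(%#x, %#010x)\n", reg, val);
}

static unsigned int prepare_ICR(unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	if (vector == NMI_VECTOR)
		icr |= APIC_DM_NMI;		/* vector field is unused */
	else
		icr |= APIC_DM_FIXED | vector;
	return icr;
}

int main(void)
{
	/*
	 * Send vector 0x31 to the CPU with logical APIC ID 0x02. The
	 * destination goes into ICR2 first; the subsequent write to the
	 * low ICR word is what fires off the IPI.
	 */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(0x02));
	apic_write(APIC_ICR, prepare_ICR(0, 0x31));

	/* An NMI IPI carries no vector, only the NMI delivery mode. */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(0x02));
	apic_write(APIC_ICR, prepare_ICR(0, NMI_VECTOR));
	return 0;
}

Running the sketch makes the ordering constraint visible: the destination
(ICR2) must be programmed before the low ICR word, because it is the write
to APIC_ICR that actually dispatches the interrupt.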
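
Similarly, convert_apicid_to_cpu() is just a linear reverse scan of the
per-CPU APIC ID map, which safe_smp_processor_id() uses to recover a sane
CPU number from the hardware APIC ID. A minimal sketch, with a hypothetical
cpu_to_apicid[] array standing in for per_cpu(x86_cpu_to_apicid, i):

#include <stdio.h>

#define NR_CPUS	4

/* Hypothetical stand-in for per_cpu(x86_cpu_to_apicid, cpu). */
static const unsigned char cpu_to_apicid[NR_CPUS] = { 0, 1, 4, 5 };

static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	/* Linear scan: the map is small and this path is not hot. */
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_apicid[i] == apic_id)
			return i;
	}
	return -1;	/* no CPU owns this APIC ID */
}

int main(void)
{
	printf("APIC ID 4 -> CPU %d\n", convert_apicid_to_cpu(4));	/* 2 */
	printf("APIC ID 7 -> CPU %d\n", convert_apicid_to_cpu(7));	/* -1 */
	return 0;
}

As in the original, a failed lookup falls back rather than trapping:
safe_smp_processor_id() returns CPU 0 whenever the reverse map has no answer.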