author     Thomas Gleixner <tglx@linutronix.de>    2008-01-30 07:30:27 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-01-30 07:30:27 -0500
commit     16da2f93054fc379522b93afc71d49751bd8be2b
tree       8bef8116082e65bdd2db32b8e423db39c86531ed /arch
parent     081e10b96e971da2eba05ab1ecbf2c051fa119f6
x86: smp_64.c: remove unused exports and cleanup while at it
The exports are not used anywhere, and there is no reason why they
were ever introduced.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/kernel/smp_64.c  74
 1 file changed, 35 insertions(+), 39 deletions(-)
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 03fa6ed559c6..62b0f2a1b1e8 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -29,7 +29,7 @@
 #include <asm/idle.h>
 
 /*
  * Smarter SMP flushing macros.
  * c/o Linus Torvalds.
  *
  * These mean you can really definitely utterly forget about
@@ -37,15 +37,15 @@
  *
  * Optimizations Manfred Spraul <manfred@colorfullife.com>
  *
  * More scalable flush, from Andi Kleen
  *
  * To avoid global state use 8 different call vectors.
  * Each CPU uses a specific vector to trigger flushes on other
  * CPUs. Depending on the received vector the target CPUs look into
  * the right per cpu variable for the flush data.
  *
  * With more than 8 CPUs they are hashed to the 8 available
  * vectors. The limited global vector space forces us to this right now.
  * In future when interrupts are split into per CPU domains this could be
  * fixed, at the cost of triggering multiple IPIs in some cases.
  */
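The comment block above is the heart of the scheme: flush requests are spread over 8 IPI vectors, and once a machine has more than 8 CPUs several senders share a vector. A minimal user-space sketch of that mapping follows; it is illustrative only, not kernel code, and simply mirrors the `sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS` line that appears later in this patch.

#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS 8    /* the 8 vectors described above */

/* Toy model: hash each sending CPU onto one of the 8 flush vectors.
 * CPUs 0 and 8, 1 and 9, ... end up sharing a vector, which is exactly
 * the compromise the comment above attributes to the limited vector space. */
static int vector_for_cpu(int cpu)
{
        return cpu % NUM_INVALIDATE_TLB_VECTORS;
}

int main(void)
{
        for (int cpu = 0; cpu < 16; cpu++)
                printf("cpu %2d -> invalidate vector %d\n",
                       cpu, vector_for_cpu(cpu));
        return 0;
}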
@@ -67,7 +67,7 @@ union smp_flush_state {
 static DEFINE_PER_CPU(union smp_flush_state, flush_state);
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
  */
 static inline void leave_mm(int cpu)
@@ -85,25 +85,25 @@ static inline void leave_mm(int cpu)
  * 1) switch_mm() either 1a) or 1b)
  * 1a) thread switch to a different mm
  * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
  *      Stop ipi delivery for the old mm. This is not synchronized with
  *      the other cpus, but smp_invalidate_interrupt ignore flush ipis
  *      for the wrong mm, and in the worst case we perform a superfluous
  *      tlb flush.
  * 1a2) set cpu mmu_state to TLBSTATE_OK
  *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
  *      was in lazy tlb mode.
  * 1a3) update cpu active_mm
  *      Now cpu0 accepts tlb flushes for the new mm.
  * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
  *      Now the other cpus will send tlb flush ipis.
  * 1a4) change cr3.
  * 1b) thread switch without mm change
  *      cpu active_mm is correct, cpu0 already handles
  *      flush ipis.
  * 1b1) set cpu mmu_state to TLBSTATE_OK
  * 1b2) test_and_set the cpu bit in cpu_vm_mask.
  *      Atomically set the bit [other cpus will start sending flush ipis],
  *      and test the bit.
  * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
  * 2) switch %%esp, ie current
  *
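Steps 1b2/1b3 of the comment above are the subtle part: when a thread switches back without an mm change, the CPU's bit in cpu_vm_mask is set atomically, and the bit's previous value tells the CPU whether leave_mm() ran in the meantime (bit clear means the local TLB may be stale). Here is a small stand-alone sketch of that test-and-set idiom, using a GCC builtin in place of the kernel's test_and_set_bit(), purely as an illustration of the idea.

#include <stdio.h>

/*
 * Toy model of steps 1b2/1b3 above (not kernel code): setting the CPU's
 * bit in cpu_vm_mask must be atomic, and the *previous* value of the bit
 * says whether leave_mm() ran while the bit was clear, in which case the
 * local TLB has to be flushed.
 */
static unsigned long cpu_vm_mask;       /* one bit per CPU in this toy model */

static void local_tlb_flush(void)
{
        printf("flush local TLB (bit was clear, leave_mm had run)\n");
}

static void switch_back_to_same_mm(int cpu)
{
        unsigned long bit = 1UL << cpu;
        unsigned long old = __sync_fetch_and_or(&cpu_vm_mask, bit);

        if (!(old & bit))               /* 1b3: bit was 0 -> flush */
                local_tlb_flush();
}

int main(void)
{
        switch_back_to_same_mm(3);      /* first switch: bit clear, flushes */
        switch_back_to_same_mm(3);      /* bit already set, no flush */
        return 0;
}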
@@ -142,7 +142,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 
         if (!cpu_isset(cpu, f->flush_cpumask))
                 goto out;
         /*
          * This was a BUG() but until someone can quote me the
          * line from the intel manual that guarantees an IPI to
          * multiple CPUs is retried _only_ on the erroring CPUs
@@ -150,7 +150,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
          *
          * BUG();
          */
 
         if (f->flush_mm == read_pda(active_mm)) {
                 if (read_pda(mmu_state) == TLBSTATE_OK) {
                         if (f->flush_va == FLUSH_ALL)
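The two hunks above show the receiver side of the flush IPI: the handler bails out unless its CPU is in the request's cpumask, and it only flushes if the request targets the mm this CPU is actually running and the CPU is not in lazy TLB mode, choosing between a full flush and a single-page flush. A toy model of that decision tree follows; the branches the diff does not show are assumptions, and none of this is the kernel's actual code.

#include <stdio.h>

#define FLUSH_ALL       (-1UL)  /* sentinel mirroring the FLUSH_ALL test above */

enum tlb_state { TLBSTATE_OK, TLBSTATE_LAZY };

/*
 * Toy model of the receiver-side decision visible in the hunks above:
 * only flush if the IPI is for the mm this CPU is running, and only if
 * the CPU is not in lazy TLB mode; then flush either everything or just
 * the single address that was requested.
 */
static void handle_flush_ipi(const void *flush_mm, const void *active_mm,
                             enum tlb_state state, unsigned long flush_va)
{
        if (flush_mm != active_mm)
                return;                 /* IPI for an mm this CPU no longer runs */
        if (state != TLBSTATE_OK)
                return;                 /* lazy mode: assumed to drop the mm instead */
        if (flush_va == FLUSH_ALL)
                printf("flush entire TLB\n");
        else
                printf("flush single page at %#lx\n", flush_va);
}

int main(void)
{
        int mm;                         /* stand-in object for an mm_struct */

        handle_flush_ipi(&mm, &mm, TLBSTATE_OK, FLUSH_ALL);
        handle_flush_ipi(&mm, &mm, TLBSTATE_OK, 0x1000);
        return 0;
}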
@@ -176,9 +176,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
         sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
         f = &per_cpu(flush_state, sender);
 
-        /* Could avoid this lock when
-           num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-           probably not worth checking this for a cache-hot lock. */
+        /*
+         * Could avoid this lock when
+         * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+         * probably not worth checking this for a cache-hot lock.
+         */
         spin_lock(&f->tlbstate_lock);
 
         f->flush_mm = mm;
@@ -202,14 +204,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 int __cpuinit init_smp_flush(void)
 {
         int i;
+
         for_each_cpu_mask(i, cpu_possible_map) {
                 spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
         }
         return 0;
 }
-
 core_initcall(init_smp_flush);
 
 void flush_tlb_current_task(void)
 {
         struct mm_struct *mm = current->mm;
@@ -224,7 +226,6 @@ void flush_tlb_current_task(void)
                 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
         preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_current_task);
 
 void flush_tlb_mm (struct mm_struct * mm)
 {
@@ -245,7 +246,6 @@ void flush_tlb_mm (struct mm_struct * mm)
 
         preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_mm);
 
 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
@@ -259,8 +259,8 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
         if (current->active_mm == mm) {
                 if(current->mm)
                         __flush_tlb_one(va);
                 else
                         leave_mm(smp_processor_id());
         }
 
         if (!cpus_empty(cpu_mask))
@@ -268,7 +268,6 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
         preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_page);
 
 static void do_flush_tlb_all(void* info)
 {
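The three EXPORT_SYMBOL() removals in this patch (flush_tlb_current_task, flush_tlb_mm, flush_tlb_page) only matter for loadable modules; built-in code keeps calling these functions exactly as before. The only kind of user the exports could have served is a hypothetical out-of-tree module like the sketch below (the module name and header choices are illustrative assumptions), and after this patch its reference to flush_tlb_mm() would simply fail to resolve when the module is loaded.

#include <linux/module.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>

static int __init flush_demo_init(void)
{
        /* Resolvable only while EXPORT_SYMBOL(flush_tlb_mm) existed;
         * after this patch the module loader rejects the module with
         * an "unknown symbol" error.  Built-in callers are unaffected. */
        flush_tlb_mm(current->mm);
        return 0;
}

static void __exit flush_demo_exit(void)
{
}

module_init(flush_demo_init);
module_exit(flush_demo_exit);
MODULE_LICENSE("GPL");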
@@ -325,11 +324,9 @@ void unlock_ipi_call_lock(void)
  * this function sends a 'generic call function' IPI to all other CPU
  * of the system defined in the mask.
  */
-
-static int
-__smp_call_function_mask(cpumask_t mask,
-                         void (*func)(void *), void *info,
-                         int wait)
+static int __smp_call_function_mask(cpumask_t mask,
+                                    void (*func)(void *), void *info,
+                                    int wait)
 {
         struct call_data_struct data;
         cpumask_t allbutself;
@@ -417,11 +414,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
  */
 
 int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
         int nonatomic, int wait)
 {
         /* prevent preemption and reschedule on another processor */
-        int ret;
-        int me = get_cpu();
+        int ret, me = get_cpu();
 
         /* Can deadlock when called with interrupts disabled */
         WARN_ON(irqs_disabled());
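For reference, here is a hypothetical caller of smp_call_function_single() using the five-argument signature shown in this hunk (cpu, func, info, nonatomic, wait); the function and variable names are made up for the sketch. Per the WARN_ON() above, the caller must have interrupts enabled, and with wait=1 the call does not return until func has finished on the target CPU.

#include <linux/smp.h>

/* Runs on the target CPU, from the call-function IPI. */
static void record_cpu_id(void *info)
{
        *(int *)info = smp_processor_id();
}

static void query_remote_cpu(int target_cpu)
{
        int answer = -1;

        /* nonatomic = 0, wait = 1: block until record_cpu_id() has run. */
        smp_call_function_single(target_cpu, record_cpu_id, &answer, 0, 1);
}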
@@ -471,9 +467,9 @@ static void stop_this_cpu(void *dummy)
          */
         cpu_clear(smp_processor_id(), cpu_online_map);
         disable_local_APIC();
         for (;;)
                 halt();
 }
 
 void smp_send_stop(void)
 {