author    Simon Arlott <simon@fire.lp0.eu>         2007-10-19 19:13:56 -0400
committer Adrian Bunk <bunk@kernel.org>            2007-10-19 19:13:56 -0400
commit    27b46d7661dc720224813eb4f452e424f1bf3a9a
tree      1683daefc5f245efa5a1c2a3808277b45d21ce72 /arch/x86/mach-voyager/voyager_smp.c
parent    5e71c6051585da46b898b21bd8e5b6df2795f03f
spelling fixes: arch/i386/

Spelling fixes in arch/i386/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')
 arch/x86/mach-voyager/voyager_smp.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index e4928aa6bdfb..c5f2692273e6 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -389,7 +389,7 @@ find_smp_config(void)
 
 	/* The boot CPU must be extended */
 	voyager_extended_vic_processors = 1<<boot_cpu_id;
-	/* initially, all of the first 8 cpu's can boot */
+	/* initially, all of the first 8 CPUs can boot */
 	voyager_allowed_boot_processors = 0xff;
 	/* set up everything for just this CPU, we can alter
 	 * this as we start the other CPUs later */
@@ -1010,7 +1010,7 @@ static struct call_data_struct * call_data;
 
 /* execute a thread on a new CPU.  The function to be called must be
  * previously set up.  This is used to schedule a function for
- * execution on all CPU's - set up the function then broadcast a
+ * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void
 smp_call_function_interrupt(void)
@@ -1095,7 +1095,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
  * CPI here.  We don't use this actually for counting so losing
  * ticks doesn't matter
  *
- * FIXME: For those CPU's which actually have a local APIC, we could
+ * FIXME: For those CPUs which actually have a local APIC, we could
  * try to use it to trigger this interrupt instead of having to
  * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
  * no local APIC, so I can't do this
@@ -1287,7 +1287,7 @@ smp_local_timer_interrupt(void)
 
 	/*
 	 * We take the 'long' return path, and there every subsystem
-	 * grabs the apropriate locks (kernel lock/ irq lock).
+	 * grabs the appropriate locks (kernel lock/ irq lock).
 	 *
 	 * we might want to decouple profiling from the 'long path',
 	 * and do the profiling totally in assembly.
@@ -1759,7 +1759,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
 
 	if(cpus_addr(mask)[0] == 0)
-		/* can't have no cpu's to accept the interrupt -- extremely
+		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
 
@@ -1791,7 +1791,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 	}
 	/* this is magic, we now have the correct affinity maps, so
 	 * enable the interrupt.  This will send an enable CPI to
-	 * those cpu's who need to enable it in their local masks,
+	 * those CPUs who need to enable it in their local masks,
 	 * causing them to correct for the new affinity . If the
 	 * interrupt is currently globally disabled, it will simply be
 	 * disabled again as it comes in (voyager lazy disable).  If