Diffstat (limited to 'arch/x86/mach-voyager')
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c    | 12
-rw-r--r--  arch/x86/mach-voyager/voyager_thread.c |  2
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index e4928aa6bdfb..c5f2692273e6 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -389,7 +389,7 @@ find_smp_config(void)
 
 	/* The boot CPU must be extended */
 	voyager_extended_vic_processors = 1<<boot_cpu_id;
-	/* initially, all of the first 8 cpu's can boot */
+	/* initially, all of the first 8 CPUs can boot */
 	voyager_allowed_boot_processors = 0xff;
 	/* set up everything for just this CPU, we can alter
 	 * this as we start the other CPUs later */
@@ -1010,7 +1010,7 @@ static struct call_data_struct * call_data;
 
 /* execute a thread on a new CPU. The function to be called must be
  * previously set up. This is used to schedule a function for
- * execution on all CPU's - set up the function then broadcast a
+ * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void
 smp_call_function_interrupt(void)
@@ -1095,7 +1095,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
  * CPI here. We don't use this actually for counting so losing
  * ticks doesn't matter
  *
- * FIXME: For those CPU's which actually have a local APIC, we could
+ * FIXME: For those CPUs which actually have a local APIC, we could
  * try to use it to trigger this interrupt instead of having to
  * broadcast the timer tick. Unfortunately, all my pentium DYADs have
  * no local APIC, so I can't do this
@@ -1287,7 +1287,7 @@ smp_local_timer_interrupt(void)
 
 	/*
 	 * We take the 'long' return path, and there every subsystem
-	 * grabs the apropriate locks (kernel lock/ irq lock).
+	 * grabs the appropriate locks (kernel lock/ irq lock).
 	 *
 	 * we might want to decouple profiling from the 'long path',
 	 * and do the profiling totally in assembly.
@@ -1759,7 +1759,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
 
 	if(cpus_addr(mask)[0] == 0)
-		/* can't have no cpu's to accept the interrupt -- extremely
+		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
 
@@ -1791,7 +1791,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 	}
 	/* this is magic, we now have the correct affinity maps, so
 	 * enable the interrupt. This will send an enable CPI to
-	 * those cpu's who need to enable it in their local masks,
+	 * those CPUs who need to enable it in their local masks,
 	 * causing them to correct for the new affinity . If the
 	 * interrupt is currently globally disabled, it will simply be
 	 * disabled again as it comes in (voyager lazy disable). If
diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c
index f9d595338159..50f9366c411e 100644
--- a/arch/x86/mach-voyager/voyager_thread.c
+++ b/arch/x86/mach-voyager/voyager_thread.c
@@ -64,7 +64,7 @@ check_from_kernel(void)
 {
 	if(voyager_status.switch_off) {
 
-		/* FIXME: This should be configureable via proc */
+		/* FIXME: This should be configurable via proc */
 		execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
 	} else if(voyager_status.power_fail) {
 		VDEBUG(("Voyager daemon detected AC power failure\n"));