Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/Kconfig            2
-rw-r--r--  arch/mips/kernel/sync-r4k.c  31
2 files changed, 17 insertions, 16 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4368a723e2d0..df1a92afa56a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1656,7 +1656,7 @@ config MIPS_APSP_KSPD
 config MIPS_CMP
 	bool "MIPS CMP framework support"
 	depends on SYS_SUPPORTS_MIPS_CMP
-	select SYNC_R4K if BROKEN
+	select SYNC_R4K
 	select SYS_SUPPORTS_SMP
 	select SYS_SUPPORTS_SCHED_SMT if SMP
 	select WEAK_ORDERING
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 9021108eb9c1..05dd170a83f7 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -1,7 +1,7 @@
 /*
  * Count register synchronisation.
  *
- * All CPUs will have their count registers synchronised to the CPU0 expirelo
+ * All CPUs will have their count registers synchronised to the CPU0 next time
  * value. This can cause a small timewarp for CPU0. All other CPU's should
  * not have done anything significant (but they may have had interrupts
  * enabled briefly - prom_smp_finish() should not be responsible for enabling
@@ -13,21 +13,22 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/irqflags.h>
-#include <linux/r4k-timer.h>
+#include <linux/cpumask.h>
 
+#include <asm/r4k-timer.h>
 #include <asm/atomic.h>
 #include <asm/barrier.h>
-#include <asm/cpumask.h>
 #include <asm/mipsregs.h>
 
-static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
-static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
-static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
+static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
+static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
+static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
+static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
 
 #define COUNTON	100
 #define NR_LOOPS	5
 
-void __init synchronise_count_master(void)
+void __cpuinit synchronise_count_master(void)
 {
 	int i;
 	unsigned long flags;
@@ -42,19 +43,20 @@ void __init synchronise_count_master(void)
 		return;
 #endif
 
-	pr_info("Checking COUNT synchronization across %u CPUs: ",
-		num_online_cpus());
+	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
+	       num_online_cpus());
 
 	local_irq_save(flags);
 
 	/*
 	 * Notify the slaves that it's time to start
 	 */
+	atomic_set(&count_reference, read_c0_count());
 	atomic_set(&count_start_flag, 1);
 	smp_wmb();
 
-	/* Count will be initialised to expirelo for all CPU's */
-	initcount = expirelo;
+	/* Count will be initialised to current timer for all CPU's */
+	initcount = read_c0_count();
 
 	/*
 	 * We loop a few times to get a primed instruction cache,
@@ -106,7 +108,7 @@ void __init synchronise_count_master(void)
 	printk("done.\n");
 }
 
-void __init synchronise_count_slave(void)
+void __cpuinit synchronise_count_slave(void)
 {
 	int i;
 	unsigned long flags;
@@ -131,8 +133,8 @@ void __init synchronise_count_slave(void)
 	while (!atomic_read(&count_start_flag))
 		mb();
 
-	/* Count will be initialised to expirelo for all CPU's */
-	initcount = expirelo;
+	/* Count will be initialised to next expire for all CPU's */
+	initcount = atomic_read(&count_reference);
 
 	ncpus = num_online_cpus();
 	for (i = 0; i < NR_LOOPS; i++) {
@@ -156,4 +158,3 @@ void __init synchronise_count_slave(void)
 	local_irq_restore(flags);
 }
 #undef NR_LOOPS
-#endif
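
For readers following the logic of the change, here is a minimal user-space sketch (not kernel code) of the rendezvous the patch introduces, assuming C11 atomics and POSIX threads: the master publishes a reference value through an atomic (count_reference) and then raises count_start_flag; each slave spins on the flag and adopts the published value as its starting count. The helper fake_read_count() and the thread setup are hypothetical stand-ins for the CP0 Count register and the secondary CPUs; the kernel version additionally uses smp_wmb()/mb() and averages over NR_LOOPS iterations.

/* Build with: cc -pthread sync_sketch.c */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <time.h>

static atomic_uint count_reference;   /* value published by the master */
static atomic_int  count_start_flag;  /* 0 = wait, 1 = value is ready  */

/* Stand-in for the CP0 Count register: a free-running nanosecond counter. */
static unsigned int fake_read_count(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned int)(ts.tv_sec * 1000000000ULL + ts.tv_nsec);
}

static void *slave(void *arg)
{
	/* Spin until the master signals that count_reference is valid. */
	while (!atomic_load(&count_start_flag))
		;

	/* Adopt the master's published value as this CPU's initial count. */
	unsigned int initcount = atomic_load(&count_reference);
	printf("slave %ld adopts count %u\n", (long)arg, initcount);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, slave, (void *)i);

	/* Master: publish the reference value, then release the slaves. */
	atomic_store(&count_reference, fake_read_count());
	atomic_store(&count_start_flag, 1);

	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}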