diff options
Diffstat (limited to 'arch')
-rw-r--r-- | arch/mips/kernel/sync-r4k.c | 32 |
1 file changed, 8 insertions, 24 deletions
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index 2242bdd4370e..4472a7f98577 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c | |||
@@ -17,35 +17,23 @@ | |||
17 | #include <asm/barrier.h> | 17 | #include <asm/barrier.h> |
18 | #include <asm/mipsregs.h> | 18 | #include <asm/mipsregs.h> |
19 | 19 | ||
20 | static atomic_t count_start_flag = ATOMIC_INIT(0); | 20 | static unsigned int initcount = 0; |
21 | static atomic_t count_count_start = ATOMIC_INIT(0); | 21 | static atomic_t count_count_start = ATOMIC_INIT(0); |
22 | static atomic_t count_count_stop = ATOMIC_INIT(0); | 22 | static atomic_t count_count_stop = ATOMIC_INIT(0); |
23 | static atomic_t count_reference = ATOMIC_INIT(0); | ||
24 | 23 | ||
25 | #define COUNTON 100 | 24 | #define COUNTON 100 |
26 | #define NR_LOOPS 5 | 25 | #define NR_LOOPS 3 |
27 | 26 | ||
28 | void synchronise_count_master(int cpu) | 27 | void synchronise_count_master(int cpu) |
29 | { | 28 | { |
30 | int i; | 29 | int i; |
31 | unsigned long flags; | 30 | unsigned long flags; |
32 | unsigned int initcount; | ||
33 | 31 | ||
34 | printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); | 32 | printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); |
35 | 33 | ||
36 | local_irq_save(flags); | 34 | local_irq_save(flags); |
37 | 35 | ||
38 | /* | 36 | /* |
39 | * Notify the slaves that it's time to start | ||
40 | */ | ||
41 | atomic_set(&count_reference, read_c0_count()); | ||
42 | atomic_set(&count_start_flag, cpu); | ||
43 | smp_wmb(); | ||
44 | |||
45 | /* Count will be initialised to current timer for all CPU's */ | ||
46 | initcount = read_c0_count(); | ||
47 | |||
48 | /* | ||
49 | * We loop a few times to get a primed instruction cache, | 37 | * We loop a few times to get a primed instruction cache, |
50 | * then the last pass is more or less synchronised and | 38 | * then the last pass is more or less synchronised and |
51 | * the master and slaves each set their cycle counters to a known | 39 | * the master and slaves each set their cycle counters to a known |
@@ -63,9 +51,13 @@ void synchronise_count_master(int cpu) | |||
63 | atomic_set(&count_count_stop, 0); | 51 | atomic_set(&count_count_stop, 0); |
64 | smp_wmb(); | 52 | smp_wmb(); |
65 | 53 | ||
66 | /* this lets the slaves write their count register */ | 54 | /* Let the slave write its count register */ |
67 | atomic_inc(&count_count_start); | 55 | atomic_inc(&count_count_start); |
68 | 56 | ||
57 | /* Count will be initialised to current timer */ | ||
58 | if (i == 1) | ||
59 | initcount = read_c0_count(); | ||
60 | |||
69 | /* | 61 | /* |
70 | * Everyone initialises count in the last loop: | 62 | * Everyone initialises count in the last loop: |
71 | */ | 63 | */ |
@@ -73,7 +65,7 @@ void synchronise_count_master(int cpu) | |||
73 | write_c0_count(initcount); | 65 | write_c0_count(initcount); |
74 | 66 | ||
75 | /* | 67 | /* |
76 | * Wait for all slaves to leave the synchronization point: | 68 | * Wait for the slave to leave the synchronization point: |
77 | */ | 69 | */ |
78 | while (atomic_read(&count_count_stop) != 1) | 70 | while (atomic_read(&count_count_stop) != 1) |
79 | mb(); | 71 | mb(); |
@@ -83,7 +75,6 @@ void synchronise_count_master(int cpu) | |||
83 | } | 75 | } |
84 | /* Arrange for an interrupt in a short while */ | 76 | /* Arrange for an interrupt in a short while */ |
85 | write_c0_compare(read_c0_count() + COUNTON); | 77 | write_c0_compare(read_c0_count() + COUNTON); |
86 | atomic_set(&count_start_flag, 0); | ||
87 | 78 | ||
88 | local_irq_restore(flags); | 79 | local_irq_restore(flags); |
89 | 80 | ||
@@ -98,19 +89,12 @@ void synchronise_count_master(int cpu) | |||
98 | void synchronise_count_slave(int cpu) | 89 | void synchronise_count_slave(int cpu) |
99 | { | 90 | { |
100 | int i; | 91 | int i; |
101 | unsigned int initcount; | ||
102 | 92 | ||
103 | /* | 93 | /* |
104 | * Not every cpu is online at the time this gets called, | 94 | * Not every cpu is online at the time this gets called, |
105 | * so we first wait for the master to say everyone is ready | 95 | * so we first wait for the master to say everyone is ready |
106 | */ | 96 | */ |
107 | 97 | ||
108 | while (atomic_read(&count_start_flag) != cpu) | ||
109 | mb(); | ||
110 | |||
111 | /* Count will be initialised to next expire for all CPU's */ | ||
112 | initcount = atomic_read(&count_reference); | ||
113 | |||
114 | for (i = 0; i < NR_LOOPS; i++) { | 98 | for (i = 0; i < NR_LOOPS; i++) { |
115 | atomic_inc(&count_count_start); | 99 | atomic_inc(&count_count_start); |
116 | while (atomic_read(&count_count_start) != 2) | 100 | while (atomic_read(&count_count_start) != 2) |