Diffstat (limited to 'arch/powerpc/platforms/85xx/smp.c')
 arch/powerpc/platforms/85xx/smp.c | 83 ++++++++++++++++++++++++++++++--------
 1 file changed, 68 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index a6b106557be..5c91a992f02 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -16,6 +16,7 @@
 #include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/kexec.h>
+#include <linux/highmem.h>
 
 #include <asm/machdep.h>
 #include <asm/pgtable.h>
@@ -79,6 +80,7 @@ smp_85xx_kick_cpu(int nr)
 	local_irq_save(flags);
 
 	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
+#ifdef CONFIG_PPC32
 	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
 
 	if (!ioremappable)
@@ -88,6 +90,12 @@ smp_85xx_kick_cpu(int nr)
 	/* Wait a bit for the CPU to ack. */
 	while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
 		mdelay(1);
+#else
+	out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER),
+		__pa((u64)*((unsigned long long *) generic_secondary_smp_init)));
+
+	smp_generic_kick_cpu(nr);
+#endif
 
 	local_irq_restore(flags);
 
@@ -114,19 +122,15 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
-static int kexec_down_cpus = 0;
+atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
 {
-	mpic_teardown_this_cpu(1);
-
-	/* When crashing, this gets called on all CPU's we only
-	 * take down the non-boot cpus */
-	if (smp_processor_id() != boot_cpuid)
-	{
-		local_irq_disable();
-		kexec_down_cpus++;
+	local_irq_disable();
 
+	if (secondary) {
+		atomic_inc(&kexec_down_cpus);
+		/* loop forever */
 		while (1);
 	}
 }
@@ -137,16 +141,65 @@ static void mpc85xx_smp_kexec_down(void *arg)
 	ppc_md.kexec_cpu_down(0,1);
 }
 
-static void mpc85xx_smp_machine_kexec(struct kimage *image)
+static void map_and_flush(unsigned long paddr)
 {
-	int timeout = 2000;
+	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+	unsigned long kaddr = (unsigned long)kmap(page);
+
+	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
+	kunmap(page);
+}
+
+/**
+ * Before we reset the other cores, we need to flush relevant cache
+ * out to memory so we don't get anything corrupted, some of these flushes
+ * are performed out of an overabundance of caution as interrupts are not
+ * disabled yet and we can switch cores
+ */
+static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
+{
+	kimage_entry_t *ptr, entry;
+	unsigned long paddr;
 	int i;
 
-	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
+	if (image->type == KEXEC_TYPE_DEFAULT) {
+		/* normal kexec images are stored in temporary pages */
+		for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+				ptr = (entry & IND_INDIRECTION) ?
+				phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+			if (!(entry & IND_DESTINATION)) {
+				map_and_flush(entry);
+			}
+		}
+		/* flush out last IND_DONE page */
+		map_and_flush(entry);
+	} else {
+		/* crash type kexec images are copied to the crash region */
+		for (i = 0; i < image->nr_segments; i++) {
+			struct kexec_segment *seg = &image->segment[i];
+			for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
+					paddr += PAGE_SIZE) {
+				map_and_flush(paddr);
+			}
+		}
+	}
+
+	/* also flush the kimage struct to be passed in as well */
+	flush_dcache_range((unsigned long)image,
+			(unsigned long)image + sizeof(*image));
+}
+
+static void mpc85xx_smp_machine_kexec(struct kimage *image)
+{
+	int timeout = INT_MAX;
+	int i, num_cpus = num_present_cpus();
+
+	mpc85xx_smp_flush_dcache_kexec(image);
 
-	smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
+	if (image->type == KEXEC_TYPE_DEFAULT)
+		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
-	while ( (kexec_down_cpus != (num_online_cpus() - 1)) &&
+	while ( (atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
 			( timeout > 0 ) )
 	{
 		timeout--;
@@ -155,7 +208,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
 	if ( !timeout )
 		printk(KERN_ERR "Unable to bring down secondary cpu(s)");
 
-	for (i = 0; i < num_present_cpus(); i++)
+	for (i = 0; i < num_cpus; i++)
 	{
 		if ( i == smp_processor_id() ) continue;
 		mpic_reset_core(i);
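
The dcache-flush hunk above walks the kexec image's entry list, which is easy to misread without knowing the IND_* flag bits. The sketch below restates that walk with comments spelling out what each flag means. It is an illustrative re-statement, not part of the patch: the helper name flush_kimage_pages() is invented here, and it reuses the map_and_flush() helper the patch adds.

/*
 * Illustrative sketch only: the same kimage entry walk as
 * mpc85xx_smp_flush_dcache_kexec(), annotated.  Each kimage_entry_t is a
 * physical address with flag bits packed into its low bits.
 */
static void flush_kimage_pages(struct kimage *image)
{
	kimage_entry_t *ptr, entry;

	for (ptr = &image->head;			/* the list starts inside the kimage */
	     (entry = *ptr) && !(entry & IND_DONE);	/* a zero or IND_DONE entry ends it */
	     ptr = (entry & IND_INDIRECTION) ?
			phys_to_virt(entry & PAGE_MASK) :	/* follow to the next page of entries */
			ptr + 1) {				/* otherwise advance to the next slot */
		/*
		 * IND_DESTINATION entries only name where a page will later be
		 * copied to, so the patch skips them; IND_SOURCE and
		 * IND_INDIRECTION entries point at pages the relocation code
		 * will read, so their contents are flushed out to memory.
		 */
		if (!(entry & IND_DESTINATION))
			map_and_flush(entry);
	}

	/* the final IND_DONE entry is passed to map_and_flush() too, mirroring the patch */
	map_and_flush(entry);
}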