diff options
| author | Matthew McClintock <msm@freescale.com> | 2010-09-16 18:58:26 -0400 |
|---|---|---|
| committer | Kumar Gala <galak@kernel.crashing.org> | 2010-10-14 01:52:52 -0400 |
| commit | 677de425583b43bf1af3aea0fa8d433120f0f13c (patch) | |
| tree | 849973fff204f23518d8b7ccee45a16cf17de214 | |
| parent | 5d692961633d4ecd1ca07313b75ddf35520a4c28 (diff) | |
powerpc/85xx: flush dcache before resetting cores
When we do an mpic_reset_core we need to make sure the dcache is flushed.
Signed-off-by: Matthew McClintock <msm@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
| -rw-r--r-- | arch/powerpc/platforms/85xx/smp.c | 50 |
1 file changed, 50 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index c9a77fa62744..5c91a992f02b 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
| 17 | #include <linux/of.h> | 17 | #include <linux/of.h> |
| 18 | #include <linux/kexec.h> | 18 | #include <linux/kexec.h> |
| 19 | #include <linux/highmem.h> | ||
| 19 | 20 | ||
| 20 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
| 21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
| @@ -140,11 +141,60 @@ static void mpc85xx_smp_kexec_down(void *arg) | |||
| 140 | ppc_md.kexec_cpu_down(0,1); | 141 | ppc_md.kexec_cpu_down(0,1); |
| 141 | } | 142 | } |
| 142 | 143 | ||
| 144 | static void map_and_flush(unsigned long paddr) | ||
| 145 | { | ||
| 146 | struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); | ||
| 147 | unsigned long kaddr = (unsigned long)kmap(page); | ||
| 148 | |||
| 149 | flush_dcache_range(kaddr, kaddr + PAGE_SIZE); | ||
| 150 | kunmap(page); | ||
| 151 | } | ||
| 152 | |||
/**
 * Before we reset the other cores, we need to flush relevant cache
 * out to memory so we don't get anything corrupted, some of these flushes
 * are performed out of an overabundance of caution as interrupts are not
 * disabled yet and we can switch cores
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned long paddr;
	int i;

	if (image->type == KEXEC_TYPE_DEFAULT) {
		/*
		 * Normal kexec images are stored in temporary pages.
		 * Walk the kimage entry list: each entry is a physical
		 * address with low-bit flags.  Advance into the next
		 * indirection page when IND_INDIRECTION is set, otherwise
		 * step to the next entry; stop at IND_DONE.
		 */
		for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
				ptr = (entry & IND_INDIRECTION) ?
				phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
			/*
			 * IND_DESTINATION entries only name where data will
			 * be copied to; only flush the pages that actually
			 * hold data (source/indirection pages).
			 */
			if (!(entry & IND_DESTINATION)) {
				map_and_flush(entry);
			}
		}
		/* flush out last IND_DONE page (loop exits before it) */
		map_and_flush(entry);
	} else {
		/*
		 * Crash type kexec images are copied to the crash region
		 * up front, so flush every page of every segment.
		 */
		for (i = 0; i < image->nr_segments; i++) {
			struct kexec_segment *seg = &image->segment[i];
			/* seg->mem/memsz are assumed page-aligned here —
			 * NOTE(review): confirm against kexec segment setup */
			for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
					paddr += PAGE_SIZE) {
				map_and_flush(paddr);
			}
		}
	}

	/* also flush the kimage struct to be passed in as well */
	flush_dcache_range((unsigned long)image,
		(unsigned long)image + sizeof(*image));
}
| 191 | |||
| 143 | static void mpc85xx_smp_machine_kexec(struct kimage *image) | 192 | static void mpc85xx_smp_machine_kexec(struct kimage *image) |
| 144 | { | 193 | { |
| 145 | int timeout = INT_MAX; | 194 | int timeout = INT_MAX; |
| 146 | int i, num_cpus = num_present_cpus(); | 195 | int i, num_cpus = num_present_cpus(); |
| 147 | 196 | ||
| 197 | mpc85xx_smp_flush_dcache_kexec(image); | ||
| 148 | 198 | ||
| 149 | if (image->type == KEXEC_TYPE_DEFAULT) | 199 | if (image->type == KEXEC_TYPE_DEFAULT) |
| 150 | smp_call_function(mpc85xx_smp_kexec_down, NULL, 0); | 200 | smp_call_function(mpc85xx_smp_kexec_down, NULL, 0); |
