aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristophe Leroy <christophe.leroy@c-s.fr>2016-05-17 03:02:43 -0400
committerScott Wood <oss@buserror.net>2016-07-09 03:02:48 -0400
commitf86ef74ed9193c52411277eeac2eec69af553392 (patch)
tree84c8ad083c2efc3a0688e56ab906cc49b2cc6e1d
parentc223c90386bc2306510e0ceacd768a0123ff2a2f (diff)
powerpc/8xx: Fix vaddr for IMMR early remap
Memory: 124428K/131072K available (3748K kernel code, 188K rwdata, 648K rodata, 508K init, 290K bss, 6644K reserved)
Kernel virtual memory layout:
  * 0xfffdf000..0xfffff000 : fixmap
  * 0xfde00000..0xfe000000 : consistent mem
  * 0xfddf6000..0xfde00000 : early ioremap
  * 0xc9000000..0xfddf6000 : vmalloc & ioremap
SLUB: HWalign=16, Order=0-3, MinObjects=0, CPUs=1, Nodes=1

Today, IMMR is mapped 1:1 at startup.

Mapping IMMR 1:1 is just wrong because it may overlap with another area. On most mpc8xx boards it is OK, as IMMR is set to 0xff000000, but for instance on the EP88xC board, IMMR is at 0xfa200000, which overlaps with the VM ioremap area.

This patch fixes the virtual address for remapping IMMR with the fixmap, regardless of the value of IMMR.

The size of the IMMR area is 256 kbytes (CPM at offset 0, security engine at offset 128k), so a 512k page is enough.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
-rw-r--r--arch/powerpc/include/asm/fixmap.h7
-rw-r--r--arch/powerpc/include/asm/mmu-8xx.h3
-rw-r--r--arch/powerpc/kernel/asm-offsets.c8
-rw-r--r--arch/powerpc/kernel/head_8xx.S11
-rw-r--r--arch/powerpc/sysdev/cpm_common.c22
5 files changed, 41 insertions, 10 deletions
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 90f604bbcd19..4508b322f2cd 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -51,6 +51,13 @@ enum fixed_addresses {
51 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 51 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
52 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, 52 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
53#endif 53#endif
54#ifdef CONFIG_PPC_8xx
55 /* For IMMR we need an aligned 512K area */
56#define FIX_IMMR_SIZE (512 * 1024 / PAGE_SIZE)
57 FIX_IMMR_START,
58 FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
59 FIX_IMMR_SIZE,
60#endif
54 /* FIX_PCIE_MCFG, */ 61 /* FIX_PCIE_MCFG, */
55 __end_of_fixed_addresses 62 __end_of_fixed_addresses
56}; 63};
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 0a566f15f985..3e0e4927811c 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -169,6 +169,9 @@ typedef struct {
169 unsigned int active; 169 unsigned int active;
170 unsigned long vdso_base; 170 unsigned long vdso_base;
171} mm_context_t; 171} mm_context_t;
172
173#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
174#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
172#endif /* !__ASSEMBLY__ */ 175#endif /* !__ASSEMBLY__ */
173 176
174#if defined(CONFIG_PPC_4K_PAGES) 177#if defined(CONFIG_PPC_4K_PAGES)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 047892869257..247f6407c7d8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -68,6 +68,10 @@
68#include "../mm/mmu_decl.h" 68#include "../mm/mmu_decl.h"
69#endif 69#endif
70 70
71#ifdef CONFIG_PPC_8xx
72#include <asm/fixmap.h>
73#endif
74
71int main(void) 75int main(void)
72{ 76{
73 DEFINE(THREAD, offsetof(struct task_struct, thread)); 77 DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -749,5 +753,9 @@ int main(void)
749 753
750 DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER); 754 DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
751 755
756#ifdef CONFIG_PPC_8xx
757 DEFINE(VIRT_IMMR_BASE, __fix_to_virt(FIX_IMMR_BASE));
758#endif
759
752 return 0; 760 return 0;
753} 761}
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 80c69472314e..378a1858687d 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -30,6 +30,7 @@
30#include <asm/ppc_asm.h> 30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h> 31#include <asm/asm-offsets.h>
32#include <asm/ptrace.h> 32#include <asm/ptrace.h>
33#include <asm/fixmap.h>
33 34
34/* Macro to make the code more readable. */ 35/* Macro to make the code more readable. */
35#ifdef CONFIG_8xx_CPU6 36#ifdef CONFIG_8xx_CPU6
@@ -763,7 +764,7 @@ start_here:
763 * virtual to physical. Also, set the cache mode since that is defined 764 * virtual to physical. Also, set the cache mode since that is defined
764 * by TLB entries and perform any additional mapping (like of the IMMR). 765 * by TLB entries and perform any additional mapping (like of the IMMR).
765 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel, 766 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
766 * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by 767 * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
767 * these mappings is mapped by page tables. 768 * these mappings is mapped by page tables.
768 */ 769 */
769initial_mmu: 770initial_mmu:
@@ -812,7 +813,7 @@ initial_mmu:
812 ori r8, r8, MD_APG_INIT@l 813 ori r8, r8, MD_APG_INIT@l
813 mtspr SPRN_MD_AP, r8 814 mtspr SPRN_MD_AP, r8
814 815
815 /* Map another 8 MByte at the IMMR to get the processor 816 /* Map a 512k page for the IMMR to get the processor
816 * internal registers (among other things). 817 * internal registers (among other things).
817 */ 818 */
818#ifdef CONFIG_PIN_TLB 819#ifdef CONFIG_PIN_TLB
@@ -820,12 +821,12 @@ initial_mmu:
820 mtspr SPRN_MD_CTR, r10 821 mtspr SPRN_MD_CTR, r10
821#endif 822#endif
822 mfspr r9, 638 /* Get current IMMR */ 823 mfspr r9, 638 /* Get current IMMR */
823 andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */ 824 andis. r9, r9, 0xfff8 /* Get 512 kbytes boundary */
824 825
825 mr r8, r9 /* Create vaddr for TLB */ 826 lis r8, VIRT_IMMR_BASE@h /* Create vaddr for TLB */
826 ori r8, r8, MD_EVALID /* Mark it valid */ 827 ori r8, r8, MD_EVALID /* Mark it valid */
827 mtspr SPRN_MD_EPN, r8 828 mtspr SPRN_MD_EPN, r8
828 li r8, MD_PS8MEG /* Set 8M byte page */ 829 li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */
829 ori r8, r8, MD_SVALID /* Make it valid */ 830 ori r8, r8, MD_SVALID /* Make it valid */
830 mtspr SPRN_MD_TWC, r8 831 mtspr SPRN_MD_TWC, r8
831 mr r8, r9 /* Create paddr for TLB */ 832 mr r8, r9 /* Create paddr for TLB */
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 0ac12e5fd8ab..911456d17713 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -28,6 +28,7 @@
28#include <asm/udbg.h> 28#include <asm/udbg.h>
29#include <asm/io.h> 29#include <asm/io.h>
30#include <asm/cpm.h> 30#include <asm/cpm.h>
31#include <asm/fixmap.h>
31#include <soc/fsl/qe/qe.h> 32#include <soc/fsl/qe/qe.h>
32 33
33#include <mm/mmu_decl.h> 34#include <mm/mmu_decl.h>
@@ -37,25 +38,36 @@
37#endif 38#endif
38 39
39#ifdef CONFIG_PPC_EARLY_DEBUG_CPM 40#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
40static u32 __iomem *cpm_udbg_txdesc = 41static u32 __iomem *cpm_udbg_txdesc;
41 (u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR; 42static u8 __iomem *cpm_udbg_txbuf;
42 43
43static void udbg_putc_cpm(char c) 44static void udbg_putc_cpm(char c)
44{ 45{
45 u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
46
47 if (c == '\n') 46 if (c == '\n')
48 udbg_putc_cpm('\r'); 47 udbg_putc_cpm('\r');
49 48
50 while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000) 49 while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000)
51 ; 50 ;
52 51
53 out_8(txbuf, c); 52 out_8(cpm_udbg_txbuf, c);
54 out_be32(&cpm_udbg_txdesc[0], 0xa0000001); 53 out_be32(&cpm_udbg_txdesc[0], 0xa0000001);
55} 54}
56 55
57void __init udbg_init_cpm(void) 56void __init udbg_init_cpm(void)
58{ 57{
58#ifdef CONFIG_PPC_8xx
59 cpm_udbg_txdesc = (u32 __iomem __force *)
60 (CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
61 VIRT_IMMR_BASE);
62 cpm_udbg_txbuf = (u8 __iomem __force *)
63 (in_be32(&cpm_udbg_txdesc[1]) - PHYS_IMMR_BASE +
64 VIRT_IMMR_BASE);
65#else
66 cpm_udbg_txdesc = (u32 __iomem __force *)
67 CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
68 cpm_udbg_txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
69#endif
70
59 if (cpm_udbg_txdesc) { 71 if (cpm_udbg_txdesc) {
60#ifdef CONFIG_CPM2 72#ifdef CONFIG_CPM2
61 setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG); 73 setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);