author		Suzuki Poulose <suzuki@in.ibm.com>	2011-12-14 17:59:24 -0500
committer	Josh Boyer <jwboyer@gmail.com>	2011-12-20 10:21:57 -0500
commit		26ecb6c44bb33afc62905ba01b636dde70fc2dc6 (patch)
tree		1e2a12510dcc81b40705ed04a1f4e1470856b041
parent		368ff8f14d6ed8e9fd3b7c2156f2607719bf5a7a (diff)
powerpc/44x: Enable CONFIG_RELOCATABLE for PPC44x
The following patch adds relocatable kernel support, based on processing
of dynamic relocations, for the PPC44x kernel.
We find the runtime address of _stext and relocate ourselves based
on the following calculation:

	virtual_base = ALIGN(KERNELBASE, 256M) + MODULO(_stext.run, 256M)
relocate() is called with the Effective Virtual Base Address (as
shown below)
             | Phys. Addr| Virt. Addr |
Page (256M)  |------------------------|
Boundary     |           |            |
             |           |            |
             |           |            |
Kernel Load  |___________|_ __ _ _ _ _|<- Effective
Addr(_stext) |           |      ^     |   Virt. Base Addr
             |           |      |     |
             |           |      |     |
             |           |reloc_offset|
             |           |      |     |
             |           |      |     |
             |           |______v_____|<- (KERNELBASE)%256M
             |           |            |
             |           |            |
             |           |            |
Page (256M)  |-----------|------------|
Boundary     |           |            |
The virt_phys_offset is updated accordingly, i.e.:

	virt_phys_offset = effective kernel virt base - kernstart_addr
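
To make the arithmetic concrete, here is a minimal, self-contained C
sketch of the two formulas above. It assumes 256M (0x10000000) alignment;
the variable names and example addresses are illustrative only and are
not taken from the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define SZ_256M 0x10000000ULL

	int main(void)
	{
		/* Illustrative example values, not from real hardware */
		uint64_t kernelbase = 0xc0000000ULL;	/* link-time KERNELBASE */
		uint64_t stext_phys = 0x00400000ULL;	/* runtime physical addr of _stext */

		/* virtual_base = ALIGN(KERNELBASE, 256M) + MODULO(_stext.run, 256M) */
		uint64_t virtual_base = (kernelbase & ~(SZ_256M - 1))
				      + (stext_phys & (SZ_256M - 1));

		/* kernstart_addr is the physical address the kernel starts at */
		uint64_t kernstart_addr = stext_phys;

		/* virt_phys_offset = effective kernel virt base - kernstart_addr */
		uint64_t virt_phys_offset = virtual_base - kernstart_addr;

		printf("virtual_base     = 0x%llx\n", (unsigned long long)virtual_base);
		printf("virt_phys_offset = 0x%llx\n", (unsigned long long)virt_phys_offset);
		return 0;
	}

With these example inputs, virtual_base is 0xc0400000 and virt_phys_offset
is 0xc0000000, matching (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff).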
I have tested the patches on 440x platforms only. However, this should
work fine for PPC_47x also, as we only depend on the runtime address
and the current TLB XLAT entry for the startup code, which is available
in r25. I don't have access to a 47x board yet, so it would be great if
somebody could test this on 47x.
Signed-off-by: Suzuki K. Poulose <suzuki@in.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Kumar Gala <galak@kernel.crashing.org>
Cc: Tony Breeds <tony@bakeyournoodle.com>
Cc: Josh Boyer <jwboyer@gmail.com>
Cc: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>
Signed-off-by: Josh Boyer <jwboyer@gmail.com>
-rw-r--r--	arch/powerpc/Kconfig		2
-rw-r--r--	arch/powerpc/kernel/head_44x.S	95
2 files changed, 94 insertions, 3 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2ad5ea827820..1703509649b0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -851,7 +851,7 @@ config DYNAMIC_MEMSTART
 
 config RELOCATABLE
 	bool "Build a relocatable kernel (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM
+	depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && 44x
 	select NONSTATIC_KERNEL
 	help
 	  This builds a kernel image that is capable of running at the
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 0878bf5d8a68..7dd2981bcc50 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -64,6 +64,35 @@ _ENTRY(_start);
 	mr	r31,r3		/* save device tree ptr */
 	li	r24,0		/* CPU number */
 
+#ifdef CONFIG_RELOCATABLE
+	/*
+	 * Relocate ourselves to the current runtime address.
+	 * This is called only by the Boot CPU.
+	 * "relocate" is called with our current runtime virtual
+	 * address.
+	 * r21 will be loaded with the physical runtime address of _stext
+	 */
+	bl	0f				/* Get our runtime address */
+0:	mflr	r21				/* Make it accessible */
+	addis	r21,r21,(_stext - 0b)@ha
+	addi	r21,r21,(_stext - 0b)@l	/* Get our current runtime base */
+
+	/*
+	 * We have the runtime (virtual) address of our base.
+	 * We calculate our shift of offset from a 256M page.
+	 * We could map the 256M page we belong to at PAGE_OFFSET and
+	 * get going from there.
+	 */
+	lis	r4,KERNELBASE@h
+	ori	r4,r4,KERNELBASE@l
+	rlwinm	r6,r21,0,4,31			/* r6 = PHYS_START % 256M */
+	rlwinm	r5,r4,0,4,31			/* r5 = KERNELBASE % 256M */
+	subf	r3,r5,r6			/* r3 = r6 - r5 */
+	add	r3,r4,r3			/* Required Virtual Address */
+
+	bl	relocate
+#endif
+
 	bl	init_cpu_state
 
 	/*
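The rlwinm instructions in the hunk above are ordinary 32-bit AND masks
(PowerPC numbers the most significant bit as bit 0, so mask bits 4-31
keep the low 28 bits). A hedged C model of the target address handed to
relocate(); the helper names are illustrative, not kernel API:

	#include <stdint.h>

	/* rlwinm rD,rS,0,4,31 keeps the offset within a 256M page */
	static uint32_t offset_in_256m(uint32_t addr)
	{
		return addr & 0x0fffffffu;
	}

	/* r3 = KERNELBASE + (PHYS_START % 256M - KERNELBASE % 256M), i.e.
	 * the 256M page containing KERNELBASE plus our runtime offset
	 * within our own 256M page */
	static uint32_t relocate_target(uint32_t kernelbase, uint32_t stext_run)
	{
		return kernelbase + offset_in_256m(stext_run)
				  - offset_in_256m(kernelbase);
	}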
@@ -86,7 +115,64 @@ _ENTRY(_start);
 
 	bl	early_init
 
-#ifdef CONFIG_DYNAMIC_MEMSTART
+#ifdef CONFIG_RELOCATABLE
+	/*
+	 * Relocatable kernel support based on processing of dynamic
+	 * relocation entries.
+	 *
+	 * r25 will contain RPN/ERPN for the start address of memory
+	 * r21 will contain the current offset of _stext
+	 */
+	lis	r3,kernstart_addr@ha
+	la	r3,kernstart_addr@l(r3)
+
+	/*
+	 * Compute the kernstart_addr.
+	 * kernstart_addr => (r6,r8)
+	 * kernstart_addr & ~0xfffffff => (r6,r7)
+	 */
+	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
+	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
+	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
+	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */
+
+	/* Store kernstart_addr */
+	stw	r6,0(r3)	/* higher 32bit */
+	stw	r8,4(r3)	/* lower 32bit */
+
+	/*
+	 * Compute the virt_phys_offset :
+	 * virt_phys_offset = stext.run - kernstart_addr
+	 *
+	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
+	 * When we relocate, we have :
+	 *
+	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
+	 *
+	 * hence:
+	 *	virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
+	 *
+	 */
+
+	/* KERNELBASE&~0xfffffff => (r4,r5) */
+	li	r4, 0		/* higher 32bit */
+	lis	r5,KERNELBASE@h
+	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */
+
+	/*
+	 * 64bit subtraction.
+	 */
+	subfc	r5,r7,r5
+	subfe	r4,r6,r4
+
+	/* Store virt_phys_offset */
+	lis	r3,virt_phys_offset@ha
+	la	r3,virt_phys_offset@l(r3)
+
+	stw	r4,0(r3)
+	stw	r5,4(r3)
+
+#elif defined(CONFIG_DYNAMIC_MEMSTART)
 	/*
 	 * Mapping based, page aligned dynamic kernel loading.
 	 *
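A hedged C model of the hunk above, showing how the 36-bit kernstart_addr
is assembled from the pinned TLB XLAT word (r25) and the runtime _stext
offset (r21), and how the subfc/subfe pair performs the 64-bit subtraction
for virt_phys_offset. Function and parameter names are illustrative, not
kernel API; the XLAT layout follows the comments in the patch:

	#include <stdint.h>

	static uint64_t kernstart_from_xlat(uint32_t xlat, uint32_t stext_run)
	{
		uint32_t erpn = xlat & 0x0000000fu;	/* bits 32-35 of the address */
		uint32_t rpn  = xlat & 0xf0000000u;	/* 256 MB-aligned low word */
		uint32_t low  = rpn | (stext_run & 0x0fffffffu);

		return ((uint64_t)erpn << 32) | low;
	}

	/* virt_phys_offset = (KERNELBASE & ~0xfffffff)
	 *                  - (kernstart_addr & ~0xfffffff);
	 * the assembly does this subtraction in two 32-bit halves */
	static uint64_t virt_phys_offset(uint32_t kernelbase, uint64_t kernstart_addr)
	{
		uint64_t vbase = kernelbase & 0xf0000000u;
		uint64_t pbase = kernstart_addr & ~(uint64_t)0x0fffffffu;

		return vbase - pbase;
	}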
@@ -804,7 +890,12 @@ skpinv:	addi	r4,r4,1				/* Increment */
 	/*
 	 * Configure and load pinned entry into TLB slot 63.
 	 */
-#ifdef CONFIG_DYNAMIC_MEMSTART
+#ifdef CONFIG_NONSTATIC_KERNEL
+	/*
+	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
+	 * entries of the initial mapping set by the boot loader.
+	 * The XLAT entry is stored in r25
+	 */
 
 	/* Read the XLAT entry for our current mapping */
 	tlbre	r25,r23,PPC44x_TLB_XLAT