diff options
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r-- | arch/powerpc/kernel/head_44x.S | 95 |
1 files changed, 93 insertions, 2 deletions
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 0878bf5d8a68..7dd2981bcc50 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S | |||
@@ -64,6 +64,35 @@ _ENTRY(_start); | |||
64 | mr r31,r3 /* save device tree ptr */ | 64 | mr r31,r3 /* save device tree ptr */ |
65 | li r24,0 /* CPU number */ | 65 | li r24,0 /* CPU number */ |
66 | 66 | ||
67 | #ifdef CONFIG_RELOCATABLE | ||
68 | /* | ||
69 | * Relocate ourselves to the current runtime address. | ||
70 | * This is called only by the Boot CPU. | ||
71 | * "relocate" is called with our current runtime virtual | ||
72 | * address. | ||
73 | * r21 will be loaded with the physical runtime address of _stext | ||
74 | */ | ||
75 | bl 0f /* Get our runtime address */ | ||
76 | 0: mflr r21 /* Make it accessible */ | ||
77 | addis r21,r21,(_stext - 0b)@ha | ||
78 | addi r21,r21,(_stext - 0b)@l /* Get our current runtime base */ | ||
79 | |||
80 | /* | ||
81 | * We have the runtime (virtual) address of our base. | ||
82 | * We calculate our shift of offset from a 256M page. | ||
83 | * We could map the 256M page we belong to at PAGE_OFFSET and | ||
84 | * get going from there. | ||
85 | */ | ||
86 | lis r4,KERNELBASE@h | ||
87 | ori r4,r4,KERNELBASE@l | ||
88 | rlwinm r6,r21,0,4,31 /* r6 = PHYS_START % 256M */ | ||
89 | rlwinm r5,r4,0,4,31 /* r5 = KERNELBASE % 256M */ | ||
90 | subf r3,r5,r6 /* r3 = r6 - r5 */ | ||
91 | add r3,r4,r3 /* Required Virtual Address */ | ||
92 | |||
93 | bl relocate | ||
94 | #endif | ||
95 | |||
67 | bl init_cpu_state | 96 | bl init_cpu_state |
68 | 97 | ||
69 | /* | 98 | /* |
@@ -86,7 +115,64 @@ _ENTRY(_start); | |||
86 | 115 | ||
87 | bl early_init | 116 | bl early_init |
88 | 117 | ||
89 | #ifdef CONFIG_DYNAMIC_MEMSTART | 118 | #ifdef CONFIG_RELOCATABLE |
119 | /* | ||
120 | * Relocatable kernel support based on processing of dynamic | ||
121 | * relocation entries. | ||
122 | * | ||
123 | * r25 will contain RPN/ERPN for the start address of memory | ||
124 | * r21 will contain the current offset of _stext | ||
125 | */ | ||
126 | lis r3,kernstart_addr@ha | ||
127 | la r3,kernstart_addr@l(r3) | ||
128 | |||
129 | /* | ||
130 | * Compute the kernstart_addr. | ||
131 | * kernstart_addr => (r6,r8) | ||
132 | * kernstart_addr & ~0xfffffff => (r6,r7) | ||
133 | */ | ||
134 | rlwinm r6,r25,0,28,31 /* ERPN. Bits 32-35 of Address */ | ||
135 | rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */ | ||
136 | rlwinm r8,r21,0,4,31 /* r8 = (_stext & 0xfffffff) */ | ||
137 | or r8,r7,r8 /* Compute the lower 32bit of kernstart_addr */ | ||
138 | |||
139 | /* Store kernstart_addr */ | ||
140 | stw r6,0(r3) /* higher 32bit */ | ||
141 | stw r8,4(r3) /* lower 32bit */ | ||
142 | |||
143 | /* | ||
144 | * Compute the virt_phys_offset : | ||
145 | * virt_phys_offset = stext.run - kernstart_addr | ||
146 | * | ||
147 | * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff) | ||
148 | * When we relocate, we have : | ||
149 | * | ||
150 | * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff) | ||
151 | * | ||
152 | * hence: | ||
153 | * virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff) | ||
154 | * | ||
155 | */ | ||
156 | |||
157 | /* KERNELBASE&~0xfffffff => (r4,r5) */ | ||
158 | li r4, 0 /* higher 32bit */ | ||
159 | lis r5,KERNELBASE@h | ||
160 | rlwinm r5,r5,0,0,3 /* Align to 256M, lower 32bit */ | ||
161 | |||
162 | /* | ||
163 | * 64bit subtraction. | ||
164 | */ | ||
165 | subfc r5,r7,r5 | ||
166 | subfe r4,r6,r4 | ||
167 | |||
168 | /* Store virt_phys_offset */ | ||
169 | lis r3,virt_phys_offset@ha | ||
170 | la r3,virt_phys_offset@l(r3) | ||
171 | |||
172 | stw r4,0(r3) | ||
173 | stw r5,4(r3) | ||
174 | |||
175 | #elif defined(CONFIG_DYNAMIC_MEMSTART) | ||
90 | /* | 176 | /* |
91 | * Mapping based, page aligned dynamic kernel loading. | 177 | * Mapping based, page aligned dynamic kernel loading. |
92 | * | 178 | * |
@@ -804,7 +890,12 @@ skpinv: addi r4,r4,1 /* Increment */ | |||
804 | /* | 890 | /* |
805 | * Configure and load pinned entry into TLB slot 63. | 891 | * Configure and load pinned entry into TLB slot 63. |
806 | */ | 892 | */ |
807 | #ifdef CONFIG_DYNAMIC_MEMSTART | 893 | #ifdef CONFIG_NONSTATIC_KERNEL |
894 | /* | ||
895 | * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT | ||
896 | * entries of the initial mapping set by the boot loader. | ||
897 | * The XLAT entry is stored in r25 | ||
898 | */ | ||
808 | 899 | ||
809 | /* Read the XLAT entry for our current mapping */ | 900 | /* Read the XLAT entry for our current mapping */ |
810 | tlbre r25,r23,PPC44x_TLB_XLAT | 901 | tlbre r25,r23,PPC44x_TLB_XLAT |