aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel/head_44x.S
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel/head_44x.S')
-rw-r--r--arch/powerpc/kernel/head_44x.S107
1 files changed, 107 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index b725dab0f88a..7dd2981bcc50 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -64,6 +64,35 @@ _ENTRY(_start);
64 mr r31,r3 /* save device tree ptr */ 64 mr r31,r3 /* save device tree ptr */
65 li r24,0 /* CPU number */ 65 li r24,0 /* CPU number */
66 66
67#ifdef CONFIG_RELOCATABLE
68/*
69 * Relocate ourselves to the current runtime address.
70 * This is called only by the Boot CPU.
 71 * "relocate" is called with our current runtime virtual
 72 * address.
73 * r21 will be loaded with the physical runtime address of _stext
74 */
75 bl 0f /* Get our runtime address */
760: mflr r21 /* Make it accessible */
77 addis r21,r21,(_stext - 0b)@ha
78 addi r21,r21,(_stext - 0b)@l /* Get our current runtime base */
79
80 /*
 81 * We have the runtime (virtual) address of our base.
82 * We calculate our shift of offset from a 256M page.
83 * We could map the 256M page we belong to at PAGE_OFFSET and
84 * get going from there.
85 */
86 lis r4,KERNELBASE@h
87 ori r4,r4,KERNELBASE@l
88 rlwinm r6,r21,0,4,31 /* r6 = PHYS_START % 256M */
89 rlwinm r5,r4,0,4,31 /* r5 = KERNELBASE % 256M */
90 subf r3,r5,r6 /* r3 = r6 - r5 */
 91	add	r3,r4,r3	/* Required Virtual Address */
92
93 bl relocate
94#endif
95
67 bl init_cpu_state 96 bl init_cpu_state
68 97
69 /* 98 /*
@@ -88,6 +117,65 @@ _ENTRY(_start);
88 117
89#ifdef CONFIG_RELOCATABLE 118#ifdef CONFIG_RELOCATABLE
90 /* 119 /*
120 * Relocatable kernel support based on processing of dynamic
121 * relocation entries.
122 *
123 * r25 will contain RPN/ERPN for the start address of memory
124 * r21 will contain the current offset of _stext
125 */
126 lis r3,kernstart_addr@ha
127 la r3,kernstart_addr@l(r3)
128
129 /*
130 * Compute the kernstart_addr.
131 * kernstart_addr => (r6,r8)
132 * kernstart_addr & ~0xfffffff => (r6,r7)
133 */
134 rlwinm r6,r25,0,28,31 /* ERPN. Bits 32-35 of Address */
135 rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */
136 rlwinm r8,r21,0,4,31 /* r8 = (_stext & 0xfffffff) */
137 or r8,r7,r8 /* Compute the lower 32bit of kernstart_addr */
138
139 /* Store kernstart_addr */
140 stw r6,0(r3) /* higher 32bit */
141 stw r8,4(r3) /* lower 32bit */
142
143 /*
144 * Compute the virt_phys_offset :
145 * virt_phys_offset = stext.run - kernstart_addr
146 *
147 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
148 * When we relocate, we have :
149 *
150 * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
151 *
152 * hence:
153 * virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
154 *
155 */
156
157 /* KERNELBASE&~0xfffffff => (r4,r5) */
 158	li	r4, 0		/* higher 32bit */
159 lis r5,KERNELBASE@h
160 rlwinm r5,r5,0,0,3 /* Align to 256M, lower 32bit */
161
162 /*
163 * 64bit subtraction.
164 */
165 subfc r5,r7,r5
166 subfe r4,r6,r4
167
168 /* Store virt_phys_offset */
169 lis r3,virt_phys_offset@ha
170 la r3,virt_phys_offset@l(r3)
171
172 stw r4,0(r3)
173 stw r5,4(r3)
174
175#elif defined(CONFIG_DYNAMIC_MEMSTART)
176 /*
177 * Mapping based, page aligned dynamic kernel loading.
178 *
91 * r25 will contain RPN/ERPN for the start address of memory 179 * r25 will contain RPN/ERPN for the start address of memory
92 * 180 *
93 * Add the difference between KERNELBASE and PAGE_OFFSET to the 181 * Add the difference between KERNELBASE and PAGE_OFFSET to the
@@ -732,6 +820,8 @@ _GLOBAL(init_cpu_state)
732	/* We use the PVR to differentiate 44x cores from 476 */	820	/* We use the PVR to differentiate 44x cores from 476 */
733 mfspr r3,SPRN_PVR 821 mfspr r3,SPRN_PVR
734 srwi r3,r3,16 822 srwi r3,r3,16
823 cmplwi cr0,r3,PVR_476FPE@h
824 beq head_start_47x
735 cmplwi cr0,r3,PVR_476@h 825 cmplwi cr0,r3,PVR_476@h
736 beq head_start_47x 826 beq head_start_47x
737 cmplwi cr0,r3,PVR_476_ISS@h 827 cmplwi cr0,r3,PVR_476_ISS@h
@@ -800,12 +890,29 @@ skpinv: addi r4,r4,1 /* Increment */
800/* 890/*
801 * Configure and load pinned entry into TLB slot 63. 891 * Configure and load pinned entry into TLB slot 63.
802 */ 892 */
893#ifdef CONFIG_NONSTATIC_KERNEL
894 /*
895 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
896 * entries of the initial mapping set by the boot loader.
897 * The XLAT entry is stored in r25
898 */
899
900 /* Read the XLAT entry for our current mapping */
901 tlbre r25,r23,PPC44x_TLB_XLAT
902
903 lis r3,KERNELBASE@h
904 ori r3,r3,KERNELBASE@l
905
906 /* Use our current RPN entry */
907 mr r4,r25
908#else
803 909
804 lis r3,PAGE_OFFSET@h 910 lis r3,PAGE_OFFSET@h
805 ori r3,r3,PAGE_OFFSET@l 911 ori r3,r3,PAGE_OFFSET@l
806 912
807 /* Kernel is at the base of RAM */ 913 /* Kernel is at the base of RAM */
808 li r4, 0 /* Load the kernel physical address */ 914 li r4, 0 /* Load the kernel physical address */
915#endif
809 916
810 /* Load the kernel PID = 0 */ 917 /* Load the kernel PID = 0 */
811 li r0,0 918 li r0,0