author	Olof Johansson <olof@lixom.net>	2006-08-11 01:07:08 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-08-24 23:27:35 -0400
commit	f39b7a55a84e34e3074b168e30dc73b66e85261d (patch)
tree	9be321bfcd5d0404309b1514127987117c2541cc
parent	2e97425197ecf85641a89e5a4868f8e147cc443f (diff)
[POWERPC] Cleanup CPU inits
Cleanup CPU inits a bit more, Geoff Levand already did some earlier.

 * Move CPU state save to cpu_setup, since cpu_setup is only ever done
   on cpu 0 on 64-bit and save is never done more than once.
 * Rename __restore_cpu_setup to __restore_cpu_ppc970 and add
   function pointers to the cputable to use instead. Powermac always
   has 970 so no need to check there.
 * Rename __970_cpu_preinit to __cpu_preinit_ppc970 and check PVR before
   calling it instead of in it, it's too early to use cputable.
 * Rename pSeries_secondary_smp_init to generic_secondary_smp_init since
   everyone but powermac and iSeries use it.

Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	1
-rw-r--r--	arch/powerpc/kernel/cpu_setup_ppc970.S	99
-rw-r--r--	arch/powerpc/kernel/cputable.c	6
-rw-r--r--	arch/powerpc/kernel/head_64.S	52
-rw-r--r--	arch/powerpc/platforms/cell/smp.c	4
-rw-r--r--	arch/powerpc/platforms/pseries/smp.c	4
-rw-r--r--	include/asm-powerpc/cputable.h	3
7 files changed, 71 insertions(+), 98 deletions(-)
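The commit message above describes the mechanism: CPU state is saved once by cpu_setup on CPU 0, and secondary CPUs replay it through a new per-CPU-type hook in the cputable instead of calling a 970-specific routine directly. The following is an illustrative C sketch only (names ending in _sketch are hypothetical; the real struct lives in include/asm-powerpc/cputable.h and the real dispatch is done in assembly in head_64.S below):

/* Illustrative sketch, not kernel code: cpu_setup runs once on CPU 0
 * and saves state; cpu_restore, when a CPU type provides one, replays
 * that state on secondary processors or at resume. */
struct cpu_spec_sketch {
	void (*cpu_setup)(unsigned long offset, struct cpu_spec_sketch *spec);
	void (*cpu_restore)(void);	/* NULL when no restore is needed */
};

static void secondary_init_sketch(struct cpu_spec_sketch *cur)
{
	/* Secondaries look up the handler in the current cpu_spec and
	 * simply skip the call when none is registered. */
	if (cur->cpu_restore)
		cur->cpu_restore();
}

The point of the indirection is that generic_secondary_smp_init no longer needs to know which CPU it is running on; non-970 platforms just leave the hook unset.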
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a2f95e467a75..c53acd2a6dfc 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -246,6 +246,7 @@ int main(void)
 	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
 	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
 	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
+	DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
 
 #ifndef CONFIG_PPC64
 	DEFINE(pbe_address, offsetof(struct pbe, address));
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index f69af2c5d7b3..f619932794e8 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -16,27 +16,12 @@
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 
-_GLOBAL(__970_cpu_preinit)
-	/*
-	 * Do nothing if not running in HV mode
-	 */
+_GLOBAL(__cpu_preinit_ppc970)
+	/* Do nothing if not running in HV mode */
 	mfmsr	r0
 	rldicl.	r0,r0,4,63
 	beqlr
 
-	/*
-	 * Deal only with PPC970 and PPC970FX.
-	 */
-	mfspr	r0,SPRN_PVR
-	srwi	r0,r0,16
-	cmpwi	r0,0x39
-	beq	1f
-	cmpwi	r0,0x3c
-	beq	1f
-	cmpwi	r0,0x44
-	bnelr
-1:
-
 	/* Make sure HID4:rm_ci is off before MMU is turned off, that large
 	 * pages are enabled with HID4:61 and clear HID5:DCBZ_size and
 	 * HID5:DCBZ32_ill
@@ -72,21 +57,6 @@ _GLOBAL(__970_cpu_preinit)
 	isync
 	blr
 
-_GLOBAL(__setup_cpu_ppc970)
-	mfspr	r0,SPRN_HID0
-	li	r11,5			/* clear DOZE and SLEEP */
-	rldimi	r0,r11,52,8		/* set NAP and DPM */
-	mtspr	SPRN_HID0,r0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	sync
-	isync
-	blr
-
 /* Definitions for the table use to save CPU states */
 #define CS_HID0		0
 #define CS_HID1		8
@@ -101,33 +71,28 @@ cpu_state_storage:
 	.balign	L1_CACHE_BYTES,0
 	.text
 
-/* Called in normal context to backup CPU 0 state. This
- * does not include cache settings. This function is also
- * called for machine sleep. This does not include the MMU
- * setup, BATs, etc... but rather the "special" registers
- * like HID0, HID1, HID4, etc...
- */
-_GLOBAL(__save_cpu_setup)
-	/* Some CR fields are volatile, we back it up all */
-	mfcr	r7
-
-	/* Get storage ptr */
-	LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
 
-	/* We only deal with 970 for now */
-	mfspr	r0,SPRN_PVR
-	srwi	r0,r0,16
-	cmpwi	r0,0x39
-	beq	1f
-	cmpwi	r0,0x3c
-	beq	1f
-	cmpwi	r0,0x44
-	bne	2f
-
-1:	/* skip if not running in HV mode */
+_GLOBAL(__setup_cpu_ppc970)
+	/* Do nothing if not running in HV mode */
 	mfmsr	r0
 	rldicl.	r0,r0,4,63
-	beq	2f
+	beqlr
+
+	mfspr	r0,SPRN_HID0
+	li	r11,5			/* clear DOZE and SLEEP */
+	rldimi	r0,r11,52,8		/* set NAP and DPM */
+	mtspr	SPRN_HID0,r0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	sync
+	isync
+
+	/* Save away cpu state */
+	LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
 
 	/* Save HID0,1,4 and 5 */
 	mfspr	r3,SPRN_HID0
@@ -139,35 +104,19 @@ _GLOBAL(__save_cpu_setup)
 	mfspr	r3,SPRN_HID5
 	std	r3,CS_HID5(r5)
 
-2:
-	mtcr	r7
 	blr
 
 /* Called with no MMU context (typically MSR:IR/DR off) to
  * restore CPU state as backed up by the previous
  * function. This does not include cache setting
  */
-_GLOBAL(__restore_cpu_setup)
-	/* Get storage ptr (FIXME when using anton reloc as we
-	 * are running with translation disabled here
-	 */
-	LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
-
-	/* We only deal with 970 for now */
-	mfspr	r0,SPRN_PVR
-	srwi	r0,r0,16
-	cmpwi	r0,0x39
-	beq	1f
-	cmpwi	r0,0x3c
-	beq	1f
-	cmpwi	r0,0x44
-	bnelr
-
-1:	/* skip if not running in HV mode */
+_GLOBAL(__restore_cpu_ppc970)
+	/* Do nothing if not running in HV mode */
 	mfmsr	r0
 	rldicl.	r0,r0,4,63
 	beqlr
 
+	LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
 	/* Before accessing memory, we make sure rm_ci is clear */
 	li	r0,0
 	mfspr	r3,SPRN_HID4
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 272e43622fd6..306da4cd37a0 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -39,7 +39,10 @@ extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
 #endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC64
 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
+extern void __restore_cpu_ppc970(void);
+#endif /* CONFIG_PPC64 */
 
 /* This table only contains "desktop" CPUs, it need to be filled with embedded
  * ones as well...
@@ -184,6 +187,7 @@ struct cpu_spec cpu_specs[] = {
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
 		.cpu_setup		= __setup_cpu_ppc970,
+		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "ppc970",
@@ -199,6 +203,7 @@ struct cpu_spec cpu_specs[] = {
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
 		.cpu_setup		= __setup_cpu_ppc970,
+		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "ppc970",
@@ -214,6 +219,7 @@ struct cpu_spec cpu_specs[] = {
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
 		.cpu_setup		= __setup_cpu_ppc970,
+		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "ppc970",
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 6ff3cf506088..e9963d9f335a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -132,7 +132,7 @@ _GLOBAL(__secondary_hold)
 	bne	100b
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
-	LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
+	LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
 	mtctr	r4
 	mr	r3,r24
 	bctr
@@ -1484,19 +1484,17 @@ fwnmi_data_area:
 	. = 0x8000
 
 /*
- * On pSeries, secondary processors spin in the following code.
+ * On pSeries and most other platforms, secondary processors spin
+ * in the following code.
  * At entry, r3 = this processor's number (physical cpu id)
  */
-_GLOBAL(pSeries_secondary_smp_init)
+_GLOBAL(generic_secondary_smp_init)
 	mr	r24,r3
 
 	/* turn on 64-bit mode */
 	bl	.enable_64b_mode
 	isync
 
-	/* Copy some CPU settings from CPU 0 */
-	bl	.__restore_cpu_setup
-
 	/* Set up a paca value for this processor. Since we have the
 	 * physical cpu id in r24, we need to search the pacas to find
 	 * which logical id maps to our physical one.
@@ -1522,15 +1520,28 @@ _GLOBAL(pSeries_secondary_smp_init)
 					/* start. */
 	sync
 
-	/* Create a temp kernel stack for use before relocation is on. */
+#ifndef CONFIG_SMP
+	b	3b			/* Never go on non-SMP */
+#else
+	cmpwi	0,r23,0
+	beq	3b			/* Loop until told to go */
+
+	/* See if we need to call a cpu state restore handler */
+	LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
+	ld	r23,0(r23)
+	ld	r23,CPU_SPEC_RESTORE(r23)
+	cmpdi	0,r23,0
+	beq	4f
+	ld	r23,0(r23)
+	mtctr	r23
+	bctrl
+
+4:	/* Create a temp kernel stack for use before relocation is on. */
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
 
-	cmpwi	0,r23,0
-#ifdef CONFIG_SMP
-	bne	.__secondary_start
+	b	.__secondary_start
 #endif
-	b	3b			/* Loop until told to go */
 
 #ifdef CONFIG_PPC_ISERIES
 _STATIC(__start_initialization_iSeries)
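The restore-handler call added in the hunk above is easy to miss in assembly. A hedged C rendering of those few instructions follows (struct and function names here are illustrative; the descriptor detail reflects the 64-bit PowerPC ELFv1 ABI, where a function pointer points at a descriptor whose first doubleword is the entry address, which is why the code loads r23 twice before mtctr/bctrl):

/* Pseudo-C for the new code in generic_secondary_smp_init; the kernel
 * does this in assembly, before relocation is enabled. */
struct func_desc {
	unsigned long entry;		/* actual code address */
	unsigned long toc;
	unsigned long env;
};

struct cpu_spec_view {
	/* ... other cputable fields ... */
	struct func_desc *cpu_restore;	/* at offset CPU_SPEC_RESTORE */
};

extern struct cpu_spec_view *cur_cpu_spec;

static void maybe_restore_cpu_state(void)
{
	struct func_desc *desc = cur_cpu_spec->cpu_restore;	/* ld, ld */

	if (!desc)				/* cmpdi; beq 4f */
		return;
	((void (*)(void))desc->entry)();	/* ld r23,0(r23); mtctr; bctrl */
}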
@@ -1611,7 +1622,16 @@ _GLOBAL(__start_initialization_multiplatform)
 	bl	.enable_64b_mode
 
 	/* Setup some critical 970 SPRs before switching MMU off */
-	bl	.__970_cpu_preinit
+	mfspr	r0,SPRN_PVR
+	srwi	r0,r0,16
+	cmpwi	r0,0x39			/* 970 */
+	beq	1f
+	cmpwi	r0,0x3c			/* 970FX */
+	beq	1f
+	cmpwi	r0,0x44			/* 970MP */
+	bne	2f
+1:	bl	.__cpu_preinit_ppc970
+2:
 
 	/* Switch off MMU if not already */
 	LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
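Per the commit message, the PVR check now lives in the caller rather than in __cpu_preinit_ppc970 itself. The values tested above come straight from the diff comments; a small hedged C equivalent of the test (the helper name is hypothetical):

/* Sketch of the PVR test done before calling __cpu_preinit_ppc970.
 * The processor version sits in the upper 16 bits of the PVR. */
static int is_ppc970_family(unsigned int pvr)
{
	unsigned int ver = pvr >> 16;	/* srwi r0,r0,16 */

	return ver == 0x39 ||		/* 970   */
	       ver == 0x3c ||		/* 970FX */
	       ver == 0x44;		/* 970MP */
}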
@@ -1782,7 +1802,7 @@ _GLOBAL(pmac_secondary_start)
 	isync
 
 	/* Copy some CPU settings from CPU 0 */
-	bl	.__restore_cpu_setup
+	bl	.__restore_cpu_ppc970
 
 	/* pSeries do that early though I don't think we really need it */
 	mfmsr	r3
@@ -1932,12 +1952,6 @@ _STATIC(start_here_multiplatform)
 	mr	r5,r26
 	bl	.identify_cpu
 
-	/* Save some low level config HIDs of CPU0 to be copied to
-	 * other CPUs later on, or used for suspend/resume
-	 */
-	bl	.__save_cpu_setup
-	sync
-
 	/* Do very early kernel initializations, including initial hash table,
 	 * stab and slb setup before we turn on relocation. */
 
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 46aef0640742..1c0acbad7425 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -57,7 +57,7 @@
  */
 static cpumask_t of_spin_map;
 
-extern void pSeries_secondary_smp_init(unsigned long);
+extern void generic_secondary_smp_init(unsigned long);
 
 /**
  * smp_startup_cpu() - start the given cpu
@@ -74,7 +74,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 {
 	int status;
 	unsigned long start_here = __pa((u32)*((unsigned long *)
-					pSeries_secondary_smp_init));
+					generic_secondary_smp_init));
 	unsigned int pcpu;
 	int start_cpu;
 
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index f39dad8b99e0..c6624b8a0e77 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -62,7 +62,7 @@
  */
 static cpumask_t of_spin_map;
 
-extern void pSeries_secondary_smp_init(unsigned long);
+extern void generic_secondary_smp_init(unsigned long);
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -270,7 +270,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 {
 	int status;
 	unsigned long start_here = __pa((u32)*((unsigned long *)
-					pSeries_secondary_smp_init));
+					generic_secondary_smp_init));
 	unsigned int pcpu;
 	int start_cpu;
 
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 1ba3c9983614..748bc1805da9 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -36,6 +36,7 @@
 struct cpu_spec;
 
 typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+typedef void (*cpu_restore_t)(void);
 
 enum powerpc_oprofile_type {
 	PPC_OPROFILE_INVALID = 0,
@@ -65,6 +66,8 @@ struct cpu_spec {
 	 * BHT, SPD, etc... from head.S before branching to identify_machine
 	 */
 	cpu_setup_t	cpu_setup;
+	/* Used to restore cpu setup on secondary processors and at resume */
+	cpu_restore_t	cpu_restore;
 
 	/* Used by oprofile userspace to select the right counters */
 	char *oprofile_cpu_type;