Diffstat (limited to 'arch/powerpc/kernel/cpu_setup_a2.S')
-rw-r--r--  arch/powerpc/kernel/cpu_setup_a2.S | 114
1 file changed, 114 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
new file mode 100644
index 000000000000..7f818feaa7a5
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -0,0 +1,114 @@
/*
 * A2 specific assembly support code
 *
 * Copyright 2009 Ben Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/processor.h>
#include <asm/reg_a2.h>
#include <asm/reg.h>
#include <asm/thread_info.h>

/*
 * Disable thdid and class fields in ERATs to bump the PID to its full
 * 14-bit capacity. This also prevents external LPID accesses, but that
 * isn't a problem when not running as a guest. Under PV, this setting
 * will be ignored and MMUCR will return the right number of PID bits
 * we can use.
 */
#define MMUCR1_EXTEND_PID \
	(MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
	 MMUCR1_DTTID | MMUCR1_DCCD)

/*
 * Use extended PIDs if enabled.
 * Don't clear the ERATs on context sync events and enable I & D LRU.
 * Enable ERAT back invalidate when tlbwe overwrites an entry.
 */
#define INITIAL_MMUCR1 \
	(MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
	 MMUCR1_DRRE | MMUCR1_TLBWE_BINV)

_GLOBAL(__setup_cpu_a2)
	/* Some of these are actually thread local and some are
	 * core local, but doing it always won't hurt.
	 */

#ifdef CONFIG_PPC_WSP_COPRO
	/* Make sure ACOP starts out as zero */
	li	r3,0
	mtspr	SPRN_ACOP,r3

	/* Enable icswx instruction */
	mfspr	r3,SPRN_A2_CCR2
	ori	r3,r3,A2_CCR2_ENABLE_ICSWX
	mtspr	SPRN_A2_CCR2,r3

	/* Unmask all CTs in HACOP */
	li	r3,-1
	mtspr	SPRN_HACOP,r3
#endif /* CONFIG_PPC_WSP_COPRO */

	/* Enable doorbell */
	mfspr	r3,SPRN_A2_CCR2
	oris	r3,r3,A2_CCR2_ENABLE_PC@h
	mtspr	SPRN_A2_CCR2,r3
	isync
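	/* The isync above is what makes the CCR2 update take effect
	 * before execution continues; the mtspr alone is not
	 * context-synchronizing.
	 */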

	/* Setup CCR0 to disable power saving for now as it's busted
	 * in the current implementations. Setup CCR1 to wake on
	 * interrupts normally (we write the default value but who
	 * knows what FW may have clobbered...)
	 */
	li	r3,0
	mtspr	SPRN_A2_CCR0, r3
	LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
	mtspr	SPRN_A2_CCR1, r3

	/* Initialise MMUCR1 */
	lis	r3,INITIAL_MMUCR1@h
	ori	r3,r3,INITIAL_MMUCR1@l
	mtspr	SPRN_MMUCR1,r3

	/* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
	LOAD_REG_IMMEDIATE(r3, 0x000a7531)
	mtspr	SPRN_MMUCR2,r3
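	/* Note: each nibble of the MMUCR2 value above presumably encodes
	 * one enabled page size as a Book3E-style size code (4^n KB):
	 * 1 = 4K, 3 = 64K, 5 = 1M, 7 = 16M, 0xa = 1G, matching the list
	 * in the comment above.
	 */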

	/* Set MMUCR3 to write all thid bits to the TLB */
	LOAD_REG_IMMEDIATE(r3, 0x0000000f)
	mtspr	SPRN_MMUCR3,r3

	/* Don't do ERAT stuff if running in guest mode */
	mfmsr	r3
	andis.	r0,r3,MSR_GS@h
	bne	1f

	/* Now set the I-ERAT watermark to 15 */
	lis	r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
	mtspr	SPRN_MMUCR0, r4
	li	r4,A2_IERAT_SIZE-1
	PPC_ERATWE(r4,r4,3)

	/* Now set the D-ERAT watermark to 31 */
	lis	r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
	mtspr	SPRN_MMUCR0, r4
	li	r4,A2_DERAT_SIZE-1
	PPC_ERATWE(r4,r4,3)
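	/* In both blocks above, MMUCR0 selects which ERAT array the
	 * eratwe that follows targets (I-side or D-side), and the value
	 * written (the last entry index of each array) becomes the
	 * watermark named in the comments. The precise meaning of
	 * MMUCR0_ECL and of the word-select operand 3 is A2-specific;
	 * see asm/reg_a2.h and asm/ppc-opcode.h for the definitions.
	 */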

	/* And invalidate the beast just in case. That won't get rid of
	 * a bolted entry, though it will be in the LRU and so will go
	 * away eventually, but let's not bother for now.
	 */
	PPC_ERATILX(0,0,0)
1:
	blr

_GLOBAL(__restore_cpu_a2)
	b	__setup_cpu_a2