Diffstat (limited to 'arch/sparc64')
36 files changed, 1217 insertions, 1834 deletions
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index af0e9411b83e..fa06ea04837b 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,6 +33,14 @@ config DEBUG_BOOTMEM
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
+config DEBUG_PAGEALLOC
+	bool "Page alloc debugging"
+	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+	help
+	  Unmap pages from the kernel linear mapping after free_pages().
+	  This results in a large slowdown, but helps to find certain types
+	  of memory corruptions.
+
 config MCOUNT
 	bool
 	depends on STACK_DEBUG
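The new DEBUG_PAGEALLOC option trades speed for early detection: once a page is freed and dropped from the kernel linear mapping, a stale pointer trips a fault immediately instead of silently scribbling over reused memory. A rough userspace analogy of the same idea, in plain C with mmap/mprotect (not kernel code, just an illustration):

	/* Userspace analogy of DEBUG_PAGEALLOC: revoke access to a "freed"
	 * page so any later use faults at once instead of corrupting data.
	 */
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		p[0] = 42;			/* normal use while "allocated" */
		/* "free" the page: make it inaccessible, keep the range reserved */
		if (mprotect(p, psz, PROT_NONE))
			return 1;
		printf("page poisoned; touching it now would SIGSEGV\n");
		/* p[0] = 1;  <- with protection revoked this faults immediately */
		return 0;
	}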
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 48756958116b..77ef5df4e5a7 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -39,6 +39,8 @@ struct cpu_fp_info linux_sparc_fpu[] = {
 	{ 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
 	{ 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
 	{ 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
+	{ 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU"},
+	{ 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"},
 };
 
 #define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
@@ -53,6 +55,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
 	{ 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
 	{ 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
 	{ 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
+	{ 0x3e, 0x19, "TI UltraSparc IV+ (Panther)"},
+	{ 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"},
 };
 
 #define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index d710274e516b..df9a1ca8fd77 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -135,6 +135,28 @@ void __init device_scan(void)
 		cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
 							    "clock-frequency",
 							    0);
+		cpu_data(0).dcache_size = prom_getintdefault(cpu_node,
+							     "dcache-size",
+							     16 * 1024);
+		cpu_data(0).dcache_line_size =
+			prom_getintdefault(cpu_node, "dcache-line-size", 32);
+		cpu_data(0).icache_size = prom_getintdefault(cpu_node,
+							     "icache-size",
+							     16 * 1024);
+		cpu_data(0).icache_line_size =
+			prom_getintdefault(cpu_node, "icache-line-size", 32);
+		cpu_data(0).ecache_size = prom_getintdefault(cpu_node,
+							     "ecache-size",
+							     4 * 1024 * 1024);
+		cpu_data(0).ecache_line_size =
+			prom_getintdefault(cpu_node, "ecache-line-size", 64);
+		printk("CPU[0]: Caches "
+		       "D[sz(%d):line_sz(%d)] "
+		       "I[sz(%d):line_sz(%d)] "
+		       "E[sz(%d):line_sz(%d)]\n",
+		       cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
+		       cpu_data(0).icache_size, cpu_data(0).icache_line_size,
+		       cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
 	}
 #endif
 
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
index 538522848ad4..acc889a7f9c1 100644
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -9,17 +9,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 
-#if PAGE_SHIFT == 13
-#define SZ_BITS _PAGE_SZ8K
-#elif PAGE_SHIFT == 16
-#define SZ_BITS _PAGE_SZ64K
-#elif PAGE_SHIFT == 19
-#define SZ_BITS _PAGE_SZ512K
-#elif PAGE_SHIFT == 22
-#define SZ_BITS _PAGE_SZ4MB
-#endif
-
-#define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS)
+#define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS)
 
 #define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
 #define VPTE_SHIFT (PAGE_SHIFT - 3)
@@ -163,7 +153,6 @@ sparc64_vpte_continue:
 	stxa %g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
 	retry				! Load PTE once again
 
-#undef SZ_BITS
 #undef VALID_SZ_BITS
 #undef VPTE_SHIFT
 #undef VPTE_BITS
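The per-PAGE_SHIFT SZ_BITS ladder is dropped here in favour of a single _PAGE_SZBITS constant, so presumably the pgtable headers now make that choice once for every user. A sketch of what such a central definition would look like, mirroring the #if chain removed above (treat it as an assumption about the header, not quoted text):

	/* Hypothetical sketch: pick the TTE size bits matching the kernel
	 * page size in one place instead of repeating it per .S file.
	 */
	#if PAGE_SHIFT == 13
	#define _PAGE_SZBITS	_PAGE_SZ8K
	#elif PAGE_SHIFT == 16
	#define _PAGE_SZBITS	_PAGE_SZ64K
	#elif PAGE_SHIFT == 19
	#define _PAGE_SZBITS	_PAGE_SZ512K
	#elif PAGE_SHIFT == 22
	#define _PAGE_SZBITS	_PAGE_SZ4MB
	#endif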
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
index ded2fed23fcc..702d349c1e88 100644
--- a/arch/sparc64/kernel/dtlb_base.S
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -71,7 +71,7 @@
 from_tl1_trap:
 	rdpr %tl, %g5			! For TL==3 test
 	CREATE_VPTE_OFFSET1(%g4, %g6)	! Create VPTE offset
-	be,pn %xcc, 3f			! Yep, special processing
+	be,pn %xcc, kvmap		! Yep, special processing
 	CREATE_VPTE_OFFSET2(%g4, %g6)	! Create VPTE offset
 	cmp %g5, 4			! Last trap level?
 	be,pn %xcc, longpath		! Yep, cannot risk VPTE miss
@@ -83,9 +83,9 @@ from_tl1_trap:
 	nop				! Delay-slot
 9:	stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
 	retry				! Trap return
-3:	brlz,pt %g4, 9b			! Kernel virtual map?
-	xor %g2, %g4, %g5		! Finish bit twiddles
-	ba,a,pt %xcc, kvmap		! Yep, go check for obp/vmalloc
+	nop
+	nop
+	nop
 
 /* DTLB ** ICACHE line 3: winfixups+real_faults */
 longpath:
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index b48349527853..2879b1072921 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,159 +30,6 @@
 	.text
 	.align 32
 
-	.globl sparc64_vpte_patchme1
-	.globl sparc64_vpte_patchme2
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP
- * range (note that this is only possible for instruction miss, data misses to
- * obp range do not use vpte). If so, go back directly to the faulting address.
- * This is because we want to read the tpc, otherwise we have no way of knowing
- * the 8k aligned faulting address if we are using >8k kernel pagesize. This
- * also ensures no vpte range addresses are dropped into tlb while obp is
- * executing (see inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
-	/* Note that kvmap below has verified that the address is
-	 * in the range MODULES_VADDR --> VMALLOC_END already. So
-	 * here we need only check if it is an OBP address or not.
-	 */
-	sethi %hi(LOW_OBP_ADDRESS), %g5
-	cmp %g4, %g5
-	blu,pn %xcc, sparc64_vpte_patchme1
-	mov 0x1, %g5
-	sllx %g5, 32, %g5
-	cmp %g4, %g5
-	blu,pn %xcc, obp_iaddr_patch
-	nop
-
-	/* These two instructions are patched by paginig_init(). */
-sparc64_vpte_patchme1:
-	sethi %hi(0), %g5
-sparc64_vpte_patchme2:
-	or %g5, %lo(0), %g5
-
-	/* With kernel PGD in %g5, branch back into dtlb_backend. */
-	ba,pt %xcc, sparc64_kpte_continue
-	andn %g1, 0x3, %g1	/* Finish PMD offset adjustment. */
-
-vpte_noent:
-	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
-	 * skip over the trap instruction so that the top level
-	 * TLB miss handler will thing this %g5 value is just an
-	 * invalid PTE, thus branching to full fault processing.
-	 */
-	mov TLB_SFSR, %g1
-	stxa %g4, [%g1 + %g1] ASI_DMMU
-	done
-
-	.globl obp_iaddr_patch
-obp_iaddr_patch:
-	/* These two instructions patched by inherit_prom_mappings(). */
-	sethi %hi(0), %g5
-	or %g5, %lo(0), %g5
-
-	/* Behave as if we are at TL0. */
-	wrpr %g0, 1, %tl
-	rdpr %tpc, %g4	/* Find original faulting iaddr */
-	srlx %g4, 13, %g4	/* Throw out context bits */
-	sllx %g4, 13, %g4	/* g4 has vpn + ctx0 now */
-
-	/* Restore previous TAG_ACCESS. */
-	mov TLB_SFSR, %g1
-	stxa %g4, [%g1 + %g1] ASI_IMMU
-
-	/* Get PMD offset. */
-	srlx %g4, 23, %g6
-	and %g6, 0x7ff, %g6
-	sllx %g6, 2, %g6
-
-	/* Load PMD, is it valid? */
-	lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn %g5, longpath
-	sllx %g5, 11, %g5
-
-	/* Get PTE offset. */
-	srlx %g4, 13, %g6
-	and %g6, 0x3ff, %g6
-	sllx %g6, 3, %g6
-
-	/* Load PTE. */
-	ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn %g5, longpath
-	nop
-
-	/* TLB load and return from trap. */
-	stxa %g5, [%g0] ASI_ITLB_DATA_IN
-	retry
-
-	.globl obp_daddr_patch
-obp_daddr_patch:
-	/* These two instructions patched by inherit_prom_mappings(). */
-	sethi %hi(0), %g5
-	or %g5, %lo(0), %g5
-
-	/* Get PMD offset. */
-	srlx %g4, 23, %g6
-	and %g6, 0x7ff, %g6
-	sllx %g6, 2, %g6
-
-	/* Load PMD, is it valid? */
-	lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn %g5, longpath
-	sllx %g5, 11, %g5
-
-	/* Get PTE offset. */
-	srlx %g4, 13, %g6
-	and %g6, 0x3ff, %g6
-	sllx %g6, 3, %g6
-
-	/* Load PTE. */
-	ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn %g5, longpath
-	nop
-
-	/* TLB load and return from trap. */
-	stxa %g5, [%g0] ASI_DTLB_DATA_IN
-	retry
-
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by prom, as well as by kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use information saved during inherit_prom_mappings() using 8k
- * pagesize.
- */
-	.align 32
-kvmap:
-	sethi %hi(MODULES_VADDR), %g5
-	cmp %g4, %g5
-	blu,pn %xcc, longpath
-	mov (VMALLOC_END >> 24), %g5
-	sllx %g5, 24, %g5
-	cmp %g4, %g5
-	bgeu,pn %xcc, longpath
-	nop
-
-kvmap_check_obp:
-	sethi %hi(LOW_OBP_ADDRESS), %g5
-	cmp %g4, %g5
-	blu,pn %xcc, kvmap_vmalloc_addr
-	mov 0x1, %g5
-	sllx %g5, 32, %g5
-	cmp %g4, %g5
-	blu,pn %xcc, obp_daddr_patch
-	nop
-
-kvmap_vmalloc_addr:
-	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
-	ldxa [%g3 + %g6] ASI_N, %g5
-	brgez,pn %g5, longpath
-	nop
-
-	/* PTE is valid, load into TLB and return from trap. */
-	stxa %g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
-	retry
-
 	/* This is trivial with the new code... */
 	.globl do_fpdis
 do_fpdis:
@@ -525,14 +372,13 @@ cheetah_plus_patch_fpdis:
  *
  * DATA 0: [low 32-bits]  Address of function to call, jmp to this
  *         [high 32-bits] MMU Context Argument 0, place in %g5
- * DATA 1: Address Argument 1, place in %g6
+ * DATA 1: Address Argument 1, place in %g1
  * DATA 2: Address Argument 2, place in %g7
  *
  * With this method we can do most of the cross-call tlb/cache
  * flushing very quickly.
  *
- * Current CPU's IRQ worklist table is locked into %g1,
- * don't touch.
+ * Current CPU's IRQ worklist table is locked into %g6, don't touch.
  */
 	.text
 	.align 32
@@ -1006,13 +852,14 @@ cheetah_plus_dcpe_trap_vector:
 	nop
 
 do_cheetah_plus_data_parity:
-	ba,pt %xcc, etrap
+	rdpr %pil, %g2
+	wrpr %g0, 15, %pil
+	ba,pt %xcc, etrap_irq
 	rd %pc, %g7
 	mov 0x0, %o0
 	call cheetah_plus_parity_error
 	add %sp, PTREGS_OFF, %o1
-	ba,pt %xcc, rtrap
-	clr %l6
+	ba,a,pt %xcc, rtrap_irq
 
 cheetah_plus_dcpe_trap_vector_tl1:
 	membar #Sync
@@ -1036,13 +883,14 @@ cheetah_plus_icpe_trap_vector:
 	nop
 
 do_cheetah_plus_insn_parity:
-	ba,pt %xcc, etrap
+	rdpr %pil, %g2
+	wrpr %g0, 15, %pil
+	ba,pt %xcc, etrap_irq
 	rd %pc, %g7
 	mov 0x1, %o0
 	call cheetah_plus_parity_error
 	add %sp, PTREGS_OFF, %o1
-	ba,pt %xcc, rtrap
-	clr %l6
+	ba,a,pt %xcc, rtrap_irq
 
 cheetah_plus_icpe_trap_vector_tl1:
 	membar #Sync
@@ -1075,6 +923,10 @@ do_dcpe_tl1:
 	nop
 	wrpr %g1, %tl		! Restore original trap level
 do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	sethi %hi(dcache_parity_tl1_occurred), %g2
+	lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
+	add %g1, 1, %g1
+	stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
 	/* Reset D-cache parity */
 	sethi %hi(1 << 16), %g1	! D-cache size
 	mov (1 << 5), %g2	! D-cache line size
@@ -1121,6 +973,10 @@ do_icpe_tl1:
 	nop
 	wrpr %g1, %tl		! Restore original trap level
do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	sethi %hi(icache_parity_tl1_occurred), %g2
+	lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
+	add %g1, 1, %g1
+	stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
 	/* Flush I-cache */
 	sethi %hi(1 << 15), %g1	! I-cache size
 	mov (1 << 5), %g2	! I-cache line size
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 1fa06c4e3bdb..89406f9649a9 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -80,15 +80,165 @@ sparc_ramdisk_image64:
 	.xword 0
 	.word _end
 
-	/* We must be careful, 32-bit OpenBOOT will get confused if it
-	 * tries to save away a register window to a 64-bit kernel
-	 * stack address. Flush all windows, disable interrupts,
-	 * remap if necessary, jump onto kernel trap table, then kernel
-	 * stack, or else we die.
+	/* PROM cif handler code address is in %o4. */
+sparc64_boot:
+1:	rd %pc, %g7
+	set 1b, %g1
+	cmp %g1, %g7
+	be,pn %xcc, sparc64_boot_after_remap
+	mov %o4, %l7
+
+	/* We need to remap the kernel. Use position independent
+	 * code to remap us to KERNBASE.
 	 *
-	 * PROM entry point is on %o4
+	 * SILO can invoke us with 32-bit address masking enabled,
+	 * so make sure that's clear.
 	 */
-sparc64_boot:
+	rdpr %pstate, %g1
+	andn %g1, PSTATE_AM, %g1
+	wrpr %g1, 0x0, %pstate
+	ba,a,pt %xcc, 1f
+
+	.globl prom_finddev_name, prom_chosen_path
+	.globl prom_getprop_name, prom_mmu_name
+	.globl prom_callmethod_name, prom_translate_name
+	.globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
+	.globl prom_boot_mapped_pc, prom_boot_mapping_mode
+	.globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
+prom_finddev_name:
+	.asciz "finddevice"
+prom_chosen_path:
+	.asciz "/chosen"
+prom_getprop_name:
+	.asciz "getprop"
+prom_mmu_name:
+	.asciz "mmu"
+prom_callmethod_name:
+	.asciz "call-method"
+prom_translate_name:
+	.asciz "translate"
+prom_map_name:
+	.asciz "map"
+prom_unmap_name:
+	.asciz "unmap"
+	.align 4
+prom_mmu_ihandle_cache:
+	.word 0
+prom_boot_mapped_pc:
+	.word 0
+prom_boot_mapping_mode:
+	.word 0
+	.align 8
+prom_boot_mapping_phys_high:
+	.xword 0
+prom_boot_mapping_phys_low:
+	.xword 0
+1:
+	rd %pc, %l0
+	mov (1b - prom_finddev_name), %l1
+	mov (1b - prom_chosen_path), %l2
+	mov (1b - prom_boot_mapped_pc), %l3
+	sub %l0, %l1, %l1
+	sub %l0, %l2, %l2
+	sub %l0, %l3, %l3
+	stw %l0, [%l3]
+	sub %sp, (192 + 128), %sp
+
+	/* chosen_node = prom_finddevice("/chosen") */
+	stx %l1, [%sp + 2047 + 128 + 0x00]	! service, "finddevice"
+	mov 1, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x08]	! num_args, 1
+	stx %l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
+	stx %l2, [%sp + 2047 + 128 + 0x18]	! arg1, "/chosen"
+	stx %g0, [%sp + 2047 + 128 + 0x20]	! ret1
+	call %l7
+	add %sp, (2047 + 128), %o0		! argument array
+
+	ldx [%sp + 2047 + 128 + 0x20], %l4	! chosen device node
+
+	mov (1b - prom_getprop_name), %l1
+	mov (1b - prom_mmu_name), %l2
+	mov (1b - prom_mmu_ihandle_cache), %l5
+	sub %l0, %l1, %l1
+	sub %l0, %l2, %l2
+	sub %l0, %l5, %l5
+
+	/* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */
+	stx %l1, [%sp + 2047 + 128 + 0x00]	! service, "getprop"
+	mov 4, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x08]	! num_args, 4
+	mov 1, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
+	stx %l4, [%sp + 2047 + 128 + 0x18]	! arg1, chosen_node
+	stx %l2, [%sp + 2047 + 128 + 0x20]	! arg2, "mmu"
+	stx %l5, [%sp + 2047 + 128 + 0x28]	! arg3, &prom_mmu_ihandle_cache
+	mov 4, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x30]	! arg4, sizeof(arg3)
+	stx %g0, [%sp + 2047 + 128 + 0x38]	! ret1
+	call %l7
+	add %sp, (2047 + 128), %o0		! argument array
+
+	mov (1b - prom_callmethod_name), %l1
+	mov (1b - prom_translate_name), %l2
+	sub %l0, %l1, %l1
+	sub %l0, %l2, %l2
+	lduw [%l5], %l5				! prom_mmu_ihandle_cache
+
+	stx %l1, [%sp + 2047 + 128 + 0x00]	! service, "call-method"
+	mov 3, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x08]	! num_args, 3
+	mov 5, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 5
+	stx %l2, [%sp + 2047 + 128 + 0x18]	! arg1: "translate"
+	stx %l5, [%sp + 2047 + 128 + 0x20]	! arg2: prom_mmu_ihandle_cache
+	srlx %l0, 22, %l3
+	sllx %l3, 22, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x28]	! arg3: vaddr, our PC
+	stx %g0, [%sp + 2047 + 128 + 0x30]	! res1
+	stx %g0, [%sp + 2047 + 128 + 0x38]	! res2
+	stx %g0, [%sp + 2047 + 128 + 0x40]	! res3
+	stx %g0, [%sp + 2047 + 128 + 0x48]	! res4
+	stx %g0, [%sp + 2047 + 128 + 0x50]	! res5
+	call %l7
+	add %sp, (2047 + 128), %o0		! argument array
+
+	ldx [%sp + 2047 + 128 + 0x40], %l1	! translation mode
+	mov (1b - prom_boot_mapping_mode), %l4
+	sub %l0, %l4, %l4
+	stw %l1, [%l4]
+	mov (1b - prom_boot_mapping_phys_high), %l4
+	sub %l0, %l4, %l4
+	ldx [%sp + 2047 + 128 + 0x48], %l2	! physaddr high
+	stx %l2, [%l4 + 0x0]
+	ldx [%sp + 2047 + 128 + 0x50], %l3	! physaddr low
+	stx %l3, [%l4 + 0x8]
+
+	/* Leave service as-is, "call-method" */
+	mov 7, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x08]	! num_args, 7
+	mov 1, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
+	mov (1b - prom_map_name), %l3
+	sub %l0, %l3, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x18]	! arg1: "map"
+	/* Leave arg2 as-is, prom_mmu_ihandle_cache */
+	mov -1, %l3
+	stx %l3, [%sp + 2047 + 128 + 0x28]	! arg3: mode (-1 default)
+	sethi %hi(8 * 1024 * 1024), %l3
+	stx %l3, [%sp + 2047 + 128 + 0x30]	! arg4: size (8MB)
+	sethi %hi(KERNBASE), %l3
+	stx %l3, [%sp + 2047 + 128 + 0x38]	! arg5: vaddr (KERNBASE)
+	stx %g0, [%sp + 2047 + 128 + 0x40]	! arg6: empty
+	mov (1b - prom_boot_mapping_phys_low), %l3
+	sub %l0, %l3, %l3
+	ldx [%l3], %l3
+	stx %l3, [%sp + 2047 + 128 + 0x48]	! arg7: phys addr
+	call %l7
+	add %sp, (2047 + 128), %o0		! argument array
+
+	add %sp, (192 + 128), %sp
+
+sparc64_boot_after_remap:
 	BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
 	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
 	ba,pt %xcc, spitfire_boot
@@ -125,185 +275,7 @@ cheetah_generic_boot:
 	stxa %g0, [%g3] ASI_IMMU
 	membar #Sync
 
-	wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
-	wr %g0, 0, %fprs
-
-	/* Just like for Spitfire, we probe itlb-2 for a mapping which
-	 * matches our current %pc. We take the physical address in
-	 * that mapping and use it to make our own.
-	 */
-
-	/* %g5 holds the tlb data */
-	sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
-	sllx %g5, 32, %g5
-	or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
-	/* Put PADDR tlb data mask into %g3. */
-	sethi %uhi(_PAGE_PADDR), %g3
-	or %g3, %ulo(_PAGE_PADDR), %g3
-	sllx %g3, 32, %g3
-	sethi %hi(_PAGE_PADDR), %g7
-	or %g7, %lo(_PAGE_PADDR), %g7
-	or %g3, %g7, %g3
-
-	set 2 << 16, %l0	/* TLB entry walker. */
-	set 0x1fff, %l2		/* Page mask. */
-	rd %pc, %l3
-	andn %l3, %l2, %g2	/* vaddr comparator */
-
-1:	ldxa [%l0] ASI_ITLB_TAG_READ, %g1
-	membar #Sync
-	andn %g1, %l2, %g1
-	cmp %g1, %g2
-	be,pn %xcc, cheetah_got_tlbentry
-	nop
-	and %l0, (127 << 3), %g1
-	cmp %g1, (127 << 3)
-	blu,pt %xcc, 1b
-	add %l0, (1 << 3), %l0
-
-	/* Search the small TLB. OBP never maps us like that but
-	 * newer SILO can.
-	 */
-	clr %l0
-
-1:	ldxa [%l0] ASI_ITLB_TAG_READ, %g1
-	membar #Sync
-	andn %g1, %l2, %g1
-	cmp %g1, %g2
-	be,pn %xcc, cheetah_got_tlbentry
-	nop
-	cmp %l0, (15 << 3)
-	blu,pt %xcc, 1b
-	add %l0, (1 << 3), %l0
-
-	/* BUG() if we get here... */
-	ta 0x5
-
-cheetah_got_tlbentry:
-	ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0
-	ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
-	membar #Sync
-	and %g1, %g3, %g1
-	set 0x5fff, %l0
-	andn %g1, %l0, %g1
-	or %g5, %g1, %g5
-
-	/* Clear out any KERNBASE area entries. */
-	set 2 << 16, %l0
-	sethi %hi(KERNBASE), %g3
-	sethi %hi(KERNBASE<<1), %g7
-	mov TLB_TAG_ACCESS, %l7
-
-	/* First, check ITLB */
-1:	ldxa [%l0] ASI_ITLB_TAG_READ, %g1
-	membar #Sync
-	andn %g1, %l2, %g1
-	cmp %g1, %g3
-	blu,pn %xcc, 2f
-	cmp %g1, %g7
-	bgeu,pn %xcc, 2f
-	nop
-	stxa %g0, [%l7] ASI_IMMU
-	membar #Sync
-	stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
-	membar #Sync
-
-2:	and %l0, (127 << 3), %g1
-	cmp %g1, (127 << 3)
-	blu,pt %xcc, 1b
-	add %l0, (1 << 3), %l0
-
-	/* Next, check DTLB */
-	set 2 << 16, %l0
-1:	ldxa [%l0] ASI_DTLB_TAG_READ, %g1
-	membar #Sync
-	andn %g1, %l2, %g1
-	cmp %g1, %g3
-	blu,pn %xcc, 2f
-	cmp %g1, %g7
-	bgeu,pn %xcc, 2f
-	nop
-	stxa %g0, [%l7] ASI_DMMU
-	membar #Sync
-	stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
-	membar #Sync
-
-2:	and %l0, (511 << 3), %g1
-	cmp %g1, (511 << 3)
-	blu,pt %xcc, 1b
-	add %l0, (1 << 3), %l0
-
-	/* On Cheetah+, have to check second DTLB. */
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
-	ba,pt %xcc, 9f
-	nop
-
-2:	set 3 << 16, %l0
-1:	ldxa [%l0] ASI_DTLB_TAG_READ, %g1
-	membar #Sync
-	andn %g1, %l2, %g1
-	cmp %g1, %g3
-	blu,pn %xcc, 2f
-	cmp %g1, %g7
-	bgeu,pn %xcc, 2f
-	nop
-	stxa %g0, [%l7] ASI_DMMU
-	membar #Sync
-	stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
-	membar #Sync
-
-2:	and %l0, (511 << 3), %g1
-	cmp %g1, (511 << 3)
-	blu,pt %xcc, 1b
-	add %l0, (1 << 3), %l0
-
-9:
-
-	/* Now lock the TTE we created into ITLB-0 and DTLB-0,
-	 * entry 15 (and maybe 14 too).
-	 */
-	sethi %hi(KERNBASE), %g3
-	set (0 << 16) | (15 << 3), %g7
-	stxa %g3, [%l7] ASI_DMMU
-	membar #Sync
-	stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
-	membar #Sync
-	stxa %g3, [%l7] ASI_IMMU
-	membar #Sync
-	stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
-	membar #Sync
-	flush %g3
-	membar #Sync
-	sethi %hi(_end), %g3		/* Check for bigkernel case */
-	or %g3, %lo(_end), %g3
-	srl %g3, 23, %g3		/* Check if _end > 8M */
-	brz,pt %g3, 1f
-	sethi %hi(KERNBASE), %g3	/* Restore for fixup code below */
-	sethi %hi(0x400000), %g3
-	or %g3, %lo(0x400000), %g3
-	add %g5, %g3, %g5		/* New tte data */
-	andn %g5, (_PAGE_G), %g5
-	sethi %hi(KERNBASE+0x400000), %g3
-	or %g3, %lo(KERNBASE+0x400000), %g3
-	set (0 << 16) | (14 << 3), %g7
-	stxa %g3, [%l7] ASI_DMMU
-	membar #Sync
-	stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
-	membar #Sync
-	stxa %g3, [%l7] ASI_IMMU
-	membar #Sync
-	stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
-	membar #Sync
-	flush %g3
-	membar #Sync
-	sethi %hi(KERNBASE), %g3	/* Restore for fixup code below */
-	ba,pt %xcc, 1f
-	nop
-
-1:	set sun4u_init, %g2
-	jmpl %g2 + %g0, %g0
-	nop
+	ba,a,pt %xcc, jump_to_sun4u_init
 
 spitfire_boot:
 	/* Typically PROM has already enabled both MMU's and both on-chip
@@ -313,6 +285,7 @@ spitfire_boot:
 	stxa %g1, [%g0] ASI_LSU_CONTROL
 	membar #Sync
 
+jump_to_sun4u_init:
 	/*
 	 * Make sure we are in privileged mode, have address masking,
 	 * using the ordinary globals and have enabled floating
@@ -324,151 +297,6 @@ spitfire_boot:
 	wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
 	wr %g0, 0, %fprs
 
-spitfire_create_mappings:
-	/* %g5 holds the tlb data */
-	sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
-	sllx %g5, 32, %g5
-	or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
-	/* Base of physical memory cannot reliably be assumed to be
-	 * at 0x0! Figure out where it happens to be. -DaveM
-	 */
-
-	/* Put PADDR tlb data mask into %g3. */
-	sethi %uhi(_PAGE_PADDR_SF), %g3
-	or %g3, %ulo(_PAGE_PADDR_SF), %g3
-	sllx %g3, 32, %g3
-	sethi %hi(_PAGE_PADDR_SF), %g7
-	or %g7, %lo(_PAGE_PADDR_SF), %g7
-	or %g3, %g7, %g3
-
-	/* Walk through entire ITLB, looking for entry which maps
-	 * our %pc currently, stick PADDR from there into %g5 tlb data.
-	 */
-	clr %l0			/* TLB entry walker. */
-	set 0x1fff, %l2		/* Page mask. */
-	rd %pc, %l3
-	andn %l3, %l2, %g2	/* vaddr comparator */
-1:
-	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
-	ldxa [%l0] ASI_ITLB_TAG_READ, %g1
-	nop
-	nop
-	nop
-	andn %g1, %l2, %g1	/* Get vaddr */
-	cmp %g1, %g2
-	be,a,pn %xcc, spitfire_got_tlbentry
-	ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
-	cmp %l0, (63 << 3)
-	blu,pt %xcc, 1b
-	add %l0, (1 << 3), %l0
-
-	/* BUG() if we get here... */
-	ta 0x5
-
-spitfire_got_tlbentry:
-	/* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
-	nop
-	nop
-	nop
-	and %g1, %g3, %g1	/* Mask to just get paddr bits. */
-	set 0x5fff, %l3		/* Mask offset to get phys base. */
-	andn %g1, %l3, %g1
-
-	/* NOTE: We hold on to %g1 paddr base as we need it below to lock
-	 * NOTE: the PROM cif code into the TLB.
-	 */
-
-	or %g5, %g1, %g5	/* Or it into TAG being built. */
-
-	clr %l0			/* TLB entry walker. */
-	sethi %hi(KERNBASE), %g3	/* 4M lower limit */
-	sethi %hi(KERNBASE<<1), %g7	/* 8M upper limit */
-	mov TLB_TAG_ACCESS, %l7
-1:
-	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
-	ldxa [%l0] ASI_ITLB_TAG_READ, %g1
-	nop
-	nop
-	nop
-	andn %g1, %l2, %g1	/* Get vaddr */
-	cmp %g1, %g3
-	blu,pn %xcc, 2f
-	cmp %g1, %g7
-	bgeu,pn %xcc, 2f
-	nop
-	stxa %g0, [%l7] ASI_IMMU
-	stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
-	membar #Sync
-2:
-	cmp %l0, (63 << 3)
-	blu,pt %xcc, 1b
-	add %l0, (1 << 3), %l0
-
-	nop; nop; nop
-
-	clr %l0			/* TLB entry walker. */
-1:
-	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
-	ldxa [%l0] ASI_DTLB_TAG_READ, %g1
-	nop
-	nop
-	nop
-	andn %g1, %l2, %g1	/* Get vaddr */
-	cmp %g1, %g3
-	blu,pn %xcc, 2f
-	cmp %g1, %g7
-	bgeu,pn %xcc, 2f
-	nop
-	stxa %g0, [%l7] ASI_DMMU
-	stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
-	membar #Sync
-2:
-	cmp %l0, (63 << 3)
-	blu,pt %xcc, 1b
-	add %l0, (1 << 3), %l0
-
-	nop; nop; nop
-
-
-	/* PROM never puts any TLB entries into the MMU with the lock bit
-	 * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
-	 */
-
-	sethi %hi(KERNBASE), %g3
-	mov (63 << 3), %g7
-	stxa %g3, [%l7] ASI_DMMU		/* KERNBASE into TLB TAG */
-	stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS	/* TTE into TLB DATA */
-	membar #Sync
-	stxa %g3, [%l7] ASI_IMMU		/* KERNBASE into TLB TAG */
-	stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS	/* TTE into TLB DATA */
-	membar #Sync
-	flush %g3
-	membar #Sync
-	sethi %hi(_end), %g3		/* Check for bigkernel case */
-	or %g3, %lo(_end), %g3
-	srl %g3, 23, %g3		/* Check if _end > 8M */
-	brz,pt %g3, 2f
-	sethi %hi(KERNBASE), %g3	/* Restore for fixup code below */
-	sethi %hi(0x400000), %g3
-	or %g3, %lo(0x400000), %g3
-	add %g5, %g3, %g5		/* New tte data */
-	andn %g5, (_PAGE_G), %g5
-	sethi %hi(KERNBASE+0x400000), %g3
-	or %g3, %lo(KERNBASE+0x400000), %g3
-	mov (62 << 3), %g7
-	stxa %g3, [%l7] ASI_DMMU
-	stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
-	membar #Sync
-	stxa %g3, [%l7] ASI_IMMU
-	stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
-	membar #Sync
-	flush %g3
-	membar #Sync
-	sethi %hi(KERNBASE), %g3	/* Restore for fixup code below */
-2:	ba,pt %xcc, 1f
-	nop
-1:
 	set sun4u_init, %g2
 	jmpl %g2 + %g0, %g0
 	nop
@@ -483,38 +311,12 @@ sun4u_init:
 	stxa %g0, [%g7] ASI_DMMU
 	membar #Sync
 
-	/* We are now safely (we hope) in Nucleus context (0), rewrite
-	 * the KERNBASE TTE's so they no longer have the global bit set.
-	 * Don't forget to setup TAG_ACCESS first 8-)
-	 */
-	mov TLB_TAG_ACCESS, %g2
-	stxa %g3, [%g2] ASI_IMMU
-	stxa %g3, [%g2] ASI_DMMU
-	membar #Sync
-
 	BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
 
 	ba,pt %xcc, spitfire_tlb_fixup
 	nop
 
 cheetah_tlb_fixup:
-	set (0 << 16) | (15 << 3), %g7
-	ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0
-	ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
-	andn %g1, (_PAGE_G), %g1
-	stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
-	membar #Sync
-
-	ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0
-	ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
-	andn %g1, (_PAGE_G), %g1
-	stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
-	membar #Sync
-
-	/* Kill instruction prefetch queues. */
-	flush %g3
-	membar #Sync
-
 	mov 2, %g2		/* Set TLB type to cheetah+. */
 	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
 
@@ -551,21 +353,6 @@ cheetah_tlb_fixup:
 	nop
 
 spitfire_tlb_fixup:
-	mov (63 << 3), %g7
-	ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
-	andn %g1, (_PAGE_G), %g1
-	stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
-	membar #Sync
-
-	ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
-	andn %g1, (_PAGE_G), %g1
-	stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
-	membar #Sync
-
-	/* Kill instruction prefetch queues. */
-	flush %g3
-	membar #Sync
-
 	/* Set TLB type to spitfire. */
 	mov 0, %g2
 	sethi %hi(tlb_type), %g1
@@ -578,24 +365,6 @@ tlb_fixup_done:
 	mov %sp, %l6
 	mov %o4, %l7
 
-#if 0 /* We don't do it like this anymore, but for historical hack value
-       * I leave this snippet here to show how crazy we can be sometimes. 8-)
-       */
-
-	/* Setup "Linux Current Register", thanks Sun 8-) */
-	wr %g0, 0x1, %pcr
-
-	/* Blackbird errata workaround. See commentary in
-	 * smp.c:smp_percpu_timer_interrupt() for more
-	 * information.
-	 */
-	ba,pt %xcc, 99f
-	nop
-	.align 64
-99:	wr %g6, %g0, %pic
-	rd %pic, %g0
-#endif
-
 	wr %g0, ASI_P, %asi
 	mov 1, %g1
 	sllx %g1, THREAD_SHIFT, %g1
@@ -756,12 +525,7 @@ bootup_user_stack_end:
 
 #include "ttable.S"
 #include "systbls.S"
-
-	.align 1024
-	.globl swapper_pg_dir
-swapper_pg_dir:
-	.word 0
-
+#include "ktlb.S"
 #include "etrap.S"
 #include "rtrap.S"
 #include "winfixup.S"
@@ -776,8 +540,11 @@ swapper_pg_dir:
 prom_tba:	.xword 0
 tlb_type:	.word 0		/* Must NOT end up in BSS */
 	.section ".fixup",#alloc,#execinstr
-	.globl __ret_efault
+
+	.globl __ret_efault, __retl_efault
 __ret_efault:
 	ret
 	restore %g0, -EFAULT, %o0
-
+__retl_efault:
+	retl
+	mov -EFAULT, %o0
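The stores at offsets 0x00, 0x08, 0x10, ... in the new sparc64_boot code build standard IEEE 1275 client-interface argument arrays: a service name, an argument count, a return count, and then the argument/return cells, handed to the PROM cif entry point kept in %l7. An illustrative C view of the same calling convention (the struct and helper names below are ours, not symbols from the tree):

	/* C restatement of the OpenFirmware client-interface calls above.
	 * Each call passes one argument array whose layout matches the
	 * 0x00/0x08/0x10/... stack offsets in the assembly.
	 */
	struct p1275_cif_args {
		const char	*service;	/* +0x00: e.g. "finddevice"    */
		unsigned long	num_args;	/* +0x08                        */
		unsigned long	num_rets;	/* +0x10                        */
		unsigned long	cells[8];	/* +0x18...: args, then returns */
	};

	/* prom_finddevice("/chosen"), as done in sparc64_boot, in C terms. */
	static unsigned long prom_finddevice_chosen(int (*cif)(void *))
	{
		struct p1275_cif_args a = {
			.service  = "finddevice",
			.num_args = 1,
			.num_rets = 1,
		};
		a.cells[0] = (unsigned long) "/chosen";	/* arg1           */
		a.cells[1] = 0;				/* ret1, PROM fills */
		cif(&a);
		return a.cells[1];			/* phandle of /chosen */
	}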
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
new file mode 100644
index 000000000000..7796b37f478c
--- /dev/null
+++ b/arch/sparc64/kernel/ktlb.S
@@ -0,0 +1,198 @@
+/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
+ *
+ * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+	.text
+	.align 32
+
+/*
+ * On a second level vpte miss, check whether the original fault is to the OBP
+ * range (note that this is only possible for instruction miss, data misses to
+ * obp range do not use vpte). If so, go back directly to the faulting address.
+ * This is because we want to read the tpc, otherwise we have no way of knowing
+ * the 8k aligned faulting address if we are using >8k kernel pagesize. This
+ * also ensures no vpte range addresses are dropped into tlb while obp is
+ * executing (see inherit_locked_prom_mappings() rant).
+ */
+sparc64_vpte_nucleus:
+	/* Note that kvmap below has verified that the address is
+	 * in the range MODULES_VADDR --> VMALLOC_END already. So
+	 * here we need only check if it is an OBP address or not.
+	 */
+	sethi %hi(LOW_OBP_ADDRESS), %g5
+	cmp %g4, %g5
+	blu,pn %xcc, kern_vpte
+	mov 0x1, %g5
+	sllx %g5, 32, %g5
+	cmp %g4, %g5
+	blu,pn %xcc, vpte_insn_obp
+	nop
+
+	/* These two instructions are patched by paging_init(). */
+kern_vpte:
+	sethi %hi(swapper_pgd_zero), %g5
+	lduw [%g5 + %lo(swapper_pgd_zero)], %g5
+
+	/* With kernel PGD in %g5, branch back into dtlb_backend. */
+	ba,pt %xcc, sparc64_kpte_continue
+	andn %g1, 0x3, %g1	/* Finish PMD offset adjustment. */
+
+vpte_noent:
+	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
+	 * skip over the trap instruction so that the top level
+	 * TLB miss handler will think this %g5 value is just an
+	 * invalid PTE, thus branching to full fault processing.
+	 */
+	mov TLB_SFSR, %g1
+	stxa %g4, [%g1 + %g1] ASI_DMMU
+	done
+
+vpte_insn_obp:
+	sethi %hi(prom_pmd_phys), %g5
+	ldx [%g5 + %lo(prom_pmd_phys)], %g5
+
+	/* Behave as if we are at TL0. */
+	wrpr %g0, 1, %tl
+	rdpr %tpc, %g4	/* Find original faulting iaddr */
+	srlx %g4, 13, %g4	/* Throw out context bits */
+	sllx %g4, 13, %g4	/* g4 has vpn + ctx0 now */
+
+	/* Restore previous TAG_ACCESS. */
+	mov TLB_SFSR, %g1
+	stxa %g4, [%g1 + %g1] ASI_IMMU
+
+	/* Get PMD offset. */
+	srlx %g4, 23, %g6
+	and %g6, 0x7ff, %g6
+	sllx %g6, 2, %g6
+
+	/* Load PMD, is it valid? */
+	lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn %g5, longpath
+	sllx %g5, 11, %g5
+
+	/* Get PTE offset. */
+	srlx %g4, 13, %g6
+	and %g6, 0x3ff, %g6
+	sllx %g6, 3, %g6
+
+	/* Load PTE. */
+	ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn %g5, longpath
+	nop
+
+	/* TLB load and return from trap. */
+	stxa %g5, [%g0] ASI_ITLB_DATA_IN
+	retry
+
+kvmap_do_obp:
+	sethi %hi(prom_pmd_phys), %g5
+	ldx [%g5 + %lo(prom_pmd_phys)], %g5
+
+	/* Get PMD offset. */
+	srlx %g4, 23, %g6
+	and %g6, 0x7ff, %g6
+	sllx %g6, 2, %g6
+
+	/* Load PMD, is it valid? */
+	lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn %g5, longpath
+	sllx %g5, 11, %g5
+
+	/* Get PTE offset. */
+	srlx %g4, 13, %g6
+	and %g6, 0x3ff, %g6
+	sllx %g6, 3, %g6
+
+	/* Load PTE. */
+	ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn %g5, longpath
+	nop
+
+	/* TLB load and return from trap. */
+	stxa %g5, [%g0] ASI_DTLB_DATA_IN
+	retry
+
+/*
+ * On a first level data miss, check whether this is to the OBP range (note
+ * that such accesses can be made by prom, as well as by kernel using
+ * prom_getproperty on "address"), and if so, do not use vpte access ...
+ * rather, use information saved during inherit_prom_mappings() using 8k
+ * pagesize.
+ */
+	.align 32
+kvmap:
+	brgez,pn %g4, kvmap_nonlinear
+	nop
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	.globl kvmap_linear_patch
+kvmap_linear_patch:
+#endif
+	ba,pt %xcc, kvmap_load
+	xor %g2, %g4, %g5
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	sethi %hi(swapper_pg_dir), %g5
+	or %g5, %lo(swapper_pg_dir), %g5
+	sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
+	srlx %g6, 64 - PAGE_SHIFT, %g6
+	andn %g6, 0x3, %g6
+	lduw [%g5 + %g6], %g5
+	brz,pn %g5, longpath
+	sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
+	srlx %g6, 64 - PAGE_SHIFT, %g6
+	sllx %g5, 11, %g5
+	andn %g6, 0x3, %g6
+	lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn %g5, longpath
+	sllx %g4, 64 - PMD_SHIFT, %g6
+	srlx %g6, 64 - PAGE_SHIFT, %g6
+	sllx %g5, 11, %g5
+	andn %g6, 0x7, %g6
+	ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn %g5, longpath
+	nop
+	ba,a,pt %xcc, kvmap_load
+#endif
+
+kvmap_nonlinear:
+	sethi %hi(MODULES_VADDR), %g5
+	cmp %g4, %g5
+	blu,pn %xcc, longpath
+	mov (VMALLOC_END >> 24), %g5
+	sllx %g5, 24, %g5
+	cmp %g4, %g5
+	bgeu,pn %xcc, longpath
+	nop
+
+kvmap_check_obp:
+	sethi %hi(LOW_OBP_ADDRESS), %g5
+	cmp %g4, %g5
+	blu,pn %xcc, kvmap_vmalloc_addr
+	mov 0x1, %g5
+	sllx %g5, 32, %g5
+	cmp %g4, %g5
+	blu,pn %xcc, kvmap_do_obp
+	nop
+
+kvmap_vmalloc_addr:
+	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
+	ldxa [%g3 + %g6] ASI_N, %g5
+	brgez,pn %g5, longpath
+	nop
+
+kvmap_load:
+	/* PTE is valid, load into TLB and return from trap. */
+	stxa %g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+	retry
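kvmap now dispatches on the faulting address in three steps: linear-mapping addresses (top bit set) are translated directly, everything outside MODULES_VADDR..VMALLOC_END falls through to the long fault path, and within that window OBP addresses use the saved PROM page tables while the rest go through the vmalloc VPTE lookup. A rough C restatement of that classification, with symbolic bounds and purely for illustration:

	/* Classification performed by kvmap, expressed in C. */
	enum kvmap_class { KV_LINEAR, KV_VMALLOC, KV_OBP, KV_FAULT };

	static enum kvmap_class classify(unsigned long vaddr,
					 unsigned long modules_vaddr,
					 unsigned long vmalloc_end,
					 unsigned long low_obp,
					 unsigned long high_obp)
	{
		if ((long) vaddr < 0)		/* top bit set: linear map,   */
			return KV_LINEAR;	/* PTE formed by xor with %g2  */
		if (vaddr < modules_vaddr || vaddr >= vmalloc_end)
			return KV_FAULT;	/* longpath: not a kernel map  */
		if (vaddr >= low_obp && vaddr < high_obp)
			return KV_OBP;		/* walk the saved OBP tables   */
		return KV_VMALLOC;		/* load PTE from the VPTE area */
	}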
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 331382e1a75d..cae5b61fe2f0 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -330,7 +330,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
 {
 	unsigned long sync_reg = (unsigned long) _arg2;
-	u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO);
+	u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO);
 	u64 val;
 	int limit;
 
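The one-character fix matters because the INO can be 32 or larger: with a plain `1` the shift is performed in 32-bit int arithmetic, so the upper half of the 64-bit sync mask can never be set. A small standalone C demonstration of the corrected form (userspace, so 1ULL stands in for the kernel's 1UL on sparc64):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ino = 37;		/* example INO value >= 32 */
		unsigned long long mask = 1ULL << (ino & 0x3f);

		/* With plain `1 << ...` the shift happens in 32-bit int, so for
		 * ino >= 32 the result is undefined and the high mask bits are
		 * never set; promoting the constant to 64 bits fixes it.
		 */
		printf("mask = %#llx\n", mask);
		return 0;
	}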
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 5efbff90d668..774ecbb8a031 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -31,6 +31,7 @@
 #include <asm/visasm.h>
 #include <asm/spitfire.h>
 #include <asm/page.h>
+#include <asm/cpudata.h>
 
 /* Returning from ptrace is a bit tricky because the syscall return
  * low level code assumes any value returned which is negative and
@@ -132,12 +133,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
 		unsigned long start = __pa(kaddr);
 		unsigned long end = start + len;
+		unsigned long dcache_line_size;
+
+		dcache_line_size = local_cpu_data().dcache_line_size;
 
 		if (tlb_type == spitfire) {
-			for (; start < end; start += 32)
+			for (; start < end; start += dcache_line_size)
 				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
 		} else {
-			for (; start < end; start += 32)
+			start &= ~(dcache_line_size - 1);
+			for (; start < end; start += dcache_line_size)
 				__asm__ __volatile__(
 					"stxa %%g0, [%0] %1\n\t"
 					"membar #Sync"
@@ -150,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	if (write && tlb_type == spitfire) {
 		unsigned long start = (unsigned long) kaddr;
 		unsigned long end = start + len;
+		unsigned long icache_line_size;
+
+		icache_line_size = local_cpu_data().icache_line_size;
 
-		for (; start < end; start += 32)
+		for (; start < end; start += icache_line_size)
 			flushi(start);
 	}
 }
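flush_ptrace_access now steps by the D-cache and I-cache line sizes probed at boot rather than a hard-coded 32 bytes, and in the displacement-flush branch it first rounds the start address down to a line boundary so no partial first line is skipped. The general shape of that loop as a self-contained C sketch (the helper below is illustrative, not a kernel API):

	/* Walk [start, end) one cache line at a time, with start rounded
	 * down so the first partially covered line is included too.
	 */
	typedef void (*flush_line_fn)(unsigned long addr);

	static void flush_range(unsigned long start, unsigned long end,
				unsigned long line_size, flush_line_fn flush_line)
	{
		start &= ~(line_size - 1);	/* line_size is a power of two */
		for (; start < end; start += line_size)
			flush_line(start);
	}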
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index ddbed3341a23..4c9c8f241748 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -464,8 +464,6 @@ static void __init boot_flags_init(char *commands)
 	}
 }
 
-extern int prom_probe_memory(void);
-extern unsigned long start, end;
 extern void panic_setup(char *, int *);
 
 extern unsigned short root_flags;
@@ -492,13 +490,8 @@ void register_prom_callbacks(void)
 		   "' linux-.soft2 to .soft2");
 }
 
-extern void paging_init(void);
-
 void __init setup_arch(char **cmdline_p)
 {
-	unsigned long highest_paddr;
-	int i;
-
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
 	strcpy(saved_command_line, *cmdline_p);
@@ -517,40 +510,6 @@ void __init setup_arch(char **cmdline_p)
 	boot_flags_init(*cmdline_p);
 
 	idprom_init();
-	(void) prom_probe_memory();
-
-	/* In paging_init() we tip off this value to see if we need
-	 * to change init_mm.pgd to point to the real alias mapping.
-	 */
-	phys_base = 0xffffffffffffffffUL;
-	highest_paddr = 0UL;
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-		unsigned long top;
-
-		if (sp_banks[i].base_addr < phys_base)
-			phys_base = sp_banks[i].base_addr;
-		top = sp_banks[i].base_addr +
-			sp_banks[i].num_bytes;
-		if (highest_paddr < top)
-			highest_paddr = top;
-	}
-	pfn_base = phys_base >> PAGE_SHIFT;
-
-	switch (tlb_type) {
-	default:
-	case spitfire:
-		kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
-		kern_base &= _PAGE_PADDR_SF;
-		break;
-
-	case cheetah:
-	case cheetah_plus:
-		kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
-		kern_base &= _PAGE_PADDR;
-		break;
-	};
-
-	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
 	if (!root_flags)
 		root_mountflags &= ~MS_RDONLY;
@@ -625,6 +584,9 @@ extern void smp_info(struct seq_file *);
 extern void smp_bogo(struct seq_file *);
 extern void mmu_info(struct seq_file *);
 
+unsigned int dcache_parity_tl1_occurred;
+unsigned int icache_parity_tl1_occurred;
+
 static int show_cpuinfo(struct seq_file *m, void *__unused)
 {
 	seq_printf(m,
@@ -635,6 +597,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 		   "type\t\t: sun4u\n"
 		   "ncpus probed\t: %ld\n"
 		   "ncpus active\t: %ld\n"
+		   "D$ parity tl1\t: %u\n"
+		   "I$ parity tl1\t: %u\n"
 #ifndef CONFIG_SMP
 		   "Cpu0Bogo\t: %lu.%02lu\n"
 		   "Cpu0ClkTck\t: %016lx\n"
@@ -647,7 +611,9 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 		   (prom_prev >> 8) & 0xff,
 		   prom_prev & 0xff,
 		   (long)num_possible_cpus(),
-		   (long)num_online_cpus()
+		   (long)num_online_cpus(),
+		   dcache_parity_tl1_occurred,
+		   icache_parity_tl1_occurred
 #ifndef CONFIG_SMP
 		   , cpu_data(0).udelay_val/(500000/HZ),
 		   (cpu_data(0).udelay_val/(5000/HZ)) % 100,
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b4fc6a5462b2..590df5a16f5a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -93,6 +93,27 @@ void __init smp_store_cpu_info(int id)
 	cpu_data(id).pte_cache[1] = NULL;
 	cpu_data(id).pgd_cache = NULL;
 	cpu_data(id).idle_volume = 1;
+
+	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
+						      16 * 1024);
+	cpu_data(id).dcache_line_size =
+		prom_getintdefault(cpu_node, "dcache-line-size", 32);
+	cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
+						      16 * 1024);
+	cpu_data(id).icache_line_size =
+		prom_getintdefault(cpu_node, "icache-line-size", 32);
+	cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
+						      4 * 1024 * 1024);
+	cpu_data(id).ecache_line_size =
+		prom_getintdefault(cpu_node, "ecache-line-size", 64);
+	printk("CPU[%d]: Caches "
+	       "D[sz(%d):line_sz(%d)] "
+	       "I[sz(%d):line_sz(%d)] "
+	       "E[sz(%d):line_sz(%d)]\n",
+	       id,
+	       cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
+	       cpu_data(id).icache_size, cpu_data(id).icache_line_size,
+	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
 }
 
 static void smp_setup_percpu_timer(void);
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S index 5f9e4fae612e..9cd272ac3ac1 100644 --- a/arch/sparc64/kernel/sys32.S +++ b/arch/sparc64/kernel/sys32.S | |||
@@ -157,173 +157,199 @@ sys32_socketcall: /* %o0=call, %o1=args */ | |||
157 | or %g2, %lo(__socketcall_table_begin), %g2 | 157 | or %g2, %lo(__socketcall_table_begin), %g2 |
158 | jmpl %g2 + %o0, %g0 | 158 | jmpl %g2 + %o0, %g0 |
159 | nop | 159 | nop |
160 | do_einval: | ||
161 | retl | ||
162 | mov -EINVAL, %o0 | ||
160 | 163 | ||
161 | /* Each entry is exactly 32 bytes. */ | ||
162 | .align 32 | 164 | .align 32 |
163 | __socketcall_table_begin: | 165 | __socketcall_table_begin: |
166 | |||
167 | /* Each entry is exactly 32 bytes. */ | ||
164 | do_sys_socket: /* sys_socket(int, int, int) */ | 168 | do_sys_socket: /* sys_socket(int, int, int) */ |
165 | ldswa [%o1 + 0x0] %asi, %o0 | 169 | 1: ldswa [%o1 + 0x0] %asi, %o0 |
166 | sethi %hi(sys_socket), %g1 | 170 | sethi %hi(sys_socket), %g1 |
167 | ldswa [%o1 + 0x8] %asi, %o2 | 171 | 2: ldswa [%o1 + 0x8] %asi, %o2 |
168 | jmpl %g1 + %lo(sys_socket), %g0 | 172 | jmpl %g1 + %lo(sys_socket), %g0 |
169 | ldswa [%o1 + 0x4] %asi, %o1 | 173 | 3: ldswa [%o1 + 0x4] %asi, %o1 |
170 | nop | 174 | nop |
171 | nop | 175 | nop |
172 | nop | 176 | nop |
173 | do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ | 177 | do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ |
174 | ldswa [%o1 + 0x0] %asi, %o0 | 178 | 4: ldswa [%o1 + 0x0] %asi, %o0 |
175 | sethi %hi(sys_bind), %g1 | 179 | sethi %hi(sys_bind), %g1 |
176 | ldswa [%o1 + 0x8] %asi, %o2 | 180 | 5: ldswa [%o1 + 0x8] %asi, %o2 |
177 | jmpl %g1 + %lo(sys_bind), %g0 | 181 | jmpl %g1 + %lo(sys_bind), %g0 |
178 | lduwa [%o1 + 0x4] %asi, %o1 | 182 | 6: lduwa [%o1 + 0x4] %asi, %o1 |
179 | nop | 183 | nop |
180 | nop | 184 | nop |
181 | nop | 185 | nop |
182 | do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ | 186 | do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ |
183 | ldswa [%o1 + 0x0] %asi, %o0 | 187 | 7: ldswa [%o1 + 0x0] %asi, %o0 |
184 | sethi %hi(sys_connect), %g1 | 188 | sethi %hi(sys_connect), %g1 |
185 | ldswa [%o1 + 0x8] %asi, %o2 | 189 | 8: ldswa [%o1 + 0x8] %asi, %o2 |
186 | jmpl %g1 + %lo(sys_connect), %g0 | 190 | jmpl %g1 + %lo(sys_connect), %g0 |
187 | lduwa [%o1 + 0x4] %asi, %o1 | 191 | 9: lduwa [%o1 + 0x4] %asi, %o1 |
188 | nop | 192 | nop |
189 | nop | 193 | nop |
190 | nop | 194 | nop |
191 | do_sys_listen: /* sys_listen(int, int) */ | 195 | do_sys_listen: /* sys_listen(int, int) */ |
192 | ldswa [%o1 + 0x0] %asi, %o0 | 196 | 10: ldswa [%o1 + 0x0] %asi, %o0 |
193 | sethi %hi(sys_listen), %g1 | 197 | sethi %hi(sys_listen), %g1 |
194 | jmpl %g1 + %lo(sys_listen), %g0 | 198 | jmpl %g1 + %lo(sys_listen), %g0 |
195 | ldswa [%o1 + 0x4] %asi, %o1 | 199 | 11: ldswa [%o1 + 0x4] %asi, %o1 |
196 | nop | 200 | nop |
197 | nop | 201 | nop |
198 | nop | 202 | nop |
199 | nop | 203 | nop |
200 | do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ | 204 | do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ |
201 | ldswa [%o1 + 0x0] %asi, %o0 | 205 | 12: ldswa [%o1 + 0x0] %asi, %o0 |
202 | sethi %hi(sys_accept), %g1 | 206 | sethi %hi(sys_accept), %g1 |
203 | lduwa [%o1 + 0x8] %asi, %o2 | 207 | 13: lduwa [%o1 + 0x8] %asi, %o2 |
204 | jmpl %g1 + %lo(sys_accept), %g0 | 208 | jmpl %g1 + %lo(sys_accept), %g0 |
205 | lduwa [%o1 + 0x4] %asi, %o1 | 209 | 14: lduwa [%o1 + 0x4] %asi, %o1 |
206 | nop | 210 | nop |
207 | nop | 211 | nop |
208 | nop | 212 | nop |
209 | do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ | 213 | do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ |
210 | ldswa [%o1 + 0x0] %asi, %o0 | 214 | 15: ldswa [%o1 + 0x0] %asi, %o0 |
211 | sethi %hi(sys_getsockname), %g1 | 215 | sethi %hi(sys_getsockname), %g1 |
212 | lduwa [%o1 + 0x8] %asi, %o2 | 216 | 16: lduwa [%o1 + 0x8] %asi, %o2 |
213 | jmpl %g1 + %lo(sys_getsockname), %g0 | 217 | jmpl %g1 + %lo(sys_getsockname), %g0 |
214 | lduwa [%o1 + 0x4] %asi, %o1 | 218 | 17: lduwa [%o1 + 0x4] %asi, %o1 |
215 | nop | 219 | nop |
216 | nop | 220 | nop |
217 | nop | 221 | nop |
218 | do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ | 222 | do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ |
219 | ldswa [%o1 + 0x0] %asi, %o0 | 223 | 18: ldswa [%o1 + 0x0] %asi, %o0 |
220 | sethi %hi(sys_getpeername), %g1 | 224 | sethi %hi(sys_getpeername), %g1 |
221 | lduwa [%o1 + 0x8] %asi, %o2 | 225 | 19: lduwa [%o1 + 0x8] %asi, %o2 |
222 | jmpl %g1 + %lo(sys_getpeername), %g0 | 226 | jmpl %g1 + %lo(sys_getpeername), %g0 |
223 | lduwa [%o1 + 0x4] %asi, %o1 | 227 | 20: lduwa [%o1 + 0x4] %asi, %o1 |
224 | nop | 228 | nop |
225 | nop | 229 | nop |
226 | nop | 230 | nop |
227 | do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ | 231 | do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ |
228 | ldswa [%o1 + 0x0] %asi, %o0 | 232 | 21: ldswa [%o1 + 0x0] %asi, %o0 |
229 | sethi %hi(sys_socketpair), %g1 | 233 | sethi %hi(sys_socketpair), %g1 |
230 | ldswa [%o1 + 0x8] %asi, %o2 | 234 | 22: ldswa [%o1 + 0x8] %asi, %o2 |
231 | lduwa [%o1 + 0xc] %asi, %o3 | 235 | 23: lduwa [%o1 + 0xc] %asi, %o3 |
232 | jmpl %g1 + %lo(sys_socketpair), %g0 | 236 | jmpl %g1 + %lo(sys_socketpair), %g0 |
233 | ldswa [%o1 + 0x4] %asi, %o1 | 237 | 24: ldswa [%o1 + 0x4] %asi, %o1 |
234 | nop | 238 | nop |
235 | nop | 239 | nop |
236 | do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ | 240 | do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ |
237 | ldswa [%o1 + 0x0] %asi, %o0 | 241 | 25: ldswa [%o1 + 0x0] %asi, %o0 |
238 | sethi %hi(sys_send), %g1 | 242 | sethi %hi(sys_send), %g1 |
239 | lduwa [%o1 + 0x8] %asi, %o2 | 243 | 26: lduwa [%o1 + 0x8] %asi, %o2 |
240 | lduwa [%o1 + 0xc] %asi, %o3 | 244 | 27: lduwa [%o1 + 0xc] %asi, %o3 |
241 | jmpl %g1 + %lo(sys_send), %g0 | 245 | jmpl %g1 + %lo(sys_send), %g0 |
242 | lduwa [%o1 + 0x4] %asi, %o1 | 246 | 28: lduwa [%o1 + 0x4] %asi, %o1 |
243 | nop | 247 | nop |
244 | nop | 248 | nop |
245 | do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ | 249 | do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ |
246 | ldswa [%o1 + 0x0] %asi, %o0 | 250 | 29: ldswa [%o1 + 0x0] %asi, %o0 |
247 | sethi %hi(sys_recv), %g1 | 251 | sethi %hi(sys_recv), %g1 |
248 | lduwa [%o1 + 0x8] %asi, %o2 | 252 | 30: lduwa [%o1 + 0x8] %asi, %o2 |
249 | lduwa [%o1 + 0xc] %asi, %o3 | 253 | 31: lduwa [%o1 + 0xc] %asi, %o3 |
250 | jmpl %g1 + %lo(sys_recv), %g0 | 254 | jmpl %g1 + %lo(sys_recv), %g0 |
251 | lduwa [%o1 + 0x4] %asi, %o1 | 255 | 32: lduwa [%o1 + 0x4] %asi, %o1 |
252 | nop | 256 | nop |
253 | nop | 257 | nop |
254 | do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ | 258 | do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ |
255 | ldswa [%o1 + 0x0] %asi, %o0 | 259 | 33: ldswa [%o1 + 0x0] %asi, %o0 |
256 | sethi %hi(sys_sendto), %g1 | 260 | sethi %hi(sys_sendto), %g1 |
257 | lduwa [%o1 + 0x8] %asi, %o2 | 261 | 34: lduwa [%o1 + 0x8] %asi, %o2 |
258 | lduwa [%o1 + 0xc] %asi, %o3 | 262 | 35: lduwa [%o1 + 0xc] %asi, %o3 |
259 | lduwa [%o1 + 0x10] %asi, %o4 | 263 | 36: lduwa [%o1 + 0x10] %asi, %o4 |
260 | ldswa [%o1 + 0x14] %asi, %o5 | 264 | 37: ldswa [%o1 + 0x14] %asi, %o5 |
261 | jmpl %g1 + %lo(sys_sendto), %g0 | 265 | jmpl %g1 + %lo(sys_sendto), %g0 |
262 | lduwa [%o1 + 0x4] %asi, %o1 | 266 | 38: lduwa [%o1 + 0x4] %asi, %o1 |
263 | do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ | 267 | do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ |
264 | ldswa [%o1 + 0x0] %asi, %o0 | 268 | 39: ldswa [%o1 + 0x0] %asi, %o0 |
265 | sethi %hi(sys_recvfrom), %g1 | 269 | sethi %hi(sys_recvfrom), %g1 |
266 | lduwa [%o1 + 0x8] %asi, %o2 | 270 | 40: lduwa [%o1 + 0x8] %asi, %o2 |
267 | lduwa [%o1 + 0xc] %asi, %o3 | 271 | 41: lduwa [%o1 + 0xc] %asi, %o3 |
268 | lduwa [%o1 + 0x10] %asi, %o4 | 272 | 42: lduwa [%o1 + 0x10] %asi, %o4 |
269 | lduwa [%o1 + 0x14] %asi, %o5 | 273 | 43: lduwa [%o1 + 0x14] %asi, %o5 |
270 | jmpl %g1 + %lo(sys_recvfrom), %g0 | 274 | jmpl %g1 + %lo(sys_recvfrom), %g0 |
271 | lduwa [%o1 + 0x4] %asi, %o1 | 275 | 44: lduwa [%o1 + 0x4] %asi, %o1 |
272 | do_sys_shutdown: /* sys_shutdown(int, int) */ | 276 | do_sys_shutdown: /* sys_shutdown(int, int) */ |
273 | ldswa [%o1 + 0x0] %asi, %o0 | 277 | 45: ldswa [%o1 + 0x0] %asi, %o0 |
274 | sethi %hi(sys_shutdown), %g1 | 278 | sethi %hi(sys_shutdown), %g1 |
275 | jmpl %g1 + %lo(sys_shutdown), %g0 | 279 | jmpl %g1 + %lo(sys_shutdown), %g0 |
276 | ldswa [%o1 + 0x4] %asi, %o1 | 280 | 46: ldswa [%o1 + 0x4] %asi, %o1 |
277 | nop | 281 | nop |
278 | nop | 282 | nop |
279 | nop | 283 | nop |
280 | nop | 284 | nop |
281 | do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ | 285 | do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ |
282 | ldswa [%o1 + 0x0] %asi, %o0 | 286 | 47: ldswa [%o1 + 0x0] %asi, %o0 |
283 | sethi %hi(compat_sys_setsockopt), %g1 | 287 | sethi %hi(compat_sys_setsockopt), %g1 |
284 | ldswa [%o1 + 0x8] %asi, %o2 | 288 | 48: ldswa [%o1 + 0x8] %asi, %o2 |
285 | lduwa [%o1 + 0xc] %asi, %o3 | 289 | 49: lduwa [%o1 + 0xc] %asi, %o3 |
286 | ldswa [%o1 + 0x10] %asi, %o4 | 290 | 50: ldswa [%o1 + 0x10] %asi, %o4 |
287 | jmpl %g1 + %lo(compat_sys_setsockopt), %g0 | 291 | jmpl %g1 + %lo(compat_sys_setsockopt), %g0 |
288 | ldswa [%o1 + 0x4] %asi, %o1 | 292 | 51: ldswa [%o1 + 0x4] %asi, %o1 |
289 | nop | 293 | nop |
290 | do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ | 294 | do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ |
291 | ldswa [%o1 + 0x0] %asi, %o0 | 295 | 52: ldswa [%o1 + 0x0] %asi, %o0 |
292 | sethi %hi(compat_sys_getsockopt), %g1 | 296 | sethi %hi(compat_sys_getsockopt), %g1 |
293 | ldswa [%o1 + 0x8] %asi, %o2 | 297 | 53: ldswa [%o1 + 0x8] %asi, %o2 |
294 | lduwa [%o1 + 0xc] %asi, %o3 | 298 | 54: lduwa [%o1 + 0xc] %asi, %o3 |
295 | lduwa [%o1 + 0x10] %asi, %o4 | 299 | 55: lduwa [%o1 + 0x10] %asi, %o4 |
296 | jmpl %g1 + %lo(compat_sys_getsockopt), %g0 | 300 | jmpl %g1 + %lo(compat_sys_getsockopt), %g0 |
297 | ldswa [%o1 + 0x4] %asi, %o1 | 301 | 56: ldswa [%o1 + 0x4] %asi, %o1 |
298 | nop | 302 | nop |
299 | do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ | 303 | do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ |
300 | ldswa [%o1 + 0x0] %asi, %o0 | 304 | 57: ldswa [%o1 + 0x0] %asi, %o0 |
301 | sethi %hi(compat_sys_sendmsg), %g1 | 305 | sethi %hi(compat_sys_sendmsg), %g1 |
302 | lduwa [%o1 + 0x8] %asi, %o2 | 306 | 58: lduwa [%o1 + 0x8] %asi, %o2 |
303 | jmpl %g1 + %lo(compat_sys_sendmsg), %g0 | 307 | jmpl %g1 + %lo(compat_sys_sendmsg), %g0 |
304 | lduwa [%o1 + 0x4] %asi, %o1 | 308 | 59: lduwa [%o1 + 0x4] %asi, %o1 |
305 | nop | 309 | nop |
306 | nop | 310 | nop |
307 | nop | 311 | nop |
308 | do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ | 312 | do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ |
309 | ldswa [%o1 + 0x0] %asi, %o0 | 313 | 60: ldswa [%o1 + 0x0] %asi, %o0 |
310 | sethi %hi(compat_sys_recvmsg), %g1 | 314 | sethi %hi(compat_sys_recvmsg), %g1 |
311 | lduwa [%o1 + 0x8] %asi, %o2 | 315 | 61: lduwa [%o1 + 0x8] %asi, %o2 |
312 | jmpl %g1 + %lo(compat_sys_recvmsg), %g0 | 316 | jmpl %g1 + %lo(compat_sys_recvmsg), %g0 |
313 | lduwa [%o1 + 0x4] %asi, %o1 | 317 | 62: lduwa [%o1 + 0x4] %asi, %o1 |
314 | nop | 318 | nop |
315 | nop | 319 | nop |
316 | nop | 320 | nop |
317 | __socketcall_table_end: | ||
318 | |||
319 | do_einval: | ||
320 | retl | ||
321 | mov -EINVAL, %o0 | ||
322 | do_efault: | ||
323 | retl | ||
324 | mov -EFAULT, %o0 | ||
325 | 321 | ||
326 | .section __ex_table | 322 | .section __ex_table |
327 | .align 4 | 323 | .align 4 |
328 | .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault | 324 | .word 1b, __retl_efault, 2b, __retl_efault |
325 | .word 3b, __retl_efault, 4b, __retl_efault | ||
326 | .word 5b, __retl_efault, 6b, __retl_efault | ||
327 | .word 7b, __retl_efault, 8b, __retl_efault | ||
328 | .word 9b, __retl_efault, 10b, __retl_efault | ||
329 | .word 11b, __retl_efault, 12b, __retl_efault | ||
330 | .word 13b, __retl_efault, 14b, __retl_efault | ||
331 | .word 15b, __retl_efault, 16b, __retl_efault | ||
332 | .word 17b, __retl_efault, 18b, __retl_efault | ||
333 | .word 19b, __retl_efault, 20b, __retl_efault | ||
334 | .word 21b, __retl_efault, 22b, __retl_efault | ||
335 | .word 23b, __retl_efault, 24b, __retl_efault | ||
336 | .word 25b, __retl_efault, 26b, __retl_efault | ||
337 | .word 27b, __retl_efault, 28b, __retl_efault | ||
338 | .word 29b, __retl_efault, 30b, __retl_efault | ||
339 | .word 31b, __retl_efault, 32b, __retl_efault | ||
340 | .word 33b, __retl_efault, 34b, __retl_efault | ||
341 | .word 35b, __retl_efault, 36b, __retl_efault | ||
342 | .word 37b, __retl_efault, 38b, __retl_efault | ||
343 | .word 39b, __retl_efault, 40b, __retl_efault | ||
344 | .word 41b, __retl_efault, 42b, __retl_efault | ||
345 | .word 43b, __retl_efault, 44b, __retl_efault | ||
346 | .word 45b, __retl_efault, 46b, __retl_efault | ||
347 | .word 47b, __retl_efault, 48b, __retl_efault | ||
348 | .word 49b, __retl_efault, 50b, __retl_efault | ||
349 | .word 51b, __retl_efault, 52b, __retl_efault | ||
350 | .word 53b, __retl_efault, 54b, __retl_efault | ||
351 | .word 55b, __retl_efault, 56b, __retl_efault | ||
352 | .word 57b, __retl_efault, 58b, __retl_efault | ||
353 | .word 59b, __retl_efault, 60b, __retl_efault | ||
354 | .word 61b, __retl_efault, 62b, __retl_efault | ||
329 | .previous | 355 | .previous |
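Editor's note: the effect of the relabelling above is that every ldswa/lduwa touching the 32-bit user argument block now carries its own numbered local label and a matching __ex_table pair whose fixup is __retl_efault, so a fault in any single argument fetch turns the whole socketcall into -EFAULT; the old begin/end range entry and the local do_efault stub are no longer needed. A rough userspace model of that per-fetch failure behaviour (get_user32() here stands in for the ldswa/lduwa plus fixup machinery and is not a real kernel interface; only the "any fetch fails, the call returns -EFAULT" structure mirrors the diff):

#include <errno.h>
#include <stdio.h>

/* Model of one guarded 32-bit fetch: a "fault" aborts the whole call. */
static int get_user32(const unsigned int *base, int idx, unsigned int *val)
{
	if (!base)			/* pretend the access faulted */
		return -EFAULT;
	*val = base[idx];
	return 0;
}

/* Model of do_sys_bind: three argument fetches, each individually guarded. */
static long sys_bind_model(const unsigned int *args)
{
	unsigned int fd, uaddr, addrlen;

	if (get_user32(args, 0, &fd) ||
	    get_user32(args, 1, &uaddr) ||
	    get_user32(args, 2, &addrlen))
		return -EFAULT;		/* what __retl_efault hands back */

	printf("bind(%u, %#x, %u)\n", fd, uaddr, addrlen);
	return 0;
}

int main(void)
{
	unsigned int args[3] = { 3, 0x1000, 16 };

	printf("ok case -> %ld\n", sys_bind_model(args));
	printf("faulting case -> %ld\n", sys_bind_model(NULL));
	return 0;
}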
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 3a145fc39cf2..89f2fcfcd662 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S | |||
@@ -119,8 +119,8 @@ startup_continue: | |||
119 | sethi %hi(itlb_load), %g2 | 119 | sethi %hi(itlb_load), %g2 |
120 | or %g2, %lo(itlb_load), %g2 | 120 | or %g2, %lo(itlb_load), %g2 |
121 | stx %g2, [%sp + 2047 + 128 + 0x18] | 121 | stx %g2, [%sp + 2047 + 128 + 0x18] |
122 | sethi %hi(mmu_ihandle_cache), %g2 | 122 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
123 | lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 | 123 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
124 | stx %g2, [%sp + 2047 + 128 + 0x20] | 124 | stx %g2, [%sp + 2047 + 128 + 0x20] |
125 | sethi %hi(KERNBASE), %g2 | 125 | sethi %hi(KERNBASE), %g2 |
126 | stx %g2, [%sp + 2047 + 128 + 0x28] | 126 | stx %g2, [%sp + 2047 + 128 + 0x28] |
@@ -156,8 +156,8 @@ startup_continue: | |||
156 | sethi %hi(itlb_load), %g2 | 156 | sethi %hi(itlb_load), %g2 |
157 | or %g2, %lo(itlb_load), %g2 | 157 | or %g2, %lo(itlb_load), %g2 |
158 | stx %g2, [%sp + 2047 + 128 + 0x18] | 158 | stx %g2, [%sp + 2047 + 128 + 0x18] |
159 | sethi %hi(mmu_ihandle_cache), %g2 | 159 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
160 | lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 | 160 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
161 | stx %g2, [%sp + 2047 + 128 + 0x20] | 161 | stx %g2, [%sp + 2047 + 128 + 0x20] |
162 | sethi %hi(KERNBASE + 0x400000), %g2 | 162 | sethi %hi(KERNBASE + 0x400000), %g2 |
163 | stx %g2, [%sp + 2047 + 128 + 0x28] | 163 | stx %g2, [%sp + 2047 + 128 + 0x28] |
@@ -190,8 +190,8 @@ do_dtlb: | |||
190 | sethi %hi(dtlb_load), %g2 | 190 | sethi %hi(dtlb_load), %g2 |
191 | or %g2, %lo(dtlb_load), %g2 | 191 | or %g2, %lo(dtlb_load), %g2 |
192 | stx %g2, [%sp + 2047 + 128 + 0x18] | 192 | stx %g2, [%sp + 2047 + 128 + 0x18] |
193 | sethi %hi(mmu_ihandle_cache), %g2 | 193 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
194 | lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 | 194 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
195 | stx %g2, [%sp + 2047 + 128 + 0x20] | 195 | stx %g2, [%sp + 2047 + 128 + 0x20] |
196 | sethi %hi(KERNBASE), %g2 | 196 | sethi %hi(KERNBASE), %g2 |
197 | stx %g2, [%sp + 2047 + 128 + 0x28] | 197 | stx %g2, [%sp + 2047 + 128 + 0x28] |
@@ -228,8 +228,8 @@ do_dtlb: | |||
228 | sethi %hi(dtlb_load), %g2 | 228 | sethi %hi(dtlb_load), %g2 |
229 | or %g2, %lo(dtlb_load), %g2 | 229 | or %g2, %lo(dtlb_load), %g2 |
230 | stx %g2, [%sp + 2047 + 128 + 0x18] | 230 | stx %g2, [%sp + 2047 + 128 + 0x18] |
231 | sethi %hi(mmu_ihandle_cache), %g2 | 231 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
232 | lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 | 232 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
233 | stx %g2, [%sp + 2047 + 128 + 0x20] | 233 | stx %g2, [%sp + 2047 + 128 + 0x20] |
234 | sethi %hi(KERNBASE + 0x400000), %g2 | 234 | sethi %hi(KERNBASE + 0x400000), %g2 |
235 | stx %g2, [%sp + 2047 + 128 + 0x28] | 235 | stx %g2, [%sp + 2047 + 128 + 0x28] |
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index b280b2ef674f..5570e7bb22bb 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c | |||
@@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un | |||
189 | 189 | ||
190 | if (regs->tstate & TSTATE_PRIV) { | 190 | if (regs->tstate & TSTATE_PRIV) { |
191 | /* Test if this comes from uaccess places. */ | 191 | /* Test if this comes from uaccess places. */ |
192 | unsigned long fixup; | 192 | const struct exception_table_entry *entry; |
193 | unsigned long g2 = regs->u_regs[UREG_G2]; | ||
194 | 193 | ||
195 | if ((fixup = search_extables_range(regs->tpc, &g2))) { | 194 | entry = search_exception_tables(regs->tpc); |
196 | /* Ouch, somebody is trying ugly VM hole tricks on us... */ | 195 | if (entry) { |
196 | /* Ouch, somebody is trying VM hole tricks on us... */ | ||
197 | #ifdef DEBUG_EXCEPTIONS | 197 | #ifdef DEBUG_EXCEPTIONS |
198 | printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); | 198 | printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); |
199 | printk("EX_TABLE: insn<%016lx> fixup<%016lx> " | 199 | printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", |
200 | "g2<%016lx>\n", regs->tpc, fixup, g2); | 200 | regs->tpc, entry->fixup); |
201 | #endif | 201 | #endif |
202 | regs->tpc = fixup; | 202 | regs->tpc = entry->fixup; |
203 | regs->tnpc = regs->tpc + 4; | 203 | regs->tnpc = regs->tpc + 4; |
204 | regs->u_regs[UREG_G2] = g2; | ||
205 | return; | 204 | return; |
206 | } | 205 | } |
207 | /* Shit... */ | 206 | /* Shit... */ |
@@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void) | |||
758 | ecache_flush_size = (2 * largest_size); | 757 | ecache_flush_size = (2 * largest_size); |
759 | ecache_flush_linesize = smallest_linesize; | 758 | ecache_flush_linesize = smallest_linesize; |
760 | 759 | ||
761 | /* Discover a physically contiguous chunk of physical | 760 | ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size); |
762 | * memory in 'sp_banks' of size ecache_flush_size calculated | ||
763 | * above. Store the physical base of this area at | ||
764 | * ecache_flush_physbase. | ||
765 | */ | ||
766 | for (node = 0; ; node++) { | ||
767 | if (sp_banks[node].num_bytes == 0) | ||
768 | break; | ||
769 | if (sp_banks[node].num_bytes >= ecache_flush_size) { | ||
770 | ecache_flush_physbase = sp_banks[node].base_addr; | ||
771 | break; | ||
772 | } | ||
773 | } | ||
774 | 761 | ||
775 | /* Note: Zero would be a valid value of ecache_flush_physbase so | 762 | if (ecache_flush_physbase == ~0UL) { |
776 | * don't use that as the success test. :-) | ||
777 | */ | ||
778 | if (sp_banks[node].num_bytes == 0) { | ||
779 | prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " | 763 | prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " |
780 | "contiguous physical memory.\n", ecache_flush_size); | 764 | "contiguous physical memory.\n", |
765 | ecache_flush_size); | ||
781 | prom_halt(); | 766 | prom_halt(); |
782 | } | 767 | } |
783 | 768 | ||
@@ -869,14 +854,19 @@ static void cheetah_flush_ecache_line(unsigned long physaddr) | |||
869 | */ | 854 | */ |
870 | static void __cheetah_flush_icache(void) | 855 | static void __cheetah_flush_icache(void) |
871 | { | 856 | { |
872 | unsigned long i; | 857 | unsigned int icache_size, icache_line_size; |
858 | unsigned long addr; | ||
859 | |||
860 | icache_size = local_cpu_data().icache_size; | ||
861 | icache_line_size = local_cpu_data().icache_line_size; | ||
873 | 862 | ||
874 | /* Clear the valid bits in all the tags. */ | 863 | /* Clear the valid bits in all the tags. */ |
875 | for (i = 0; i < (1 << 15); i += (1 << 5)) { | 864 | for (addr = 0; addr < icache_size; addr += icache_line_size) { |
876 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 865 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" |
877 | "membar #Sync" | 866 | "membar #Sync" |
878 | : /* no outputs */ | 867 | : /* no outputs */ |
879 | : "r" (i | (2 << 3)), "i" (ASI_IC_TAG)); | 868 | : "r" (addr | (2 << 3)), |
869 | "i" (ASI_IC_TAG)); | ||
880 | } | 870 | } |
881 | } | 871 | } |
882 | 872 | ||
@@ -904,13 +894,17 @@ static void cheetah_flush_icache(void) | |||
904 | 894 | ||
905 | static void cheetah_flush_dcache(void) | 895 | static void cheetah_flush_dcache(void) |
906 | { | 896 | { |
907 | unsigned long i; | 897 | unsigned int dcache_size, dcache_line_size; |
898 | unsigned long addr; | ||
908 | 899 | ||
909 | for (i = 0; i < (1 << 16); i += (1 << 5)) { | 900 | dcache_size = local_cpu_data().dcache_size; |
901 | dcache_line_size = local_cpu_data().dcache_line_size; | ||
902 | |||
903 | for (addr = 0; addr < dcache_size; addr += dcache_line_size) { | ||
910 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 904 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" |
911 | "membar #Sync" | 905 | "membar #Sync" |
912 | : /* no outputs */ | 906 | : /* no outputs */ |
913 | : "r" (i), "i" (ASI_DCACHE_TAG)); | 907 | : "r" (addr), "i" (ASI_DCACHE_TAG)); |
914 | } | 908 | } |
915 | } | 909 | } |
916 | 910 | ||
@@ -921,24 +915,29 @@ static void cheetah_flush_dcache(void) | |||
921 | */ | 915 | */ |
922 | static void cheetah_plus_zap_dcache_parity(void) | 916 | static void cheetah_plus_zap_dcache_parity(void) |
923 | { | 917 | { |
924 | unsigned long i; | 918 | unsigned int dcache_size, dcache_line_size; |
919 | unsigned long addr; | ||
920 | |||
921 | dcache_size = local_cpu_data().dcache_size; | ||
922 | dcache_line_size = local_cpu_data().dcache_line_size; | ||
925 | 923 | ||
926 | for (i = 0; i < (1 << 16); i += (1 << 5)) { | 924 | for (addr = 0; addr < dcache_size; addr += dcache_line_size) { |
927 | unsigned long tag = (i >> 14); | 925 | unsigned long tag = (addr >> 14); |
928 | unsigned long j; | 926 | unsigned long line; |
929 | 927 | ||
930 | __asm__ __volatile__("membar #Sync\n\t" | 928 | __asm__ __volatile__("membar #Sync\n\t" |
931 | "stxa %0, [%1] %2\n\t" | 929 | "stxa %0, [%1] %2\n\t" |
932 | "membar #Sync" | 930 | "membar #Sync" |
933 | : /* no outputs */ | 931 | : /* no outputs */ |
934 | : "r" (tag), "r" (i), | 932 | : "r" (tag), "r" (addr), |
935 | "i" (ASI_DCACHE_UTAG)); | 933 | "i" (ASI_DCACHE_UTAG)); |
936 | for (j = i; j < i + (1 << 5); j += (1 << 3)) | 934 | for (line = addr; line < addr + dcache_line_size; line += 8) |
937 | __asm__ __volatile__("membar #Sync\n\t" | 935 | __asm__ __volatile__("membar #Sync\n\t" |
938 | "stxa %%g0, [%0] %1\n\t" | 936 | "stxa %%g0, [%0] %1\n\t" |
939 | "membar #Sync" | 937 | "membar #Sync" |
940 | : /* no outputs */ | 938 | : /* no outputs */ |
941 | : "r" (j), "i" (ASI_DCACHE_DATA)); | 939 | : "r" (line), |
940 | "i" (ASI_DCACHE_DATA)); | ||
942 | } | 941 | } |
943 | } | 942 | } |
944 | 943 | ||
@@ -1332,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr) | |||
1332 | /* Return non-zero if PADDR is a valid physical memory address. */ | 1331 | /* Return non-zero if PADDR is a valid physical memory address. */ |
1333 | static int cheetah_check_main_memory(unsigned long paddr) | 1332 | static int cheetah_check_main_memory(unsigned long paddr) |
1334 | { | 1333 | { |
1335 | int i; | 1334 | unsigned long vaddr = PAGE_OFFSET + paddr; |
1336 | 1335 | ||
1337 | for (i = 0; ; i++) { | 1336 | if (vaddr > (unsigned long) high_memory) |
1338 | if (sp_banks[i].num_bytes == 0) | 1337 | return 0; |
1339 | break; | 1338 | |
1340 | if (paddr >= sp_banks[i].base_addr && | 1339 | return kern_addr_valid(vaddr); |
1341 | paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes)) | ||
1342 | return 1; | ||
1343 | } | ||
1344 | return 0; | ||
1345 | } | 1340 | } |
1346 | 1341 | ||
1347 | void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) | 1342 | void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) |
@@ -1596,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned | |||
1596 | /* OK, usermode access. */ | 1591 | /* OK, usermode access. */ |
1597 | recoverable = 1; | 1592 | recoverable = 1; |
1598 | } else { | 1593 | } else { |
1599 | unsigned long g2 = regs->u_regs[UREG_G2]; | 1594 | const struct exception_table_entry *entry; |
1600 | unsigned long fixup = search_extables_range(regs->tpc, &g2); | ||
1601 | 1595 | ||
1602 | if (fixup != 0UL) { | 1596 | entry = search_exception_tables(regs->tpc); |
1597 | if (entry) { | ||
1603 | /* OK, kernel access to userspace. */ | 1598 | /* OK, kernel access to userspace. */ |
1604 | recoverable = 1; | 1599 | recoverable = 1; |
1605 | 1600 | ||
@@ -1618,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned | |||
1618 | * recoverable condition. | 1613 | * recoverable condition. |
1619 | */ | 1614 | */ |
1620 | if (recoverable) { | 1615 | if (recoverable) { |
1621 | regs->tpc = fixup; | 1616 | regs->tpc = entry->fixup; |
1622 | regs->tnpc = regs->tpc + 4; | 1617 | regs->tnpc = regs->tpc + 4; |
1623 | regs->u_regs[UREG_G2] = g2; | ||
1624 | } | 1618 | } |
1625 | } | 1619 | } |
1626 | } | 1620 | } |
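Editor's note: the flush loops above now take their bounds from the cache geometry probed into cpu_data() instead of the old hard-coded Cheetah constants (1 << 15 and 1 << 16 with 32-byte lines), presumably so the same loops stay correct on newer implementations (for example the PANTHER_IMPL parts added to the cpufreq check further below) whose L1 geometry differs. A host-side sketch of the same walk with the diagnostic ASI stores replaced by counters; the sizes are example values, not authoritative figures for any particular cpu:

#include <stdio.h>

int main(void)
{
	/* Example values standing in for the probed cpu_data() fields. */
	unsigned int dcache_size = 64 * 1024;
	unsigned int dcache_line_size = 32;
	unsigned long addr, sub;
	unsigned long utag_stores = 0, data_stores = 0;

	/* Same shape as cheetah_plus_zap_dcache_parity(): one microtag
	 * store per cache line, then one store per 8-byte sub-block. */
	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		utag_stores++;			/* ASI_DCACHE_UTAG store */
		for (sub = addr; sub < addr + dcache_line_size; sub += 8)
			data_stores++;		/* ASI_DCACHE_DATA stores */
	}

	printf("%lu utag stores, %lu data stores\n",
	       utag_stores, data_stores);
	return 0;
}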
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S index da48400bcc95..1f5b5b708ce7 100644 --- a/arch/sparc64/kernel/una_asm.S +++ b/arch/sparc64/kernel/una_asm.S | |||
@@ -6,13 +6,6 @@ | |||
6 | 6 | ||
7 | .text | 7 | .text |
8 | 8 | ||
9 | kernel_unaligned_trap_fault: | ||
10 | call kernel_mna_trap_fault | ||
11 | nop | ||
12 | retl | ||
13 | nop | ||
14 | .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault | ||
15 | |||
16 | .globl __do_int_store | 9 | .globl __do_int_store |
17 | __do_int_store: | 10 | __do_int_store: |
18 | rd %asi, %o4 | 11 | rd %asi, %o4 |
@@ -51,24 +44,24 @@ __do_int_store: | |||
51 | 0: | 44 | 0: |
52 | wr %o4, 0x0, %asi | 45 | wr %o4, 0x0, %asi |
53 | retl | 46 | retl |
54 | nop | 47 | mov 0, %o0 |
55 | .size __do_int_store, .-__do_int_store | 48 | .size __do_int_store, .-__do_int_store |
56 | 49 | ||
57 | .section __ex_table | 50 | .section __ex_table |
58 | .word 4b, kernel_unaligned_trap_fault | 51 | .word 4b, __retl_efault |
59 | .word 5b, kernel_unaligned_trap_fault | 52 | .word 5b, __retl_efault |
60 | .word 6b, kernel_unaligned_trap_fault | 53 | .word 6b, __retl_efault |
61 | .word 7b, kernel_unaligned_trap_fault | 54 | .word 7b, __retl_efault |
62 | .word 8b, kernel_unaligned_trap_fault | 55 | .word 8b, __retl_efault |
63 | .word 9b, kernel_unaligned_trap_fault | 56 | .word 9b, __retl_efault |
64 | .word 10b, kernel_unaligned_trap_fault | 57 | .word 10b, __retl_efault |
65 | .word 11b, kernel_unaligned_trap_fault | 58 | .word 11b, __retl_efault |
66 | .word 12b, kernel_unaligned_trap_fault | 59 | .word 12b, __retl_efault |
67 | .word 13b, kernel_unaligned_trap_fault | 60 | .word 13b, __retl_efault |
68 | .word 14b, kernel_unaligned_trap_fault | 61 | .word 14b, __retl_efault |
69 | .word 15b, kernel_unaligned_trap_fault | 62 | .word 15b, __retl_efault |
70 | .word 16b, kernel_unaligned_trap_fault | 63 | .word 16b, __retl_efault |
71 | .word 17b, kernel_unaligned_trap_fault | 64 | .word 17b, __retl_efault |
72 | .previous | 65 | .previous |
73 | 66 | ||
74 | .globl do_int_load | 67 | .globl do_int_load |
@@ -133,21 +126,21 @@ do_int_load: | |||
133 | 0: | 126 | 0: |
134 | wr %o5, 0x0, %asi | 127 | wr %o5, 0x0, %asi |
135 | retl | 128 | retl |
136 | nop | 129 | mov 0, %o0 |
137 | .size __do_int_load, .-__do_int_load | 130 | .size __do_int_load, .-__do_int_load |
138 | 131 | ||
139 | .section __ex_table | 132 | .section __ex_table |
140 | .word 4b, kernel_unaligned_trap_fault | 133 | .word 4b, __retl_efault |
141 | .word 5b, kernel_unaligned_trap_fault | 134 | .word 5b, __retl_efault |
142 | .word 6b, kernel_unaligned_trap_fault | 135 | .word 6b, __retl_efault |
143 | .word 7b, kernel_unaligned_trap_fault | 136 | .word 7b, __retl_efault |
144 | .word 8b, kernel_unaligned_trap_fault | 137 | .word 8b, __retl_efault |
145 | .word 9b, kernel_unaligned_trap_fault | 138 | .word 9b, __retl_efault |
146 | .word 10b, kernel_unaligned_trap_fault | 139 | .word 10b, __retl_efault |
147 | .word 11b, kernel_unaligned_trap_fault | 140 | .word 11b, __retl_efault |
148 | .word 12b, kernel_unaligned_trap_fault | 141 | .word 12b, __retl_efault |
149 | .word 13b, kernel_unaligned_trap_fault | 142 | .word 13b, __retl_efault |
150 | .word 14b, kernel_unaligned_trap_fault | 143 | .word 14b, __retl_efault |
151 | .word 15b, kernel_unaligned_trap_fault | 144 | .word 15b, __retl_efault |
152 | .word 16b, kernel_unaligned_trap_fault | 145 | .word 16b, __retl_efault |
153 | .previous | 146 | .previous |
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c index 42718f6a7d36..70faf630603b 100644 --- a/arch/sparc64/kernel/unaligned.c +++ b/arch/sparc64/kernel/unaligned.c | |||
@@ -180,14 +180,14 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs) | |||
180 | die_if_kernel(str, regs); | 180 | die_if_kernel(str, regs); |
181 | } | 181 | } |
182 | 182 | ||
183 | extern void do_int_load(unsigned long *dest_reg, int size, | 183 | extern int do_int_load(unsigned long *dest_reg, int size, |
184 | unsigned long *saddr, int is_signed, int asi); | 184 | unsigned long *saddr, int is_signed, int asi); |
185 | 185 | ||
186 | extern void __do_int_store(unsigned long *dst_addr, int size, | 186 | extern int __do_int_store(unsigned long *dst_addr, int size, |
187 | unsigned long src_val, int asi); | 187 | unsigned long src_val, int asi); |
188 | 188 | ||
189 | static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, | 189 | static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, |
190 | struct pt_regs *regs, int asi, int orig_asi) | 190 | struct pt_regs *regs, int asi, int orig_asi) |
191 | { | 191 | { |
192 | unsigned long zero = 0; | 192 | unsigned long zero = 0; |
193 | unsigned long *src_val_p = &zero; | 193 | unsigned long *src_val_p = &zero; |
@@ -219,7 +219,7 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, | |||
219 | break; | 219 | break; |
220 | }; | 220 | }; |
221 | } | 221 | } |
222 | __do_int_store(dst_addr, size, src_val, asi); | 222 | return __do_int_store(dst_addr, size, src_val, asi); |
223 | } | 223 | } |
224 | 224 | ||
225 | static inline void advance(struct pt_regs *regs) | 225 | static inline void advance(struct pt_regs *regs) |
@@ -242,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn) | |||
242 | return !floating_point_load_or_store_p(insn); | 242 | return !floating_point_load_or_store_p(insn); |
243 | } | 243 | } |
244 | 244 | ||
245 | void kernel_mna_trap_fault(void) | 245 | static void kernel_mna_trap_fault(void) |
246 | { | 246 | { |
247 | struct pt_regs *regs = current_thread_info()->kern_una_regs; | 247 | struct pt_regs *regs = current_thread_info()->kern_una_regs; |
248 | unsigned int insn = current_thread_info()->kern_una_insn; | 248 | unsigned int insn = current_thread_info()->kern_una_insn; |
249 | unsigned long g2 = regs->u_regs[UREG_G2]; | 249 | const struct exception_table_entry *entry; |
250 | unsigned long fixup = search_extables_range(regs->tpc, &g2); | ||
251 | 250 | ||
252 | if (!fixup) { | 251 | entry = search_exception_tables(regs->tpc); |
252 | if (!entry) { | ||
253 | unsigned long address; | 253 | unsigned long address; |
254 | 254 | ||
255 | address = compute_effective_address(regs, insn, | 255 | address = compute_effective_address(regs, insn, |
@@ -270,9 +270,8 @@ void kernel_mna_trap_fault(void) | |||
270 | die_if_kernel("Oops", regs); | 270 | die_if_kernel("Oops", regs); |
271 | /* Not reached */ | 271 | /* Not reached */ |
272 | } | 272 | } |
273 | regs->tpc = fixup; | 273 | regs->tpc = entry->fixup; |
274 | regs->tnpc = regs->tpc + 4; | 274 | regs->tnpc = regs->tpc + 4; |
275 | regs->u_regs [UREG_G2] = g2; | ||
276 | 275 | ||
277 | regs->tstate &= ~TSTATE_ASI; | 276 | regs->tstate &= ~TSTATE_ASI; |
278 | regs->tstate |= (ASI_AIUS << 24UL); | 277 | regs->tstate |= (ASI_AIUS << 24UL); |
@@ -294,8 +293,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u | |||
294 | 293 | ||
295 | kernel_mna_trap_fault(); | 294 | kernel_mna_trap_fault(); |
296 | } else { | 295 | } else { |
297 | unsigned long addr; | 296 | unsigned long addr, *reg_addr; |
298 | int orig_asi, asi; | 297 | int orig_asi, asi, err; |
299 | 298 | ||
300 | addr = compute_effective_address(regs, insn, | 299 | addr = compute_effective_address(regs, insn, |
301 | ((insn >> 25) & 0x1f)); | 300 | ((insn >> 25) & 0x1f)); |
@@ -319,11 +318,12 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u | |||
319 | }; | 318 | }; |
320 | switch (dir) { | 319 | switch (dir) { |
321 | case load: | 320 | case load: |
322 | do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), | 321 | reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); |
323 | size, (unsigned long *) addr, | 322 | err = do_int_load(reg_addr, size, |
324 | decode_signedness(insn), asi); | 323 | (unsigned long *) addr, |
325 | if (unlikely(asi != orig_asi)) { | 324 | decode_signedness(insn), asi); |
326 | unsigned long val_in = *(unsigned long *) addr; | 325 | if (likely(!err) && unlikely(asi != orig_asi)) { |
326 | unsigned long val_in = *reg_addr; | ||
327 | switch (size) { | 327 | switch (size) { |
328 | case 2: | 328 | case 2: |
329 | val_in = swab16(val_in); | 329 | val_in = swab16(val_in); |
@@ -339,21 +339,24 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u | |||
339 | BUG(); | 339 | BUG(); |
340 | break; | 340 | break; |
341 | }; | 341 | }; |
342 | *(unsigned long *) addr = val_in; | 342 | *reg_addr = val_in; |
343 | } | 343 | } |
344 | break; | 344 | break; |
345 | 345 | ||
346 | case store: | 346 | case store: |
347 | do_int_store(((insn>>25)&0x1f), size, | 347 | err = do_int_store(((insn>>25)&0x1f), size, |
348 | (unsigned long *) addr, regs, | 348 | (unsigned long *) addr, regs, |
349 | asi, orig_asi); | 349 | asi, orig_asi); |
350 | break; | 350 | break; |
351 | 351 | ||
352 | default: | 352 | default: |
353 | panic("Impossible kernel unaligned trap."); | 353 | panic("Impossible kernel unaligned trap."); |
354 | /* Not reached... */ | 354 | /* Not reached... */ |
355 | } | 355 | } |
356 | advance(regs); | 356 | if (unlikely(err)) |
357 | kernel_mna_trap_fault(); | ||
358 | else | ||
359 | advance(regs); | ||
357 | } | 360 | } |
358 | } | 361 | } |
359 | 362 | ||
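Editor's note: with do_int_load()/__do_int_store() now returning 0 or -EFAULT through the __retl_efault fixups, kernel_unaligned_trap() advances past the emulated instruction only when the access actually succeeded and otherwise falls into kernel_mna_trap_fault(); the old assembly path that called the fault handler directly is gone. A compressed userspace model of that control flow (the helpers are stubs; only the err-then-advance-or-fault structure mirrors the diff):

#include <errno.h>
#include <stdio.h>

/* Stub for do_int_load(): 0 on success, -EFAULT if the access "faults". */
static int do_int_load_model(unsigned long *dest, const unsigned long *src)
{
	if (!src)
		return -EFAULT;
	*dest = *src;
	return 0;
}

static void kernel_mna_trap_fault_model(void)
{
	puts("fault: apply exception-table fixup, do not advance");
}

static void advance_model(void)
{
	puts("success: advance tpc/tnpc past the emulated insn");
}

static void kernel_unaligned_trap_model(unsigned long *reg,
					const unsigned long *addr)
{
	int err = do_int_load_model(reg, addr);	/* store side is analogous */

	if (err)
		kernel_mna_trap_fault_model();
	else
		advance_model();
}

int main(void)
{
	unsigned long reg = 0, word = 0x1234;

	kernel_unaligned_trap_model(&reg, &word);
	kernel_unaligned_trap_model(&reg, NULL);
	return 0;
}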
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index 9080e7cd4bb0..0340041f6143 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c | |||
@@ -208,7 +208,10 @@ static int __init us3_freq_init(void) | |||
208 | impl = ((ver >> 32) & 0xffff); | 208 | impl = ((ver >> 32) & 0xffff); |
209 | 209 | ||
210 | if (manuf == CHEETAH_MANUF && | 210 | if (manuf == CHEETAH_MANUF && |
211 | (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) { | 211 | (impl == CHEETAH_IMPL || |
212 | impl == CHEETAH_PLUS_IMPL || | ||
213 | impl == JAGUAR_IMPL || | ||
214 | impl == PANTHER_IMPL)) { | ||
212 | struct cpufreq_driver *driver; | 215 | struct cpufreq_driver *driver; |
213 | 216 | ||
214 | ret = -ENOMEM; | 217 | ret = -ENOMEM; |
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index f47d0be39378..2af0cf0a8640 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S | |||
@@ -9,8 +9,7 @@ ENTRY(_start) | |||
9 | jiffies = jiffies_64; | 9 | jiffies = jiffies_64; |
10 | SECTIONS | 10 | SECTIONS |
11 | { | 11 | { |
12 | swapper_pmd_dir = 0x0000000000402000; | 12 | swapper_low_pmd_dir = 0x0000000000402000; |
13 | empty_pg_dir = 0x0000000000403000; | ||
14 | . = 0x4000; | 13 | . = 0x4000; |
15 | .text 0x0000000000404000 : | 14 | .text 0x0000000000404000 : |
16 | { | 15 | { |
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S index 09cbbaa0ebf4..e1264650ca7a 100644 --- a/arch/sparc64/lib/strncpy_from_user.S +++ b/arch/sparc64/lib/strncpy_from_user.S | |||
@@ -125,15 +125,11 @@ __strncpy_from_user: | |||
125 | add %o2, %o3, %o0 | 125 | add %o2, %o3, %o0 |
126 | .size __strncpy_from_user, .-__strncpy_from_user | 126 | .size __strncpy_from_user, .-__strncpy_from_user |
127 | 127 | ||
128 | .section .fixup,#alloc,#execinstr | ||
129 | .align 4 | ||
130 | 4: retl | ||
131 | mov -EFAULT, %o0 | ||
132 | |||
133 | .section __ex_table,#alloc | 128 | .section __ex_table,#alloc |
134 | .align 4 | 129 | .align 4 |
135 | .word 60b, 4b | 130 | .word 60b, __retl_efault |
136 | .word 61b, 4b | 131 | .word 61b, __retl_efault |
137 | .word 62b, 4b | 132 | .word 62b, __retl_efault |
138 | .word 63b, 4b | 133 | .word 63b, __retl_efault |
139 | .word 64b, 4b | 134 | .word 64b, __retl_efault |
135 | .previous | ||
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c index 0278e34125db..19d1fdb17d0e 100644 --- a/arch/sparc64/lib/user_fixup.c +++ b/arch/sparc64/lib/user_fixup.c | |||
@@ -11,61 +11,56 @@ | |||
11 | 11 | ||
12 | /* Calculating the exact fault address when using | 12 | /* Calculating the exact fault address when using |
13 | * block loads and stores can be very complicated. | 13 | * block loads and stores can be very complicated. |
14 | * | ||
14 | * Instead of trying to be clever and handling all | 15 | * Instead of trying to be clever and handling all |
15 | * of the cases, just fix things up simply here. | 16 | * of the cases, just fix things up simply here. |
16 | */ | 17 | */ |
17 | 18 | ||
18 | unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) | 19 | static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset) |
19 | { | 20 | { |
20 | char *dst = to; | 21 | unsigned long fault_addr = current_thread_info()->fault_address; |
21 | const char __user *src = from; | 22 | unsigned long end = start + size; |
22 | 23 | ||
23 | while (size) { | 24 | if (fault_addr < start || fault_addr >= end) { |
24 | if (__get_user(*dst, src)) | 25 | *offset = 0; |
25 | break; | 26 | } else { |
26 | dst++; | 27 | *offset = fault_addr - start; |
27 | src++; | 28 | size = end - fault_addr; |
28 | size--; | ||
29 | } | 29 | } |
30 | return size; | ||
31 | } | ||
30 | 32 | ||
31 | if (size) | 33 | unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) |
32 | memset(dst, 0, size); | 34 | { |
35 | unsigned long offset; | ||
36 | |||
37 | size = compute_size((unsigned long) from, size, &offset); | ||
38 | if (likely(size)) | ||
39 | memset(to + offset, 0, size); | ||
33 | 40 | ||
34 | return size; | 41 | return size; |
35 | } | 42 | } |
36 | 43 | ||
37 | unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) | 44 | unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) |
38 | { | 45 | { |
39 | char __user *dst = to; | 46 | unsigned long offset; |
40 | const char *src = from; | ||
41 | |||
42 | while (size) { | ||
43 | if (__put_user(*src, dst)) | ||
44 | break; | ||
45 | dst++; | ||
46 | src++; | ||
47 | size--; | ||
48 | } | ||
49 | 47 | ||
50 | return size; | 48 | return compute_size((unsigned long) to, size, &offset); |
51 | } | 49 | } |
52 | 50 | ||
53 | unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) | 51 | unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) |
54 | { | 52 | { |
55 | char __user *dst = to; | 53 | unsigned long fault_addr = current_thread_info()->fault_address; |
56 | char __user *src = from; | 54 | unsigned long start = (unsigned long) to; |
55 | unsigned long end = start + size; | ||
57 | 56 | ||
58 | while (size) { | 57 | if (fault_addr >= start && fault_addr < end) |
59 | char tmp; | 58 | return end - fault_addr; |
60 | 59 | ||
61 | if (__get_user(tmp, src)) | 60 | start = (unsigned long) from; |
62 | break; | 61 | end = start + size; |
63 | if (__put_user(tmp, dst)) | 62 | if (fault_addr >= start && fault_addr < end) |
64 | break; | 63 | return end - fault_addr; |
65 | dst++; | ||
66 | src++; | ||
67 | size--; | ||
68 | } | ||
69 | 64 | ||
70 | return size; | 65 | return size; |
71 | } | 66 | } |
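Editor's note: the rewritten fixups lean on the fault address that the block-copy exception handlers leave in current_thread_info()->fault_address. compute_size() turns it into the number of bytes left uncopied plus, for copy_from_user(), the offset of the destination tail that still has to be zeroed. A standalone model with the fault address injected by hand (the memcpy/memset dance only imitates the kernel behaviour, it is not the real copy path):

#include <stdio.h>
#include <string.h>

/* Stand-in for current_thread_info()->fault_address. */
static unsigned long fault_address;

static unsigned long compute_size(unsigned long start, unsigned long size,
				  unsigned long *offset)
{
	unsigned long end = start + size;

	if (fault_address < start || fault_address >= end) {
		*offset = 0;
	} else {
		*offset = fault_address - start;	/* bytes already copied */
		size = end - fault_address;		/* bytes left uncopied */
	}
	return size;
}

int main(void)
{
	char src[16] = "0123456789abcde";
	char dst[16];
	unsigned long offset, left;

	memcpy(dst, src, 10);		/* pretend the copy faulted after 10 bytes */
	fault_address = (unsigned long) src + 10;

	left = compute_size((unsigned long) src, sizeof(src), &offset);
	if (left)
		memset(dst + offset, 0, left);	/* zero the uncopied tail */

	printf("uncopied=%lu offset=%lu dst=\"%s\"\n", left, offset, dst);
	return 0;
}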
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile index cda87333a77b..9d0960e69f48 100644 --- a/arch/sparc64/mm/Makefile +++ b/arch/sparc64/mm/Makefile | |||
@@ -5,6 +5,6 @@ | |||
5 | EXTRA_AFLAGS := -ansi | 5 | EXTRA_AFLAGS := -ansi |
6 | EXTRA_CFLAGS := -Werror | 6 | EXTRA_CFLAGS := -Werror |
7 | 7 | ||
8 | obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o | 8 | obj-y := ultra.o tlb.o fault.o init.o generic.o |
9 | 9 | ||
10 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 10 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c deleted file mode 100644 index ec334297ff4f..000000000000 --- a/arch/sparc64/mm/extable.c +++ /dev/null | |||
@@ -1,80 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/sparc64/mm/extable.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <asm/uaccess.h> | ||
8 | |||
9 | extern const struct exception_table_entry __start___ex_table[]; | ||
10 | extern const struct exception_table_entry __stop___ex_table[]; | ||
11 | |||
12 | void sort_extable(struct exception_table_entry *start, | ||
13 | struct exception_table_entry *finish) | ||
14 | { | ||
15 | } | ||
16 | |||
17 | /* Caller knows they are in a range if ret->fixup == 0 */ | ||
18 | const struct exception_table_entry * | ||
19 | search_extable(const struct exception_table_entry *start, | ||
20 | const struct exception_table_entry *last, | ||
21 | unsigned long value) | ||
22 | { | ||
23 | const struct exception_table_entry *walk; | ||
24 | |||
25 | /* Single insn entries are encoded as: | ||
26 | * word 1: insn address | ||
27 | * word 2: fixup code address | ||
28 | * | ||
29 | * Range entries are encoded as: | ||
30 | * word 1: first insn address | ||
31 | * word 2: 0 | ||
32 | * word 3: last insn address + 4 bytes | ||
33 | * word 4: fixup code address | ||
34 | * | ||
35 | * See asm/uaccess.h for more details. | ||
36 | */ | ||
37 | |||
38 | /* 1. Try to find an exact match. */ | ||
39 | for (walk = start; walk <= last; walk++) { | ||
40 | if (walk->fixup == 0) { | ||
41 | /* A range entry, skip both parts. */ | ||
42 | walk++; | ||
43 | continue; | ||
44 | } | ||
45 | |||
46 | if (walk->insn == value) | ||
47 | return walk; | ||
48 | } | ||
49 | |||
50 | /* 2. Try to find a range match. */ | ||
51 | for (walk = start; walk <= (last - 1); walk++) { | ||
52 | if (walk->fixup) | ||
53 | continue; | ||
54 | |||
55 | if (walk[0].insn <= value && walk[1].insn > value) | ||
56 | return walk; | ||
57 | |||
58 | walk++; | ||
59 | } | ||
60 | |||
61 | return NULL; | ||
62 | } | ||
63 | |||
64 | /* Special extable search, which handles ranges. Returns fixup */ | ||
65 | unsigned long search_extables_range(unsigned long addr, unsigned long *g2) | ||
66 | { | ||
67 | const struct exception_table_entry *entry; | ||
68 | |||
69 | entry = search_exception_tables(addr); | ||
70 | if (!entry) | ||
71 | return 0; | ||
72 | |||
73 | /* Inside range? Fix g2 and return correct fixup */ | ||
74 | if (!entry->fixup) { | ||
75 | *g2 = (addr - entry->insn) / 4; | ||
76 | return (entry + 1)->fixup; | ||
77 | } | ||
78 | |||
79 | return entry->fixup; | ||
80 | } | ||
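Editor's note: deleting this file works because, after the conversions above, every remaining __ex_table entry is a plain insn/fixup pair, so the generic kernel code can sort the table once and binary-search it on the faulting PC; the G2-based "offset into the range" convention that search_extables_range() implemented has no users left. A userspace sketch of that sorted single-entry lookup (the field names follow the entries used in this diff; the sort and search code itself is illustrative, not the generic kernel implementation):

#include <stdio.h>
#include <stdlib.h>

struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address execution resumes at */
};

static int cmp_ex(const void *a, const void *b)
{
	const struct exception_table_entry *x = a, *y = b;

	if (x->insn < y->insn)
		return -1;
	return x->insn > y->insn;
}

static const struct exception_table_entry *
search_table(const struct exception_table_entry *tab, size_t n, unsigned long pc)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (tab[mid].insn == pc)
			return &tab[mid];
		if (tab[mid].insn < pc)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;
}

int main(void)
{
	struct exception_table_entry tab[] = {
		{ 0x40c, 0xf00 }, { 0x404, 0xf00 }, { 0x408, 0xf08 },
	};
	const struct exception_table_entry *e;

	qsort(tab, 3, sizeof(tab[0]), cmp_ex);	/* what sort_extable() now gets to do */
	e = search_table(tab, 3, 0x408);
	printf("fixup for pc 0x408: %#lx\n", e ? e->fixup : 0UL);
	return 0;
}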
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index db1e3310e907..31fbc67719a1 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c | |||
@@ -32,8 +32,6 @@ | |||
32 | 32 | ||
33 | #define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0])) | 33 | #define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0])) |
34 | 34 | ||
35 | extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; | ||
36 | |||
37 | /* | 35 | /* |
38 | * To debug kernel to catch accesses to certain virtual/physical addresses. | 36 | * To debug kernel to catch accesses to certain virtual/physical addresses. |
39 | * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. | 37 | * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. |
@@ -71,53 +69,6 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode) | |||
71 | : "memory"); | 69 | : "memory"); |
72 | } | 70 | } |
73 | 71 | ||
74 | /* Nice, simple, prom library does all the sweating for us. ;) */ | ||
75 | unsigned long __init prom_probe_memory (void) | ||
76 | { | ||
77 | register struct linux_mlist_p1275 *mlist; | ||
78 | register unsigned long bytes, base_paddr, tally; | ||
79 | register int i; | ||
80 | |||
81 | i = 0; | ||
82 | mlist = *prom_meminfo()->p1275_available; | ||
83 | bytes = tally = mlist->num_bytes; | ||
84 | base_paddr = mlist->start_adr; | ||
85 | |||
86 | sp_banks[0].base_addr = base_paddr; | ||
87 | sp_banks[0].num_bytes = bytes; | ||
88 | |||
89 | while (mlist->theres_more != (void *) 0) { | ||
90 | i++; | ||
91 | mlist = mlist->theres_more; | ||
92 | bytes = mlist->num_bytes; | ||
93 | tally += bytes; | ||
94 | if (i >= SPARC_PHYS_BANKS-1) { | ||
95 | printk ("The machine has more banks than " | ||
96 | "this kernel can support\n" | ||
97 | "Increase the SPARC_PHYS_BANKS " | ||
98 | "setting (currently %d)\n", | ||
99 | SPARC_PHYS_BANKS); | ||
100 | i = SPARC_PHYS_BANKS-1; | ||
101 | break; | ||
102 | } | ||
103 | |||
104 | sp_banks[i].base_addr = mlist->start_adr; | ||
105 | sp_banks[i].num_bytes = mlist->num_bytes; | ||
106 | } | ||
107 | |||
108 | i++; | ||
109 | sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL; | ||
110 | sp_banks[i].num_bytes = 0; | ||
111 | |||
112 | /* Now mask all bank sizes on a page boundary, it is all we can | ||
113 | * use anyways. | ||
114 | */ | ||
115 | for (i = 0; sp_banks[i].num_bytes != 0; i++) | ||
116 | sp_banks[i].num_bytes &= PAGE_MASK; | ||
117 | |||
118 | return tally; | ||
119 | } | ||
120 | |||
121 | static void __kprobes unhandled_fault(unsigned long address, | 72 | static void __kprobes unhandled_fault(unsigned long address, |
122 | struct task_struct *tsk, | 73 | struct task_struct *tsk, |
123 | struct pt_regs *regs) | 74 | struct pt_regs *regs) |
@@ -242,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) | |||
242 | static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, | 193 | static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, |
243 | unsigned int insn, unsigned long address) | 194 | unsigned int insn, unsigned long address) |
244 | { | 195 | { |
245 | unsigned long g2; | ||
246 | unsigned char asi = ASI_P; | 196 | unsigned char asi = ASI_P; |
247 | 197 | ||
248 | if ((!insn) && (regs->tstate & TSTATE_PRIV)) | 198 | if ((!insn) && (regs->tstate & TSTATE_PRIV)) |
@@ -273,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, | |||
273 | } | 223 | } |
274 | } | 224 | } |
275 | 225 | ||
276 | g2 = regs->u_regs[UREG_G2]; | ||
277 | |||
278 | /* Is this in ex_table? */ | 226 | /* Is this in ex_table? */ |
279 | if (regs->tstate & TSTATE_PRIV) { | 227 | if (regs->tstate & TSTATE_PRIV) { |
280 | unsigned long fixup; | 228 | const struct exception_table_entry *entry; |
281 | 229 | ||
282 | if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) { | 230 | if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) { |
283 | if (insn & 0x2000) | 231 | if (insn & 0x2000) |
@@ -288,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, | |||
288 | 236 | ||
289 | /* Look in asi.h: All _S asis have LS bit set */ | 237 | /* Look in asi.h: All _S asis have LS bit set */ |
290 | if ((asi & 0x1) && | 238 | if ((asi & 0x1) && |
291 | (fixup = search_extables_range(regs->tpc, &g2))) { | 239 | (entry = search_exception_tables(regs->tpc))) { |
292 | regs->tpc = fixup; | 240 | regs->tpc = entry->fixup; |
293 | regs->tnpc = regs->tpc + 4; | 241 | regs->tnpc = regs->tpc + 4; |
294 | regs->u_regs[UREG_G2] = g2; | ||
295 | return; | 242 | return; |
296 | } | 243 | } |
297 | } else { | 244 | } else { |
@@ -461,7 +408,7 @@ good_area: | |||
461 | } | 408 | } |
462 | 409 | ||
463 | up_read(&mm->mmap_sem); | 410 | up_read(&mm->mmap_sem); |
464 | goto fault_done; | 411 | return; |
465 | 412 | ||
466 | /* | 413 | /* |
467 | * Something tried to access memory that isn't in our memory map.. | 414 | * Something tried to access memory that isn't in our memory map.. |
@@ -473,8 +420,7 @@ bad_area: | |||
473 | 420 | ||
474 | handle_kernel_fault: | 421 | handle_kernel_fault: |
475 | do_kernel_fault(regs, si_code, fault_code, insn, address); | 422 | do_kernel_fault(regs, si_code, fault_code, insn, address); |
476 | 423 | return; | |
477 | goto fault_done; | ||
478 | 424 | ||
479 | /* | 425 | /* |
480 | * We ran out of memory, or some other thing happened to us that made | 426 | * We ran out of memory, or some other thing happened to us that made |
@@ -505,9 +451,4 @@ do_sigbus: | |||
505 | /* Kernel mode? Handle exceptions or die */ | 451 | /* Kernel mode? Handle exceptions or die */ |
506 | if (regs->tstate & TSTATE_PRIV) | 452 | if (regs->tstate & TSTATE_PRIV) |
507 | goto handle_kernel_fault; | 453 | goto handle_kernel_fault; |
508 | |||
509 | fault_done: | ||
510 | /* These values are no longer needed, clear them. */ | ||
511 | set_thread_fault_code(0); | ||
512 | current_thread_info()->fault_address = 0; | ||
513 | } | 454 | } |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index fdb1ebb308c9..5db50524f20d 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
21 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
22 | #include <linux/kprobes.h> | 22 | #include <linux/kprobes.h> |
23 | #include <linux/cache.h> | ||
24 | #include <linux/sort.h> | ||
23 | 25 | ||
24 | #include <asm/head.h> | 26 | #include <asm/head.h> |
25 | #include <asm/system.h> | 27 | #include <asm/system.h> |
@@ -40,24 +42,80 @@ | |||
40 | 42 | ||
41 | extern void device_scan(void); | 43 | extern void device_scan(void); |
42 | 44 | ||
43 | struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; | 45 | #define MAX_BANKS 32 |
44 | 46 | ||
45 | unsigned long *sparc64_valid_addr_bitmap; | 47 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; |
48 | static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; | ||
49 | static int pavail_ents __initdata; | ||
50 | static int pavail_rescan_ents __initdata; | ||
51 | |||
52 | static int cmp_p64(const void *a, const void *b) | ||
53 | { | ||
54 | const struct linux_prom64_registers *x = a, *y = b; | ||
55 | |||
56 | if (x->phys_addr > y->phys_addr) | ||
57 | return 1; | ||
58 | if (x->phys_addr < y->phys_addr) | ||
59 | return -1; | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static void __init read_obp_memory(const char *property, | ||
64 | struct linux_prom64_registers *regs, | ||
65 | int *num_ents) | ||
66 | { | ||
67 | int node = prom_finddevice("/memory"); | ||
68 | int prop_size = prom_getproplen(node, property); | ||
69 | int ents, ret, i; | ||
70 | |||
71 | ents = prop_size / sizeof(struct linux_prom64_registers); | ||
72 | if (ents > MAX_BANKS) { | ||
73 | prom_printf("The machine has more %s property entries than " | ||
74 | "this kernel can support (%d).\n", | ||
75 | property, MAX_BANKS); | ||
76 | prom_halt(); | ||
77 | } | ||
78 | |||
79 | ret = prom_getproperty(node, property, (char *) regs, prop_size); | ||
80 | if (ret == -1) { | ||
81 | prom_printf("Couldn't get %s property from /memory.\n"); | ||
82 | prom_halt(); | ||
83 | } | ||
84 | |||
85 | *num_ents = ents; | ||
86 | |||
87 | /* Sanitize what we got from the firmware, by page aligning | ||
88 | * everything. | ||
89 | */ | ||
90 | for (i = 0; i < ents; i++) { | ||
91 | unsigned long base, size; | ||
92 | |||
93 | base = regs[i].phys_addr; | ||
94 | size = regs[i].reg_size; | ||
95 | |||
96 | size &= PAGE_MASK; | ||
97 | if (base & ~PAGE_MASK) { | ||
98 | unsigned long new_base = PAGE_ALIGN(base); | ||
99 | |||
100 | size -= new_base - base; | ||
101 | if ((long) size < 0L) | ||
102 | size = 0UL; | ||
103 | base = new_base; | ||
104 | } | ||
105 | regs[i].phys_addr = base; | ||
106 | regs[i].reg_size = size; | ||
107 | } | ||
108 | sort(regs, ents, sizeof(struct linux_prom64_registers), | ||
109 | cmp_p64, NULL); | ||
110 | } | ||
111 | |||
112 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; | ||
46 | 113 | ||
47 | /* Ugly, but necessary... -DaveM */ | 114 | /* Ugly, but necessary... -DaveM */ |
48 | unsigned long phys_base; | 115 | unsigned long phys_base __read_mostly; |
49 | unsigned long kern_base; | 116 | unsigned long kern_base __read_mostly; |
50 | unsigned long kern_size; | 117 | unsigned long kern_size __read_mostly; |
51 | unsigned long pfn_base; | 118 | unsigned long pfn_base __read_mostly; |
52 | |||
53 | /* This is even uglier. We have a problem where the kernel may not be | ||
54 | * located at phys_base. However, initial __alloc_bootmem() calls need to | ||
55 | * be adjusted to be within the 4-8Megs that the kernel is mapped to, else | ||
56 | * those page mappings wont work. Things are ok after inherit_prom_mappings | ||
57 | * is called though. Dave says he'll clean this up some other time. | ||
58 | * -- BenC | ||
59 | */ | ||
60 | static unsigned long bootmap_base; | ||
61 | 119 | ||
62 | /* get_new_mmu_context() uses "cache + 1". */ | 120 | /* get_new_mmu_context() uses "cache + 1". */ |
63 | DEFINE_SPINLOCK(ctx_alloc_lock); | 121 | DEFINE_SPINLOCK(ctx_alloc_lock); |
@@ -73,7 +131,7 @@ extern unsigned long sparc_ramdisk_image64; | |||
73 | extern unsigned int sparc_ramdisk_image; | 131 | extern unsigned int sparc_ramdisk_image; |
74 | extern unsigned int sparc_ramdisk_size; | 132 | extern unsigned int sparc_ramdisk_size; |
75 | 133 | ||
76 | struct page *mem_map_zero; | 134 | struct page *mem_map_zero __read_mostly; |
77 | 135 | ||
78 | int bigkernel = 0; | 136 | int bigkernel = 0; |
79 | 137 | ||
@@ -179,8 +237,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c | |||
179 | : "g1", "g7"); | 237 | : "g1", "g7"); |
180 | } | 238 | } |
181 | 239 | ||
182 | extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code); | ||
183 | |||
184 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) | 240 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
185 | { | 241 | { |
186 | struct page *page; | 242 | struct page *page; |
@@ -207,10 +263,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p | |||
207 | 263 | ||
208 | put_cpu(); | 264 | put_cpu(); |
209 | } | 265 | } |
210 | |||
211 | if (get_thread_fault_code()) | ||
212 | __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context), | ||
213 | address, pte, get_thread_fault_code()); | ||
214 | } | 266 | } |
215 | 267 | ||
216 | void flush_dcache_page(struct page *page) | 268 | void flush_dcache_page(struct page *page) |
@@ -309,6 +361,7 @@ struct linux_prom_translation { | |||
309 | unsigned long size; | 361 | unsigned long size; |
310 | unsigned long data; | 362 | unsigned long data; |
311 | }; | 363 | }; |
364 | static struct linux_prom_translation prom_trans[512] __initdata; | ||
312 | 365 | ||
313 | extern unsigned long prom_boot_page; | 366 | extern unsigned long prom_boot_page; |
314 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); | 367 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); |
@@ -318,14 +371,63 @@ extern void register_prom_callbacks(void); | |||
318 | /* Exported for SMP bootup purposes. */ | 371 | /* Exported for SMP bootup purposes. */ |
319 | unsigned long kern_locked_tte_data; | 372 | unsigned long kern_locked_tte_data; |
320 | 373 | ||
321 | void __init early_pgtable_allocfail(char *type) | 374 | /* Exported for kernel TLB miss handling in ktlb.S */ |
375 | unsigned long prom_pmd_phys __read_mostly; | ||
376 | unsigned int swapper_pgd_zero __read_mostly; | ||
377 | |||
378 | /* Allocate power-of-2 aligned chunks from the end of the | ||
379 | * kernel image. Return physical address. | ||
380 | */ | ||
381 | static inline unsigned long early_alloc_phys(unsigned long size) | ||
322 | { | 382 | { |
323 | prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type); | 383 | unsigned long base; |
324 | prom_halt(); | 384 | |
385 | BUILD_BUG_ON(size & (size - 1)); | ||
386 | |||
387 | kern_size = (kern_size + (size - 1)) & ~(size - 1); | ||
388 | base = kern_base + kern_size; | ||
389 | kern_size += size; | ||
390 | |||
391 | return base; | ||
392 | } | ||
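early_alloc_phys() above is a small bump allocator: it rounds the running kernel size up to the requested power-of-2 size and returns the physical address just past the current end of the image, which is aligned as long as kern_base itself is sufficiently aligned. A minimal user-space sketch of the same idea (the names and sample numbers here are made up for illustration):

    #include <assert.h>
    #include <stdio.h>

    static unsigned long base = 0x400000UL; /* assumed 4MB-aligned region start */
    static unsigned long used = 0x3456UL;   /* bytes already consumed */

    static unsigned long bump_alloc(unsigned long size)
    {
            unsigned long pa;

            /* size must be a power of two, mirroring the BUILD_BUG_ON() above */
            assert((size & (size - 1)) == 0);

            used = (used + (size - 1)) & ~(size - 1);   /* align the cursor */
            pa = base + used;
            used += size;

            return pa;
    }

    int main(void)
    {
            printf("%#lx\n", bump_alloc(8192)); /* 0x400000 + 0x4000 */
            printf("%#lx\n", bump_alloc(2048)); /* next 2K-aligned chunk */
            return 0;
    }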
393 | |||
394 | static inline unsigned long load_phys32(unsigned long pa) | ||
395 | { | ||
396 | unsigned long val; | ||
397 | |||
398 | __asm__ __volatile__("lduwa [%1] %2, %0" | ||
399 | : "=&r" (val) | ||
400 | : "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
401 | |||
402 | return val; | ||
403 | } | ||
404 | |||
405 | static inline unsigned long load_phys64(unsigned long pa) | ||
406 | { | ||
407 | unsigned long val; | ||
408 | |||
409 | __asm__ __volatile__("ldxa [%1] %2, %0" | ||
410 | : "=&r" (val) | ||
411 | : "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
412 | |||
413 | return val; | ||
414 | } | ||
415 | |||
416 | static inline void store_phys32(unsigned long pa, unsigned long val) | ||
417 | { | ||
418 | __asm__ __volatile__("stwa %0, [%1] %2" | ||
419 | : /* no outputs */ | ||
420 | : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
421 | } | ||
422 | |||
423 | static inline void store_phys64(unsigned long pa, unsigned long val) | ||
424 | { | ||
425 | __asm__ __volatile__("stxa %0, [%1] %2" | ||
426 | : /* no outputs */ | ||
427 | : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
325 | } | 428 | } |
326 | 429 | ||
327 | #define BASE_PAGE_SIZE 8192 | 430 | #define BASE_PAGE_SIZE 8192 |
328 | static pmd_t *prompmd; | ||
329 | 431 | ||
330 | /* | 432 | /* |
331 | * Translate PROM's mapping we capture at boot time into physical address. | 433 | * Translate PROM's mapping we capture at boot time into physical address. |
@@ -333,278 +435,172 @@ static pmd_t *prompmd; | |||
333 | */ | 435 | */ |
334 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | 436 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) |
335 | { | 437 | { |
336 | pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff); | 438 | unsigned long pmd_phys = (prom_pmd_phys + |
337 | pte_t *ptep; | 439 | ((promva >> 23) & 0x7ff) * sizeof(pmd_t)); |
440 | unsigned long pte_phys; | ||
441 | pmd_t pmd_ent; | ||
442 | pte_t pte_ent; | ||
338 | unsigned long base; | 443 | unsigned long base; |
339 | 444 | ||
340 | if (pmd_none(*pmdp)) { | 445 | pmd_val(pmd_ent) = load_phys32(pmd_phys); |
446 | if (pmd_none(pmd_ent)) { | ||
341 | if (error) | 447 | if (error) |
342 | *error = 1; | 448 | *error = 1; |
343 | return(0); | 449 | return 0; |
344 | } | 450 | } |
345 | ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff); | 451 | |
346 | if (!pte_present(*ptep)) { | 452 | pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL; |
453 | pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t); | ||
454 | pte_val(pte_ent) = load_phys64(pte_phys); | ||
455 | if (!pte_present(pte_ent)) { | ||
347 | if (error) | 456 | if (error) |
348 | *error = 1; | 457 | *error = 1; |
349 | return(0); | 458 | return 0; |
350 | } | 459 | } |
351 | if (error) { | 460 | if (error) { |
352 | *error = 0; | 461 | *error = 0; |
353 | return(pte_val(*ptep)); | 462 | return pte_val(pte_ent); |
354 | } | 463 | } |
355 | base = pte_val(*ptep) & _PAGE_PADDR; | 464 | base = pte_val(pte_ent) & _PAGE_PADDR; |
356 | return(base + (promva & (BASE_PAGE_SIZE - 1))); | 465 | return (base + (promva & (BASE_PAGE_SIZE - 1))); |
357 | } | 466 | } |
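The walk above is plain index arithmetic on a two-level software table held at a physical address: bits 33..23 of the virtual address pick one of 2048 pmd slots, the 32-bit pmd entry stores the pte table's physical address shifted right by 11, and bits 22..13 pick one of 1024 eight-byte pte entries (exactly one 8K page per table). A sketch of just that arithmetic, with made-up sample values and no physical loads:

    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 8192UL

    /* pmd slot: bits 33..23 of the virtual address (2048 entries) */
    static unsigned long pmd_index(unsigned long va)
    {
            return (va >> 23) & 0x7ff;
    }

    /* pte slot: bits 22..13 (1024 entries of 8 bytes = one 8K page) */
    static unsigned long pte_index(unsigned long va)
    {
            return (va >> 13) & 0x3ff;
    }

    /* The 32-bit pmd entry holds the pte table's physical address >> 11 */
    static unsigned long pte_table_phys(unsigned long pmd_val)
    {
            return pmd_val << 11;
    }

    int main(void)
    {
            unsigned long va = 0xf01fe000UL;    /* made-up OBP-range address */
            unsigned long pmd_val = 0x2345UL;   /* made-up pmd entry */

            printf("pmd slot %lu, pte slot %lu, page offset %lu\n",
                   pmd_index(va), pte_index(va), va & (SKETCH_PAGE_SIZE - 1));
            printf("pte table at phys %#lx\n", pte_table_phys(pmd_val));
            return 0;
    }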
358 | 467 | ||
359 | static void inherit_prom_mappings(void) | 468 | /* The obp translations are saved based on 8k pagesize, since obp can |
469 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> | ||
470 | * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte | ||
471 | * scheme (also, see rant in inherit_locked_prom_mappings()). | ||
472 | */ | ||
473 | static void __init build_obp_range(unsigned long start, unsigned long end, unsigned long data) | ||
360 | { | 474 | { |
361 | struct linux_prom_translation *trans; | 475 | unsigned long vaddr; |
362 | unsigned long phys_page, tte_vaddr, tte_data; | ||
363 | void (*remap_func)(unsigned long, unsigned long, int); | ||
364 | pmd_t *pmdp; | ||
365 | pte_t *ptep; | ||
366 | int node, n, i, tsz; | ||
367 | extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2]; | ||
368 | 476 | ||
369 | node = prom_finddevice("/virtual-memory"); | 477 | for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) { |
370 | n = prom_getproplen(node, "translations"); | 478 | unsigned long val, pte_phys, pmd_phys; |
371 | if (n == 0 || n == -1) { | 479 | pmd_t pmd_ent; |
372 | prom_printf("Couldn't get translation property\n"); | 480 | int i; |
373 | prom_halt(); | ||
374 | } | ||
375 | n += 5 * sizeof(struct linux_prom_translation); | ||
376 | for (tsz = 1; tsz < n; tsz <<= 1) | ||
377 | /* empty */; | ||
378 | trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base); | ||
379 | if (trans == NULL) { | ||
380 | prom_printf("inherit_prom_mappings: Cannot alloc translations.\n"); | ||
381 | prom_halt(); | ||
382 | } | ||
383 | memset(trans, 0, tsz); | ||
384 | 481 | ||
385 | if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { | 482 | pmd_phys = (prom_pmd_phys + |
386 | prom_printf("Couldn't get translation property\n"); | 483 | (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t))); |
387 | prom_halt(); | 484 | pmd_val(pmd_ent) = load_phys32(pmd_phys); |
388 | } | 485 | if (pmd_none(pmd_ent)) { |
389 | n = n / sizeof(*trans); | 486 | pte_phys = early_alloc_phys(BASE_PAGE_SIZE); |
390 | 487 | ||
391 | /* | 488 | for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++) |
392 | * The obp translations are saved based on 8k pagesize, since obp can | 489 | store_phys64(pte_phys+i*sizeof(pte_t),0); |
393 | * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000, | ||
394 | * ie obp range, are handled in entry.S and do not use the vpte scheme | ||
395 | * (see rant in inherit_locked_prom_mappings()). | ||
396 | */ | ||
397 | #define OBP_PMD_SIZE 2048 | ||
398 | prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base); | ||
399 | if (prompmd == NULL) | ||
400 | early_pgtable_allocfail("pmd"); | ||
401 | memset(prompmd, 0, OBP_PMD_SIZE); | ||
402 | for (i = 0; i < n; i++) { | ||
403 | unsigned long vaddr; | ||
404 | |||
405 | if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) { | ||
406 | for (vaddr = trans[i].virt; | ||
407 | ((vaddr < trans[i].virt + trans[i].size) && | ||
408 | (vaddr < HI_OBP_ADDRESS)); | ||
409 | vaddr += BASE_PAGE_SIZE) { | ||
410 | unsigned long val; | ||
411 | |||
412 | pmdp = prompmd + ((vaddr >> 23) & 0x7ff); | ||
413 | if (pmd_none(*pmdp)) { | ||
414 | ptep = __alloc_bootmem(BASE_PAGE_SIZE, | ||
415 | BASE_PAGE_SIZE, | ||
416 | bootmap_base); | ||
417 | if (ptep == NULL) | ||
418 | early_pgtable_allocfail("pte"); | ||
419 | memset(ptep, 0, BASE_PAGE_SIZE); | ||
420 | pmd_set(pmdp, ptep); | ||
421 | } | ||
422 | ptep = (pte_t *)__pmd_page(*pmdp) + | ||
423 | ((vaddr >> 13) & 0x3ff); | ||
424 | 490 | ||
425 | val = trans[i].data; | 491 | pmd_val(pmd_ent) = pte_phys >> 11UL; |
492 | store_phys32(pmd_phys, pmd_val(pmd_ent)); | ||
493 | } | ||
426 | 494 | ||
427 | /* Clear diag TTE bits. */ | 495 | pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL; |
428 | if (tlb_type == spitfire) | 496 | pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t)); |
429 | val &= ~0x0003fe0000000000UL; | ||
430 | 497 | ||
431 | set_pte_at(&init_mm, vaddr, | 498 | val = data; |
432 | ptep, __pte(val | _PAGE_MODIFIED)); | ||
433 | trans[i].data += BASE_PAGE_SIZE; | ||
434 | } | ||
435 | } | ||
436 | } | ||
437 | phys_page = __pa(prompmd); | ||
438 | obp_iaddr_patch[0] |= (phys_page >> 10); | ||
439 | obp_iaddr_patch[1] |= (phys_page & 0x3ff); | ||
440 | flushi((long)&obp_iaddr_patch[0]); | ||
441 | obp_daddr_patch[0] |= (phys_page >> 10); | ||
442 | obp_daddr_patch[1] |= (phys_page & 0x3ff); | ||
443 | flushi((long)&obp_daddr_patch[0]); | ||
444 | 499 | ||
445 | /* Now fixup OBP's idea about where we really are mapped. */ | 500 | /* Clear diag TTE bits. */ |
446 | prom_printf("Remapping the kernel... "); | 501 | if (tlb_type == spitfire) |
502 | val &= ~0x0003fe0000000000UL; | ||
447 | 503 | ||
448 | /* Spitfire Errata #32 workaround */ | 504 | store_phys64(pte_phys, val | _PAGE_MODIFIED); |
449 | /* NOTE: Using plain zero for the context value is | ||
450 | * correct here, we are not using the Linux trap | ||
451 | * tables yet so we should not use the special | ||
452 | * UltraSPARC-III+ page size encodings yet. | ||
453 | */ | ||
454 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
455 | "flush %%g6" | ||
456 | : /* No outputs */ | ||
457 | : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
458 | |||
459 | switch (tlb_type) { | ||
460 | default: | ||
461 | case spitfire: | ||
462 | phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()); | ||
463 | break; | ||
464 | |||
465 | case cheetah: | ||
466 | case cheetah_plus: | ||
467 | phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent()); | ||
468 | break; | ||
469 | }; | ||
470 | |||
471 | phys_page &= _PAGE_PADDR; | ||
472 | phys_page += ((unsigned long)&prom_boot_page - | ||
473 | (unsigned long)KERNBASE); | ||
474 | 505 | ||
475 | if (tlb_type == spitfire) { | 506 | data += BASE_PAGE_SIZE; |
476 | /* Lock this into i/d tlb entry 59 */ | ||
477 | __asm__ __volatile__( | ||
478 | "stxa %%g0, [%2] %3\n\t" | ||
479 | "stxa %0, [%1] %4\n\t" | ||
480 | "membar #Sync\n\t" | ||
481 | "flush %%g6\n\t" | ||
482 | "stxa %%g0, [%2] %5\n\t" | ||
483 | "stxa %0, [%1] %6\n\t" | ||
484 | "membar #Sync\n\t" | ||
485 | "flush %%g6" | ||
486 | : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP | | ||
487 | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), | ||
488 | "r" (59 << 3), "r" (TLB_TAG_ACCESS), | ||
489 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), | ||
490 | "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS) | ||
491 | : "memory"); | ||
492 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
493 | /* Lock this into i/d tlb-0 entry 11 */ | ||
494 | __asm__ __volatile__( | ||
495 | "stxa %%g0, [%2] %3\n\t" | ||
496 | "stxa %0, [%1] %4\n\t" | ||
497 | "membar #Sync\n\t" | ||
498 | "flush %%g6\n\t" | ||
499 | "stxa %%g0, [%2] %5\n\t" | ||
500 | "stxa %0, [%1] %6\n\t" | ||
501 | "membar #Sync\n\t" | ||
502 | "flush %%g6" | ||
503 | : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP | | ||
504 | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), | ||
505 | "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS), | ||
506 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), | ||
507 | "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS) | ||
508 | : "memory"); | ||
509 | } else { | ||
510 | /* Implement me :-) */ | ||
511 | BUG(); | ||
512 | } | 507 | } |
508 | } | ||
513 | 509 | ||
514 | tte_vaddr = (unsigned long) KERNBASE; | 510 | static inline int in_obp_range(unsigned long vaddr) |
511 | { | ||
512 | return (vaddr >= LOW_OBP_ADDRESS && | ||
513 | vaddr < HI_OBP_ADDRESS); | ||
514 | } | ||
515 | 515 | ||
516 | /* Spitfire Errata #32 workaround */ | 516 | #define OBP_PMD_SIZE 2048 |
517 | /* NOTE: Using plain zero for the context value is | 517 | static void __init build_obp_pgtable(int prom_trans_ents) |
518 | * correct here, we are not using the Linux trap | 518 | { |
519 | * tables yet so we should not use the special | 519 | unsigned long i; |
520 | * UltraSPARC-III+ page size encodings yet. | ||
521 | */ | ||
522 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
523 | "flush %%g6" | ||
524 | : /* No outputs */ | ||
525 | : "r" (0), | ||
526 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
527 | |||
528 | if (tlb_type == spitfire) | ||
529 | tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()); | ||
530 | else | ||
531 | tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent()); | ||
532 | 520 | ||
533 | kern_locked_tte_data = tte_data; | 521 | prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE); |
522 | for (i = 0; i < OBP_PMD_SIZE; i += 4) | ||
523 | store_phys32(prom_pmd_phys + i, 0); | ||
534 | 524 | ||
535 | remap_func = (void *) ((unsigned long) &prom_remap - | 525 | for (i = 0; i < prom_trans_ents; i++) { |
536 | (unsigned long) &prom_boot_page); | 526 | unsigned long start, end; |
537 | 527 | ||
528 | if (!in_obp_range(prom_trans[i].virt)) | ||
529 | continue; | ||
538 | 530 | ||
539 | /* Spitfire Errata #32 workaround */ | 531 | start = prom_trans[i].virt; |
540 | /* NOTE: Using plain zero for the context value is | 532 | end = start + prom_trans[i].size; |
541 | * correct here, we are not using the Linux trap | 533 | if (end > HI_OBP_ADDRESS) |
542 | * tables yet so we should not use the special | 534 | end = HI_OBP_ADDRESS; |
543 | * UltraSPARC-III+ page size encodings yet. | 535 | |
544 | */ | 536 | build_obp_range(start, end, prom_trans[i].data); |
545 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
546 | "flush %%g6" | ||
547 | : /* No outputs */ | ||
548 | : "r" (0), | ||
549 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
550 | |||
551 | remap_func((tlb_type == spitfire ? | ||
552 | (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) : | ||
553 | (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)), | ||
554 | (unsigned long) KERNBASE, | ||
555 | prom_get_mmu_ihandle()); | ||
556 | |||
557 | if (bigkernel) | ||
558 | remap_func(((tte_data + 0x400000) & _PAGE_PADDR), | ||
559 | (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle()); | ||
560 | |||
561 | /* Flush out that temporary mapping. */ | ||
562 | spitfire_flush_dtlb_nucleus_page(0x0); | ||
563 | spitfire_flush_itlb_nucleus_page(0x0); | ||
564 | |||
565 | /* Now lock us back into the TLBs via OBP. */ | ||
566 | prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr); | ||
567 | prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr); | ||
568 | if (bigkernel) { | ||
569 | prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, | ||
570 | tte_vaddr + 0x400000); | ||
571 | prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, | ||
572 | tte_vaddr + 0x400000); | ||
573 | } | 537 | } |
538 | } | ||
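build_obp_pgtable() only considers translations inside the OBP window (0xf0000000 to 0x100000000, i.e. LOW_OBP_ADDRESS to HI_OBP_ADDRESS): entries outside it are skipped, and an entry running past the top of the window is truncated. A sketch of that clipping with the bounds written out as stand-in constants (assumes a 64-bit unsigned long, as on sparc64):

    #include <stdio.h>

    #define SK_LOW_OBP  0xf0000000UL
    #define SK_HI_OBP   0x100000000UL

    struct trans { unsigned long virt, size, data; };

    static int clip_to_obp(const struct trans *t,
                           unsigned long *start, unsigned long *end)
    {
            if (t->virt < SK_LOW_OBP || t->virt >= SK_HI_OBP)
                    return 0;               /* not an OBP mapping, skip it */

            *start = t->virt;
            *end = t->virt + t->size;
            if (*end > SK_HI_OBP)
                    *end = SK_HI_OBP;       /* truncate at the window top */
            return 1;
    }

    int main(void)
    {
            struct trans t = { 0xfff80000UL, 0x800000UL, 0 };   /* made-up entry */
            unsigned long s, e;

            if (clip_to_obp(&t, &s, &e))
                    printf("build range [%#lx, %#lx)\n", s, e);
            return 0;
    }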
539 | |||
540 | /* Read OBP translations property into 'prom_trans[]'. | ||
541 | * Return the number of entries. | ||
542 | */ | ||
543 | static int __init read_obp_translations(void) | ||
544 | { | ||
545 | int n, node; | ||
574 | 546 | ||
575 | /* Re-read translations property. */ | 547 | node = prom_finddevice("/virtual-memory"); |
576 | if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { | 548 | n = prom_getproplen(node, "translations"); |
577 | prom_printf("Couldn't get translation property\n"); | 549 | if (unlikely(n == 0 || n == -1)) { |
550 | prom_printf("prom_mappings: Couldn't get size.\n"); | ||
551 | prom_halt(); | ||
552 | } | ||
553 | if (unlikely(n > sizeof(prom_trans))) { | ||
554 | prom_printf("prom_mappings: Size %Zd is too big.\n", n); | ||
578 | prom_halt(); | 555 | prom_halt(); |
579 | } | 556 | } |
580 | n = n / sizeof(*trans); | ||
581 | 557 | ||
582 | for (i = 0; i < n; i++) { | 558 | if ((n = prom_getproperty(node, "translations", |
583 | unsigned long vaddr = trans[i].virt; | 559 | (char *)&prom_trans[0], |
584 | unsigned long size = trans[i].size; | 560 | sizeof(prom_trans))) == -1) { |
561 | prom_printf("prom_mappings: Couldn't get property.\n"); | ||
562 | prom_halt(); | ||
563 | } | ||
564 | n = n / sizeof(struct linux_prom_translation); | ||
565 | return n; | ||
566 | } | ||
585 | 567 | ||
586 | if (vaddr < 0xf0000000UL) { | 568 | static void __init remap_kernel(void) |
587 | unsigned long avoid_start = (unsigned long) KERNBASE; | 569 | { |
588 | unsigned long avoid_end = avoid_start + (4 * 1024 * 1024); | 570 | unsigned long phys_page, tte_vaddr, tte_data; |
571 | int tlb_ent = sparc64_highest_locked_tlbent(); | ||
589 | 572 | ||
590 | if (bigkernel) | 573 | tte_vaddr = (unsigned long) KERNBASE; |
591 | avoid_end += (4 * 1024 * 1024); | 574 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
592 | if (vaddr < avoid_start) { | 575 | tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | |
593 | unsigned long top = vaddr + size; | 576 | _PAGE_CP | _PAGE_CV | _PAGE_P | |
577 | _PAGE_L | _PAGE_W)); | ||
594 | 578 | ||
595 | if (top > avoid_start) | 579 | kern_locked_tte_data = tte_data; |
596 | top = avoid_start; | ||
597 | prom_unmap(top - vaddr, vaddr); | ||
598 | } | ||
599 | if ((vaddr + size) > avoid_end) { | ||
600 | unsigned long bottom = vaddr; | ||
601 | 580 | ||
602 | if (bottom < avoid_end) | 581 | /* Now lock us into the TLBs via OBP. */ |
603 | bottom = avoid_end; | 582 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); |
604 | prom_unmap((vaddr + size) - bottom, bottom); | 583 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); |
605 | } | 584 | if (bigkernel) { |
606 | } | 585 | prom_dtlb_load(tlb_ent - 1, |
586 | tte_data + 0x400000, | ||
587 | tte_vaddr + 0x400000); | ||
588 | prom_itlb_load(tlb_ent - 1, | ||
589 | tte_data + 0x400000, | ||
590 | tte_vaddr + 0x400000); | ||
607 | } | 591 | } |
592 | } | ||
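The locked kernel TTE above starts from a 4MB-aligned physical base; (prom_boot_mapping_phys_low >> 22UL) << 22UL is just a way of clearing the low 22 bits. A quick check of that identity with a made-up address:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long phys_low = 0x1f600000UL;  /* illustrative boot phys addr */
            unsigned long a = (phys_low >> 22) << 22;
            unsigned long b = phys_low & ~((1UL << 22) - 1);

            assert(a == b);                         /* same 4MB rounding */
            printf("4MB-aligned base: %#lx\n", a);  /* 0x1f400000 */
            return 0;
    }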
593 | |||
594 | static void __init inherit_prom_mappings(void) | ||
595 | { | ||
596 | int n; | ||
597 | |||
598 | n = read_obp_translations(); | ||
599 | build_obp_pgtable(n); | ||
600 | |||
601 | /* Now fixup OBP's idea about where we really are mapped. */ | ||
602 | prom_printf("Remapping the kernel... "); | ||
603 | remap_kernel(); | ||
608 | 604 | ||
609 | prom_printf("done.\n"); | 605 | prom_printf("done.\n"); |
610 | 606 | ||
@@ -1276,14 +1272,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1276 | int i; | 1272 | int i; |
1277 | 1273 | ||
1278 | #ifdef CONFIG_DEBUG_BOOTMEM | 1274 | #ifdef CONFIG_DEBUG_BOOTMEM |
1279 | prom_printf("bootmem_init: Scan sp_banks, "); | 1275 | prom_printf("bootmem_init: Scan pavail, "); |
1280 | #endif | 1276 | #endif |
1281 | 1277 | ||
1282 | bytes_avail = 0UL; | 1278 | bytes_avail = 0UL; |
1283 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 1279 | for (i = 0; i < pavail_ents; i++) { |
1284 | end_of_phys_memory = sp_banks[i].base_addr + | 1280 | end_of_phys_memory = pavail[i].phys_addr + |
1285 | sp_banks[i].num_bytes; | 1281 | pavail[i].reg_size; |
1286 | bytes_avail += sp_banks[i].num_bytes; | 1282 | bytes_avail += pavail[i].reg_size; |
1287 | if (cmdline_memory_size) { | 1283 | if (cmdline_memory_size) { |
1288 | if (bytes_avail > cmdline_memory_size) { | 1284 | if (bytes_avail > cmdline_memory_size) { |
1289 | unsigned long slack = bytes_avail - cmdline_memory_size; | 1285 | unsigned long slack = bytes_avail - cmdline_memory_size; |
@@ -1291,12 +1287,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1291 | bytes_avail -= slack; | 1287 | bytes_avail -= slack; |
1292 | end_of_phys_memory -= slack; | 1288 | end_of_phys_memory -= slack; |
1293 | 1289 | ||
1294 | sp_banks[i].num_bytes -= slack; | 1290 | pavail[i].reg_size -= slack; |
1295 | if (sp_banks[i].num_bytes == 0) { | 1291 | if ((long)pavail[i].reg_size <= 0L) { |
1296 | sp_banks[i].base_addr = 0xdeadbeef; | 1292 | pavail[i].phys_addr = 0xdeadbeefUL; |
1293 | pavail[i].reg_size = 0UL; | ||
1294 | pavail_ents = i; | ||
1297 | } else { | 1295 | } else { |
1298 | sp_banks[i+1].num_bytes = 0; | 1296 | pavail[i+1].reg_size = 0UL; |
1299 | sp_banks[i+1].base_addr = 0xdeadbeef; | 1297 | pavail[i+1].phys_addr = 0xdeadbeefUL; |
1298 | pavail_ents = i + 1; | ||
1300 | } | 1299 | } |
1301 | break; | 1300 | break; |
1302 | } | 1301 | } |
@@ -1347,17 +1346,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1347 | #endif | 1346 | #endif |
1348 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); | 1347 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); |
1349 | 1348 | ||
1350 | bootmap_base = bootmap_pfn << PAGE_SHIFT; | ||
1351 | |||
1352 | /* Now register the available physical memory with the | 1349 | /* Now register the available physical memory with the |
1353 | * allocator. | 1350 | * allocator. |
1354 | */ | 1351 | */ |
1355 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 1352 | for (i = 0; i < pavail_ents; i++) { |
1356 | #ifdef CONFIG_DEBUG_BOOTMEM | 1353 | #ifdef CONFIG_DEBUG_BOOTMEM |
1357 | prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n", | 1354 | prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n", |
1358 | i, sp_banks[i].base_addr, sp_banks[i].num_bytes); | 1355 | i, pavail[i].phys_addr, pavail[i].reg_size); |
1359 | #endif | 1356 | #endif |
1360 | free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes); | 1357 | free_bootmem(pavail[i].phys_addr, pavail[i].reg_size); |
1361 | } | 1358 | } |
1362 | 1359 | ||
1363 | #ifdef CONFIG_BLK_DEV_INITRD | 1360 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -1398,120 +1395,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1398 | return end_pfn; | 1395 | return end_pfn; |
1399 | } | 1396 | } |
1400 | 1397 | ||
1398 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1399 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) | ||
1400 | { | ||
1401 | unsigned long vstart = PAGE_OFFSET + pstart; | ||
1402 | unsigned long vend = PAGE_OFFSET + pend; | ||
1403 | unsigned long alloc_bytes = 0UL; | ||
1404 | |||
1405 | if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { | ||
1406 | prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", | ||
1407 | vstart, vend); | ||
1408 | prom_halt(); | ||
1409 | } | ||
1410 | |||
1411 | while (vstart < vend) { | ||
1412 | unsigned long this_end, paddr = __pa(vstart); | ||
1413 | pgd_t *pgd = pgd_offset_k(vstart); | ||
1414 | pud_t *pud; | ||
1415 | pmd_t *pmd; | ||
1416 | pte_t *pte; | ||
1417 | |||
1418 | pud = pud_offset(pgd, vstart); | ||
1419 | if (pud_none(*pud)) { | ||
1420 | pmd_t *new; | ||
1421 | |||
1422 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | ||
1423 | alloc_bytes += PAGE_SIZE; | ||
1424 | pud_populate(&init_mm, pud, new); | ||
1425 | } | ||
1426 | |||
1427 | pmd = pmd_offset(pud, vstart); | ||
1428 | if (!pmd_present(*pmd)) { | ||
1429 | pte_t *new; | ||
1430 | |||
1431 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | ||
1432 | alloc_bytes += PAGE_SIZE; | ||
1433 | pmd_populate_kernel(&init_mm, pmd, new); | ||
1434 | } | ||
1435 | |||
1436 | pte = pte_offset_kernel(pmd, vstart); | ||
1437 | this_end = (vstart + PMD_SIZE) & PMD_MASK; | ||
1438 | if (this_end > vend) | ||
1439 | this_end = vend; | ||
1440 | |||
1441 | while (vstart < this_end) { | ||
1442 | pte_val(*pte) = (paddr | pgprot_val(prot)); | ||
1443 | |||
1444 | vstart += PAGE_SIZE; | ||
1445 | paddr += PAGE_SIZE; | ||
1446 | pte++; | ||
1447 | } | ||
1448 | } | ||
1449 | |||
1450 | return alloc_bytes; | ||
1451 | } | ||
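kernel_map_range() walks the kernel page tables for the linear mapping, allocating missing intermediate levels from bootmem the first time a slot is touched, and then writes a pte for every page in the range; kernel_map_pages() below reuses it with either PAGE_KERNEL or an empty pgprot to map or unmap pages. A user-space sketch of that populate-on-demand pattern, with made-up table sizes rather than sparc64's real geometry:

    #include <stdlib.h>
    #include <stdio.h>

    #define TOP_ENTRIES   64
    #define LEAF_ENTRIES  64
    #define SK_PAGE       8192UL

    static unsigned long *top[TOP_ENTRIES];     /* upper level, like the pud/pmd */

    static void map_range(unsigned long start, unsigned long end, unsigned long prot)
    {
            unsigned long va;

            for (va = start; va < end; va += SK_PAGE) {
                    unsigned long page = va / SK_PAGE;
                    unsigned long idx  = (page / LEAF_ENTRIES) % TOP_ENTRIES;
                    unsigned long slot = page % LEAF_ENTRIES;

                    if (!top[idx]) {    /* populate the lower level on first use */
                            top[idx] = calloc(LEAF_ENTRIES, sizeof(unsigned long));
                            if (!top[idx])
                                    abort();
                    }

                    top[idx][slot] = va | prot;  /* leaf entry: address plus prot bits */
            }
    }

    int main(void)
    {
            map_range(0x00000, 0x40000, 0x1);   /* "map" with a prot bit set */
            map_range(0x10000, 0x20000, 0x0);   /* "unmap": prot bits cleared */
            printf("entry for 0x18000 = %#lx\n", top[0][0x18000 / SK_PAGE]);
            return 0;
    }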
1452 | |||
1453 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | ||
1454 | static int pall_ents __initdata; | ||
1455 | |||
1456 | extern unsigned int kvmap_linear_patch[1]; | ||
1457 | |||
1458 | static void __init kernel_physical_mapping_init(void) | ||
1459 | { | ||
1460 | unsigned long i, mem_alloced = 0UL; | ||
1461 | |||
1462 | read_obp_memory("reg", &pall[0], &pall_ents); | ||
1463 | |||
1464 | for (i = 0; i < pall_ents; i++) { | ||
1465 | unsigned long phys_start, phys_end; | ||
1466 | |||
1467 | phys_start = pall[i].phys_addr; | ||
1468 | phys_end = phys_start + pall[i].reg_size; | ||
1469 | mem_alloced += kernel_map_range(phys_start, phys_end, | ||
1470 | PAGE_KERNEL); | ||
1471 | } | ||
1472 | |||
1473 | printk("Allocated %ld bytes for kernel page tables.\n", | ||
1474 | mem_alloced); | ||
1475 | |||
1476 | kvmap_linear_patch[0] = 0x01000000; /* nop */ | ||
1477 | flushi(&kvmap_linear_patch[0]); | ||
1478 | |||
1479 | __flush_tlb_all(); | ||
1480 | } | ||
1481 | |||
1482 | void kernel_map_pages(struct page *page, int numpages, int enable) | ||
1483 | { | ||
1484 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; | ||
1485 | unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); | ||
1486 | |||
1487 | kernel_map_range(phys_start, phys_end, | ||
1488 | (enable ? PAGE_KERNEL : __pgprot(0))); | ||
1489 | |||
1490 | /* we should perform an IPI and flush all tlbs, | ||
1491 | * but that can deadlock, so we only flush the current cpu. | ||
1492 | */ | ||
1493 | __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, | ||
1494 | PAGE_OFFSET + phys_end); | ||
1495 | } | ||
1496 | #endif | ||
1497 | |||
1498 | unsigned long __init find_ecache_flush_span(unsigned long size) | ||
1499 | { | ||
1500 | int i; | ||
1501 | |||
1502 | for (i = 0; i < pavail_ents; i++) { | ||
1503 | if (pavail[i].reg_size >= size) | ||
1504 | return pavail[i].phys_addr; | ||
1505 | } | ||
1506 | |||
1507 | return ~0UL; | ||
1508 | } | ||
1509 | |||
1401 | /* paging_init() sets up the page tables */ | 1510 | /* paging_init() sets up the page tables */ |
1402 | 1511 | ||
1403 | extern void cheetah_ecache_flush_init(void); | 1512 | extern void cheetah_ecache_flush_init(void); |
1404 | 1513 | ||
1405 | static unsigned long last_valid_pfn; | 1514 | static unsigned long last_valid_pfn; |
1515 | pgd_t swapper_pg_dir[2048]; | ||
1406 | 1516 | ||
1407 | void __init paging_init(void) | 1517 | void __init paging_init(void) |
1408 | { | 1518 | { |
1409 | extern pmd_t swapper_pmd_dir[1024]; | 1519 | unsigned long end_pfn, pages_avail, shift; |
1410 | extern unsigned int sparc64_vpte_patchme1[1]; | 1520 | unsigned long real_end, i; |
1411 | extern unsigned int sparc64_vpte_patchme2[1]; | 1521 | |
1412 | unsigned long alias_base = kern_base + PAGE_OFFSET; | 1522 | /* Find available physical memory... */ |
1413 | unsigned long second_alias_page = 0; | 1523 | read_obp_memory("available", &pavail[0], &pavail_ents); |
1414 | unsigned long pt, flags, end_pfn, pages_avail; | 1524 | |
1415 | unsigned long shift = alias_base - ((unsigned long)KERNBASE); | 1525 | phys_base = 0xffffffffffffffffUL; |
1416 | unsigned long real_end; | 1526 | for (i = 0; i < pavail_ents; i++) |
1527 | phys_base = min(phys_base, pavail[i].phys_addr); | ||
1528 | |||
1529 | pfn_base = phys_base >> PAGE_SHIFT; | ||
1530 | |||
1531 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | ||
1532 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | ||
1417 | 1533 | ||
1418 | set_bit(0, mmu_context_bmap); | 1534 | set_bit(0, mmu_context_bmap); |
1419 | 1535 | ||
1536 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); | ||
1537 | |||
1420 | real_end = (unsigned long)_end; | 1538 | real_end = (unsigned long)_end; |
1421 | if ((real_end > ((unsigned long)KERNBASE + 0x400000))) | 1539 | if ((real_end > ((unsigned long)KERNBASE + 0x400000))) |
1422 | bigkernel = 1; | 1540 | bigkernel = 1; |
1423 | #ifdef CONFIG_BLK_DEV_INITRD | 1541 | if ((real_end > ((unsigned long)KERNBASE + 0x800000))) { |
1424 | if (sparc_ramdisk_image || sparc_ramdisk_image64) | 1542 | prom_printf("paging_init: Kernel > 8MB, too large.\n"); |
1425 | real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size)); | 1543 | prom_halt(); |
1426 | #endif | ||
1427 | |||
1428 | /* We assume physical memory starts at some 4mb multiple, | ||
1429 | * if this were not true we wouldn't boot up to this point | ||
1430 | * anyways. | ||
1431 | */ | ||
1432 | pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB; | ||
1433 | pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W; | ||
1434 | local_irq_save(flags); | ||
1435 | if (tlb_type == spitfire) { | ||
1436 | __asm__ __volatile__( | ||
1437 | " stxa %1, [%0] %3\n" | ||
1438 | " stxa %2, [%5] %4\n" | ||
1439 | " membar #Sync\n" | ||
1440 | " flush %%g6\n" | ||
1441 | " nop\n" | ||
1442 | " nop\n" | ||
1443 | " nop\n" | ||
1444 | : /* No outputs */ | ||
1445 | : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt), | ||
1446 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3) | ||
1447 | : "memory"); | ||
1448 | if (real_end >= KERNBASE + 0x340000) { | ||
1449 | second_alias_page = alias_base + 0x400000; | ||
1450 | __asm__ __volatile__( | ||
1451 | " stxa %1, [%0] %3\n" | ||
1452 | " stxa %2, [%5] %4\n" | ||
1453 | " membar #Sync\n" | ||
1454 | " flush %%g6\n" | ||
1455 | " nop\n" | ||
1456 | " nop\n" | ||
1457 | " nop\n" | ||
1458 | : /* No outputs */ | ||
1459 | : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000), | ||
1460 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3) | ||
1461 | : "memory"); | ||
1462 | } | ||
1463 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
1464 | __asm__ __volatile__( | ||
1465 | " stxa %1, [%0] %3\n" | ||
1466 | " stxa %2, [%5] %4\n" | ||
1467 | " membar #Sync\n" | ||
1468 | " flush %%g6\n" | ||
1469 | " nop\n" | ||
1470 | " nop\n" | ||
1471 | " nop\n" | ||
1472 | : /* No outputs */ | ||
1473 | : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt), | ||
1474 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3)) | ||
1475 | : "memory"); | ||
1476 | if (real_end >= KERNBASE + 0x340000) { | ||
1477 | second_alias_page = alias_base + 0x400000; | ||
1478 | __asm__ __volatile__( | ||
1479 | " stxa %1, [%0] %3\n" | ||
1480 | " stxa %2, [%5] %4\n" | ||
1481 | " membar #Sync\n" | ||
1482 | " flush %%g6\n" | ||
1483 | " nop\n" | ||
1484 | " nop\n" | ||
1485 | " nop\n" | ||
1486 | : /* No outputs */ | ||
1487 | : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000), | ||
1488 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3)) | ||
1489 | : "memory"); | ||
1490 | } | ||
1491 | } | 1544 | } |
1492 | local_irq_restore(flags); | 1545 | |
1493 | 1546 | /* Set kernel pgd to upper alias so physical page computations | |
1494 | /* Now set kernel pgd to upper alias so physical page computations | ||
1495 | * work. | 1547 | * work. |
1496 | */ | 1548 | */ |
1497 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); | 1549 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); |
1498 | 1550 | ||
1499 | memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir)); | 1551 | memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); |
1500 | 1552 | ||
1501 | /* Now can init the kernel/bad page tables. */ | 1553 | /* Now can init the kernel/bad page tables. */ |
1502 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | 1554 | pud_set(pud_offset(&swapper_pg_dir[0], 0), |
1503 | swapper_pmd_dir + (shift / sizeof(pgd_t))); | 1555 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); |
1504 | 1556 | ||
1505 | sparc64_vpte_patchme1[0] |= | 1557 | swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); |
1506 | (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10); | ||
1507 | sparc64_vpte_patchme2[0] |= | ||
1508 | (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff); | ||
1509 | flushi((long)&sparc64_vpte_patchme1[0]); | ||
1510 | 1558 | ||
1511 | /* Setup bootmem... */ | ||
1512 | pages_avail = 0; | ||
1513 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | ||
1514 | |||
1515 | /* Inherit non-locked OBP mappings. */ | 1559 | /* Inherit non-locked OBP mappings. */ |
1516 | inherit_prom_mappings(); | 1560 | inherit_prom_mappings(); |
1517 | 1561 | ||
@@ -1527,13 +1571,16 @@ void __init paging_init(void) | |||
1527 | 1571 | ||
1528 | inherit_locked_prom_mappings(1); | 1572 | inherit_locked_prom_mappings(1); |
1529 | 1573 | ||
1530 | /* We only created DTLB mapping of this stuff. */ | ||
1531 | spitfire_flush_dtlb_nucleus_page(alias_base); | ||
1532 | if (second_alias_page) | ||
1533 | spitfire_flush_dtlb_nucleus_page(second_alias_page); | ||
1534 | |||
1535 | __flush_tlb_all(); | 1574 | __flush_tlb_all(); |
1536 | 1575 | ||
1576 | /* Setup bootmem... */ | ||
1577 | pages_avail = 0; | ||
1578 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | ||
1579 | |||
1580 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1581 | kernel_physical_mapping_init(); | ||
1582 | #endif | ||
1583 | |||
1537 | { | 1584 | { |
1538 | unsigned long zones_size[MAX_NR_ZONES]; | 1585 | unsigned long zones_size[MAX_NR_ZONES]; |
1539 | unsigned long zholes_size[MAX_NR_ZONES]; | 1586 | unsigned long zholes_size[MAX_NR_ZONES]; |
@@ -1554,128 +1601,35 @@ void __init paging_init(void) | |||
1554 | device_scan(); | 1601 | device_scan(); |
1555 | } | 1602 | } |
1556 | 1603 | ||
1557 | /* Ok, it seems that the prom can allocate some more memory chunks | ||
1558 | * as a side effect of some prom calls we perform during the | ||
1559 | * boot sequence. My most likely theory is that it is from the | ||
1560 | * prom_set_traptable() call, and OBP is allocating a scratchpad | ||
1561 | * for saving client program register state etc. | ||
1562 | */ | ||
1563 | static void __init sort_memlist(struct linux_mlist_p1275 *thislist) | ||
1564 | { | ||
1565 | int swapi = 0; | ||
1566 | int i, mitr; | ||
1567 | unsigned long tmpaddr, tmpsize; | ||
1568 | unsigned long lowest; | ||
1569 | |||
1570 | for (i = 0; thislist[i].theres_more != 0; i++) { | ||
1571 | lowest = thislist[i].start_adr; | ||
1572 | for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++) | ||
1573 | if (thislist[mitr].start_adr < lowest) { | ||
1574 | lowest = thislist[mitr].start_adr; | ||
1575 | swapi = mitr; | ||
1576 | } | ||
1577 | if (lowest == thislist[i].start_adr) | ||
1578 | continue; | ||
1579 | tmpaddr = thislist[swapi].start_adr; | ||
1580 | tmpsize = thislist[swapi].num_bytes; | ||
1581 | for (mitr = swapi; mitr > i; mitr--) { | ||
1582 | thislist[mitr].start_adr = thislist[mitr-1].start_adr; | ||
1583 | thislist[mitr].num_bytes = thislist[mitr-1].num_bytes; | ||
1584 | } | ||
1585 | thislist[i].start_adr = tmpaddr; | ||
1586 | thislist[i].num_bytes = tmpsize; | ||
1587 | } | ||
1588 | } | ||
1589 | |||
1590 | void __init rescan_sp_banks(void) | ||
1591 | { | ||
1592 | struct linux_prom64_registers memlist[64]; | ||
1593 | struct linux_mlist_p1275 avail[64], *mlist; | ||
1594 | unsigned long bytes, base_paddr; | ||
1595 | int num_regs, node = prom_finddevice("/memory"); | ||
1596 | int i; | ||
1597 | |||
1598 | num_regs = prom_getproperty(node, "available", | ||
1599 | (char *) memlist, sizeof(memlist)); | ||
1600 | num_regs = (num_regs / sizeof(struct linux_prom64_registers)); | ||
1601 | for (i = 0; i < num_regs; i++) { | ||
1602 | avail[i].start_adr = memlist[i].phys_addr; | ||
1603 | avail[i].num_bytes = memlist[i].reg_size; | ||
1604 | avail[i].theres_more = &avail[i + 1]; | ||
1605 | } | ||
1606 | avail[i - 1].theres_more = NULL; | ||
1607 | sort_memlist(avail); | ||
1608 | |||
1609 | mlist = &avail[0]; | ||
1610 | i = 0; | ||
1611 | bytes = mlist->num_bytes; | ||
1612 | base_paddr = mlist->start_adr; | ||
1613 | |||
1614 | sp_banks[0].base_addr = base_paddr; | ||
1615 | sp_banks[0].num_bytes = bytes; | ||
1616 | |||
1617 | while (mlist->theres_more != NULL){ | ||
1618 | i++; | ||
1619 | mlist = mlist->theres_more; | ||
1620 | bytes = mlist->num_bytes; | ||
1621 | if (i >= SPARC_PHYS_BANKS-1) { | ||
1622 | printk ("The machine has more banks than " | ||
1623 | "this kernel can support\n" | ||
1624 | "Increase the SPARC_PHYS_BANKS " | ||
1625 | "setting (currently %d)\n", | ||
1626 | SPARC_PHYS_BANKS); | ||
1627 | i = SPARC_PHYS_BANKS-1; | ||
1628 | break; | ||
1629 | } | ||
1630 | |||
1631 | sp_banks[i].base_addr = mlist->start_adr; | ||
1632 | sp_banks[i].num_bytes = mlist->num_bytes; | ||
1633 | } | ||
1634 | |||
1635 | i++; | ||
1636 | sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL; | ||
1637 | sp_banks[i].num_bytes = 0; | ||
1638 | |||
1639 | for (i = 0; sp_banks[i].num_bytes != 0; i++) | ||
1640 | sp_banks[i].num_bytes &= PAGE_MASK; | ||
1641 | } | ||
1642 | |||
1643 | static void __init taint_real_pages(void) | 1604 | static void __init taint_real_pages(void) |
1644 | { | 1605 | { |
1645 | struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS]; | ||
1646 | int i; | 1606 | int i; |
1647 | 1607 | ||
1648 | for (i = 0; i < SPARC_PHYS_BANKS; i++) { | 1608 | read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); |
1649 | saved_sp_banks[i].base_addr = | ||
1650 | sp_banks[i].base_addr; | ||
1651 | saved_sp_banks[i].num_bytes = | ||
1652 | sp_banks[i].num_bytes; | ||
1653 | } | ||
1654 | |||
1655 | rescan_sp_banks(); | ||
1656 | 1609 | ||
1657 | /* Find changes discovered in the sp_bank rescan and | 1610 | /* Find changes discovered in the physmem available rescan and |
1658 | * reserve the lost portions in the bootmem maps. | 1611 | * reserve the lost portions in the bootmem maps. |
1659 | */ | 1612 | */ |
1660 | for (i = 0; saved_sp_banks[i].num_bytes; i++) { | 1613 | for (i = 0; i < pavail_ents; i++) { |
1661 | unsigned long old_start, old_end; | 1614 | unsigned long old_start, old_end; |
1662 | 1615 | ||
1663 | old_start = saved_sp_banks[i].base_addr; | 1616 | old_start = pavail[i].phys_addr; |
1664 | old_end = old_start + | 1617 | old_end = old_start + |
1665 | saved_sp_banks[i].num_bytes; | 1618 | pavail[i].reg_size; |
1666 | while (old_start < old_end) { | 1619 | while (old_start < old_end) { |
1667 | int n; | 1620 | int n; |
1668 | 1621 | ||
1669 | for (n = 0; sp_banks[n].num_bytes; n++) { | 1622 | for (n = 0; n < pavail_rescan_ents; n++) { |
1670 | unsigned long new_start, new_end; | 1623 | unsigned long new_start, new_end; |
1671 | 1624 | ||
1672 | new_start = sp_banks[n].base_addr; | 1625 | new_start = pavail_rescan[n].phys_addr; |
1673 | new_end = new_start + sp_banks[n].num_bytes; | 1626 | new_end = new_start + |
1627 | pavail_rescan[n].reg_size; | ||
1674 | 1628 | ||
1675 | if (new_start <= old_start && | 1629 | if (new_start <= old_start && |
1676 | new_end >= (old_start + PAGE_SIZE)) { | 1630 | new_end >= (old_start + PAGE_SIZE)) { |
1677 | set_bit (old_start >> 22, | 1631 | set_bit(old_start >> 22, |
1678 | sparc64_valid_addr_bitmap); | 1632 | sparc64_valid_addr_bitmap); |
1679 | goto do_next_page; | 1633 | goto do_next_page; |
1680 | } | 1634 | } |
1681 | } | 1635 | } |
@@ -1695,8 +1649,7 @@ void __init mem_init(void) | |||
1695 | 1649 | ||
1696 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); | 1650 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); |
1697 | i += 1; | 1651 | i += 1; |
1698 | sparc64_valid_addr_bitmap = (unsigned long *) | 1652 | sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); |
1699 | __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base); | ||
1700 | if (sparc64_valid_addr_bitmap == NULL) { | 1653 | if (sparc64_valid_addr_bitmap == NULL) { |
1701 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | 1654 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); |
1702 | prom_halt(); | 1655 | prom_halt(); |
@@ -1749,7 +1702,7 @@ void __init mem_init(void) | |||
1749 | cheetah_ecache_flush_init(); | 1702 | cheetah_ecache_flush_init(); |
1750 | } | 1703 | } |
1751 | 1704 | ||
1752 | void free_initmem (void) | 1705 | void free_initmem(void) |
1753 | { | 1706 | { |
1754 | unsigned long addr, initend; | 1707 | unsigned long addr, initend; |
1755 | 1708 | ||
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index b2ee9b53227f..058b8126c1a7 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S | |||
@@ -144,42 +144,29 @@ __flush_icache_page: /* %o0 = phys_page */ | |||
144 | 144 | ||
145 | #define DTAG_MASK 0x3 | 145 | #define DTAG_MASK 0x3 |
146 | 146 | ||
147 | /* This routine is Spitfire specific so the hardcoded | ||
148 | * D-cache size and line-size are OK. | ||
149 | */ | ||
147 | .align 64 | 150 | .align 64 |
148 | .globl __flush_dcache_page | 151 | .globl __flush_dcache_page |
149 | __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ | 152 | __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ |
150 | sethi %uhi(PAGE_OFFSET), %g1 | 153 | sethi %uhi(PAGE_OFFSET), %g1 |
151 | sllx %g1, 32, %g1 | 154 | sllx %g1, 32, %g1 |
152 | sub %o0, %g1, %o0 | 155 | sub %o0, %g1, %o0 ! physical address |
153 | clr %o4 | 156 | srlx %o0, 11, %o0 ! make D-cache TAG |
154 | srlx %o0, 11, %o0 | 157 | sethi %hi(1 << 14), %o2 ! D-cache size |
155 | sethi %hi(1 << 14), %o2 | 158 | sub %o2, (1 << 5), %o2 ! D-cache line size |
156 | 1: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group | 159 | 1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG |
157 | add %o4, (1 << 5), %o4 ! IEU0 | 160 | andcc %o3, DTAG_MASK, %g0 ! Valid? |
158 | ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group | 161 | be,pn %xcc, 2f ! Nope, branch |
159 | add %o4, (1 << 5), %o4 ! IEU0 | 162 | andn %o3, DTAG_MASK, %o3 ! Clear valid bits |
160 | ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available | 163 | cmp %o3, %o0 ! TAG match? |
161 | add %o4, (1 << 5), %o4 ! IEU0 | 164 | bne,pt %xcc, 2f ! Nope, branch |
162 | andn %o3, DTAG_MASK, %o3 ! IEU1 | 165 | nop |
163 | ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group | 166 | stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG |
164 | add %o4, (1 << 5), %o4 ! IEU0 | 167 | membar #Sync |
165 | andn %g1, DTAG_MASK, %g1 ! IEU1 | 168 | 2: brnz,pt %o2, 1b |
166 | cmp %o0, %o3 ! IEU1 Group | 169 | sub %o2, (1 << 5), %o2 ! D-cache line size |
167 | be,a,pn %xcc, dflush1 ! CTI | ||
168 | sub %o4, (4 << 5), %o4 ! IEU0 (Group) | ||
169 | cmp %o0, %g1 ! IEU1 Group | ||
170 | andn %g2, DTAG_MASK, %g2 ! IEU0 | ||
171 | be,a,pn %xcc, dflush2 ! CTI | ||
172 | sub %o4, (3 << 5), %o4 ! IEU0 (Group) | ||
173 | cmp %o0, %g2 ! IEU1 Group | ||
174 | andn %g3, DTAG_MASK, %g3 ! IEU0 | ||
175 | be,a,pn %xcc, dflush3 ! CTI | ||
176 | sub %o4, (2 << 5), %o4 ! IEU0 (Group) | ||
177 | cmp %o0, %g3 ! IEU1 Group | ||
178 | be,a,pn %xcc, dflush4 ! CTI | ||
179 | sub %o4, (1 << 5), %o4 ! IEU0 | ||
180 | 2: cmp %o4, %o2 ! IEU1 Group | ||
181 | bne,pt %xcc, 1b ! CTI | ||
182 | nop ! IEU0 | ||
183 | 170 | ||
184 | /* The I-cache does not snoop local stores so we | 171 | /* The I-cache does not snoop local stores so we |
185 | * better flush that too when necessary. | 172 | * better flush that too when necessary. |
@@ -189,48 +176,9 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ | |||
189 | retl | 176 | retl |
190 | nop | 177 | nop |
191 | 178 | ||
192 | dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG | ||
193 | add %o4, (1 << 5), %o4 | ||
194 | dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG | ||
195 | add %o4, (1 << 5), %o4 | ||
196 | dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG | ||
197 | add %o4, (1 << 5), %o4 | ||
198 | dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG | ||
199 | add %o4, (1 << 5), %o4 | ||
200 | membar #Sync | ||
201 | ba,pt %xcc, 2b | ||
202 | nop | ||
203 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 179 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
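The rewritten __flush_dcache_page above drops the unrolled Spitfire code in favour of a simple loop: the physical address is turned into a D-cache tag (paddr >> 11), then the 16K direct-mapped D-cache is walked one 32-byte line at a time and any valid line whose tag matches is invalidated. The same logic expressed in C, with the diagnostic ASI_DCACHE_TAG accesses modelled by a plain array (illustrative only):

    #include <stdio.h>

    #define DC_SIZE    (1 << 14)    /* 16K D-cache */
    #define DC_LINE    (1 << 5)     /* 32-byte lines */
    #define DTAG_MASK  0x3UL

    static unsigned long dcache_tag[DC_SIZE / DC_LINE]; /* stands in for ASI_DCACHE_TAG */

    static void flush_dcache_page(unsigned long paddr)
    {
            unsigned long want = paddr >> 11;   /* tag format used by the hardware */
            long off;

            for (off = DC_SIZE - DC_LINE; off >= 0; off -= DC_LINE) {
                    unsigned long tag = dcache_tag[off / DC_LINE];

                    if (!(tag & DTAG_MASK))         /* line not valid */
                            continue;
                    if ((tag & ~DTAG_MASK) != want) /* tag for a different page */
                            continue;
                    dcache_tag[off / DC_LINE] = 0;  /* invalidate the line */
            }
    }

    int main(void)
    {
            dcache_tag[3] = (0x12340000UL >> 11) | 1;   /* one made-up valid line */
            flush_dcache_page(0x12340000UL);
            printf("tag[3] = %#lx\n", dcache_tag[3]);   /* now 0 */
            return 0;
    }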
204 | 180 | ||
205 | .previous .text | 181 | .previous |
206 | .align 32 | ||
207 | __prefill_dtlb: | ||
208 | rdpr %pstate, %g7 | ||
209 | wrpr %g7, PSTATE_IE, %pstate | ||
210 | mov TLB_TAG_ACCESS, %g1 | ||
211 | stxa %o5, [%g1] ASI_DMMU | ||
212 | stxa %o2, [%g0] ASI_DTLB_DATA_IN | ||
213 | flush %g6 | ||
214 | retl | ||
215 | wrpr %g7, %pstate | ||
216 | __prefill_itlb: | ||
217 | rdpr %pstate, %g7 | ||
218 | wrpr %g7, PSTATE_IE, %pstate | ||
219 | mov TLB_TAG_ACCESS, %g1 | ||
220 | stxa %o5, [%g1] ASI_IMMU | ||
221 | stxa %o2, [%g0] ASI_ITLB_DATA_IN | ||
222 | flush %g6 | ||
223 | retl | ||
224 | wrpr %g7, %pstate | ||
225 | |||
226 | .globl __update_mmu_cache | ||
227 | __update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */ | ||
228 | srlx %o1, PAGE_SHIFT, %o1 | ||
229 | andcc %o3, FAULT_CODE_DTLB, %g0 | ||
230 | sllx %o1, PAGE_SHIFT, %o5 | ||
231 | bne,pt %xcc, __prefill_dtlb | ||
232 | or %o5, %o0, %o5 | ||
233 | ba,a,pt %xcc, __prefill_itlb | ||
234 | 182 | ||
235 | /* Cheetah specific versions, patched at boot time. */ | 183 | /* Cheetah specific versions, patched at boot time. */ |
236 | __cheetah_flush_tlb_mm: /* 18 insns */ | 184 | __cheetah_flush_tlb_mm: /* 18 insns */ |
@@ -283,7 +231,7 @@ __cheetah_flush_tlb_pending: /* 26 insns */ | |||
283 | wrpr %g7, 0x0, %pstate | 231 | wrpr %g7, 0x0, %pstate |
284 | 232 | ||
285 | #ifdef DCACHE_ALIASING_POSSIBLE | 233 | #ifdef DCACHE_ALIASING_POSSIBLE |
286 | flush_dcpage_cheetah: /* 11 insns */ | 234 | __cheetah_flush_dcache_page: /* 11 insns */ |
287 | sethi %uhi(PAGE_OFFSET), %g1 | 235 | sethi %uhi(PAGE_OFFSET), %g1 |
288 | sllx %g1, 32, %g1 | 236 | sllx %g1, 32, %g1 |
289 | sub %o0, %g1, %o0 | 237 | sub %o0, %g1, %o0 |
@@ -329,8 +277,8 @@ cheetah_patch_cachetlbops: | |||
329 | #ifdef DCACHE_ALIASING_POSSIBLE | 277 | #ifdef DCACHE_ALIASING_POSSIBLE |
330 | sethi %hi(__flush_dcache_page), %o0 | 278 | sethi %hi(__flush_dcache_page), %o0 |
331 | or %o0, %lo(__flush_dcache_page), %o0 | 279 | or %o0, %lo(__flush_dcache_page), %o0 |
332 | sethi %hi(flush_dcpage_cheetah), %o1 | 280 | sethi %hi(__cheetah_flush_dcache_page), %o1 |
333 | or %o1, %lo(flush_dcpage_cheetah), %o1 | 281 | or %o1, %lo(__cheetah_flush_dcache_page), %o1 |
334 | call cheetah_patch_one | 282 | call cheetah_patch_one |
335 | mov 11, %o2 | 283 | mov 11, %o2 |
336 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 284 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile index 8f2420d9e9e6..3d33ed27bc27 100644 --- a/arch/sparc64/prom/Makefile +++ b/arch/sparc64/prom/Makefile | |||
@@ -6,5 +6,5 @@ | |||
6 | EXTRA_AFLAGS := -ansi | 6 | EXTRA_AFLAGS := -ansi |
7 | EXTRA_CFLAGS := -Werror | 7 | EXTRA_CFLAGS := -Werror |
8 | 8 | ||
9 | lib-y := bootstr.o devops.o init.o memory.o misc.o \ | 9 | lib-y := bootstr.o devops.o init.o misc.o \ |
10 | tree.o console.o printf.o p1275.o map.o cif.o | 10 | tree.o console.o printf.o p1275.o cif.o |
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c index 028a53fcb1ec..eae5db8dda56 100644 --- a/arch/sparc64/prom/console.c +++ b/arch/sparc64/prom/console.c | |||
@@ -67,7 +67,7 @@ prom_putchar(char c) | |||
67 | } | 67 | } |
68 | 68 | ||
69 | void | 69 | void |
70 | prom_puts(char *s, int len) | 70 | prom_puts(const char *s, int len) |
71 | { | 71 | { |
72 | p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| | 72 | p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| |
73 | P1275_INOUT(3,1), | 73 | P1275_INOUT(3,1), |
diff --git a/arch/sparc64/prom/devops.c b/arch/sparc64/prom/devops.c index 2c99b21b6981..4641839eb39a 100644 --- a/arch/sparc64/prom/devops.c +++ b/arch/sparc64/prom/devops.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * Returns 0 on failure. | 16 | * Returns 0 on failure. |
17 | */ | 17 | */ |
18 | int | 18 | int |
19 | prom_devopen(char *dstr) | 19 | prom_devopen(const char *dstr) |
20 | { | 20 | { |
21 | return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)| | 21 | return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)| |
22 | P1275_INOUT(1,1), | 22 | P1275_INOUT(1,1), |
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c index 817faae058cd..f3cc2d8578b2 100644 --- a/arch/sparc64/prom/init.c +++ b/arch/sparc64/prom/init.c | |||
@@ -27,7 +27,6 @@ int prom_chosen_node; | |||
27 | * failure. It gets passed the pointer to the PROM vector. | 27 | * failure. It gets passed the pointer to the PROM vector. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | extern void prom_meminit(void); | ||
31 | extern void prom_cif_init(void *, void *); | 30 | extern void prom_cif_init(void *, void *); |
32 | 31 | ||
33 | void __init prom_init(void *cif_handler, void *cif_stack) | 32 | void __init prom_init(void *cif_handler, void *cif_stack) |
@@ -46,7 +45,7 @@ void __init prom_init(void *cif_handler, void *cif_stack) | |||
46 | if((prom_root_node == 0) || (prom_root_node == -1)) | 45 | if((prom_root_node == 0) || (prom_root_node == -1)) |
47 | prom_halt(); | 46 | prom_halt(); |
48 | 47 | ||
49 | prom_chosen_node = prom_finddevice("/chosen"); | 48 | prom_chosen_node = prom_finddevice(prom_chosen_path); |
50 | if (!prom_chosen_node || prom_chosen_node == -1) | 49 | if (!prom_chosen_node || prom_chosen_node == -1) |
51 | prom_halt(); | 50 | prom_halt(); |
52 | 51 | ||
@@ -90,8 +89,6 @@ void __init prom_init(void *cif_handler, void *cif_stack) | |||
90 | 89 | ||
91 | printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); | 90 | printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); |
92 | 91 | ||
93 | prom_meminit(); | ||
94 | |||
95 | /* Initialization successful. */ | 92 | /* Initialization successful. */ |
96 | return; | 93 | return; |
97 | 94 | ||
diff --git a/arch/sparc64/prom/map.S b/arch/sparc64/prom/map.S deleted file mode 100644 index 21b3f9c99ea7..000000000000 --- a/arch/sparc64/prom/map.S +++ /dev/null | |||
@@ -1,72 +0,0 @@ | |||
1 | /* $Id: map.S,v 1.2 1999/11/19 05:53:02 davem Exp $ | ||
2 | * map.S: Tricky coding required to fixup the kernel OBP maps | ||
3 | * properly. | ||
4 | * | ||
5 | * Copyright (C) 1999 David S. Miller (davem@redhat.com) | ||
6 | */ | ||
7 | |||
8 | .text | ||
9 | .align 8192 | ||
10 | .globl prom_boot_page | ||
11 | prom_boot_page: | ||
12 | call_method: | ||
13 | .asciz "call-method" | ||
14 | .align 8 | ||
15 | map: | ||
16 | .asciz "map" | ||
17 | .align 8 | ||
18 | |||
19 | /* When we are invoked, our caller has remapped us to | ||
20 | * page zero, therefore we must use PC relative addressing | ||
21 | * for everything after we begin performing the unmap/map | ||
22 | * calls. | ||
23 | */ | ||
24 | .globl prom_remap | ||
25 | prom_remap: /* %o0 = physpage, %o1 = virtpage, %o2 = mmu_ihandle */ | ||
26 | rd %pc, %g1 | ||
27 | srl %o2, 0, %o2 ! kill sign extension | ||
28 | sethi %hi(p1275buf), %g2 | ||
29 | or %g2, %lo(p1275buf), %g2 | ||
30 | ldx [%g2 + 0x10], %g3 ! prom_cif_stack | ||
31 | save %g3, -(192 + 128), %sp | ||
32 | ldx [%g2 + 0x08], %l0 ! prom_cif_handler | ||
33 | mov %g6, %i3 | ||
34 | mov %g4, %i4 | ||
35 | mov %g5, %i5 | ||
36 | flushw | ||
37 | |||
38 | sethi %hi(prom_remap - call_method), %g7 | ||
39 | or %g7, %lo(prom_remap - call_method), %g7 | ||
40 | sub %g1, %g7, %l2 ! call-method string | ||
41 | sethi %hi(prom_remap - map), %g7 | ||
42 | or %g7, %lo(prom_remap - map), %g7 | ||
43 | sub %g1, %g7, %l4 ! map string | ||
44 | |||
45 | /* OK, map the 4MB region we really live at. */ | ||
46 | stx %l2, [%sp + 2047 + 128 + 0x00] ! call-method | ||
47 | mov 7, %l5 | ||
48 | stx %l5, [%sp + 2047 + 128 + 0x08] ! num_args | ||
49 | mov 1, %l5 | ||
50 | stx %l5, [%sp + 2047 + 128 + 0x10] ! num_rets | ||
51 | stx %l4, [%sp + 2047 + 128 + 0x18] ! map | ||
52 | stx %i2, [%sp + 2047 + 128 + 0x20] ! mmu_ihandle | ||
53 | mov -1, %l5 | ||
54 | stx %l5, [%sp + 2047 + 128 + 0x28] ! mode == default | ||
55 | sethi %hi(4 * 1024 * 1024), %l5 | ||
56 | stx %l5, [%sp + 2047 + 128 + 0x30] ! size | ||
57 | stx %i1, [%sp + 2047 + 128 + 0x38] ! vaddr | ||
58 | stx %g0, [%sp + 2047 + 128 + 0x40] ! filler | ||
59 | stx %i0, [%sp + 2047 + 128 + 0x48] ! paddr | ||
60 | call %l0 | ||
61 | add %sp, (2047 + 128), %o0 ! argument array | ||
62 | |||
63 | /* Restore hard-coded globals. */ | ||
64 | mov %i3, %g6 | ||
65 | mov %i4, %g4 | ||
66 | mov %i5, %g5 | ||
67 | |||
68 | /* Wheee.... we are done. */ | ||
69 | ret | ||
70 | restore | ||
71 | |||
72 | .align 8192 | ||
diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c deleted file mode 100644 index f4a8143e052c..000000000000 --- a/arch/sparc64/prom/memory.c +++ /dev/null | |||
@@ -1,152 +0,0 @@ | |||
1 | /* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $ | ||
2 | * memory.c: Prom routine for acquiring various bits of information | ||
3 | * about RAM on the machine, both virtual and physical. | ||
4 | * | ||
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
6 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/init.h> | ||
11 | |||
12 | #include <asm/openprom.h> | ||
13 | #include <asm/oplib.h> | ||
14 | |||
15 | /* This routine, for consistency, returns the ram parameters in the | ||
16 | * V0 prom memory descriptor format. I choose this format because I | ||
17 | * think it was the easiest to work with. I feel the religious | ||
18 | * arguments now... ;) Also, I return the linked lists sorted to | ||
19 | * prevent paging_init() upset stomach as I have not yet written | ||
20 | * the pepto-bismol kernel module yet. | ||
21 | */ | ||
22 | |||
23 | struct linux_prom64_registers prom_reg_memlist[64]; | ||
24 | struct linux_prom64_registers prom_reg_tmp[64]; | ||
25 | |||
26 | struct linux_mlist_p1275 prom_phys_total[64]; | ||
27 | struct linux_mlist_p1275 prom_prom_taken[64]; | ||
28 | struct linux_mlist_p1275 prom_phys_avail[64]; | ||
29 | |||
30 | struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total; | ||
31 | struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken; | ||
32 | struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail; | ||
33 | |||
34 | struct linux_mem_p1275 prom_memlist; | ||
35 | |||
36 | |||
37 | /* Internal Prom library routine to sort a linux_mlist_p1275 memory | ||
38 | * list. Used below in initialization. | ||
39 | */ | ||
40 | static void __init | ||
41 | prom_sortmemlist(struct linux_mlist_p1275 *thislist) | ||
42 | { | ||
43 | int swapi = 0; | ||
44 | int i, mitr; | ||
45 | unsigned long tmpaddr, tmpsize; | ||
46 | unsigned long lowest; | ||
47 | |||
48 | for(i=0; thislist[i].theres_more; i++) { | ||
49 | lowest = thislist[i].start_adr; | ||
50 | for(mitr = i+1; thislist[mitr-1].theres_more; mitr++) | ||
51 | if(thislist[mitr].start_adr < lowest) { | ||
52 | lowest = thislist[mitr].start_adr; | ||
53 | swapi = mitr; | ||
54 | } | ||
55 | if(lowest == thislist[i].start_adr) continue; | ||
56 | tmpaddr = thislist[swapi].start_adr; | ||
57 | tmpsize = thislist[swapi].num_bytes; | ||
58 | for(mitr = swapi; mitr > i; mitr--) { | ||
59 | thislist[mitr].start_adr = thislist[mitr-1].start_adr; | ||
60 | thislist[mitr].num_bytes = thislist[mitr-1].num_bytes; | ||
61 | } | ||
62 | thislist[i].start_adr = tmpaddr; | ||
63 | thislist[i].num_bytes = tmpsize; | ||
64 | } | ||
65 | } | ||
66 | |||
67 | /* Initialize the memory lists based upon the prom version. */ | ||
68 | void __init prom_meminit(void) | ||
69 | { | ||
70 | int node = 0; | ||
71 | unsigned int iter, num_regs; | ||
72 | |||
73 | node = prom_finddevice("/memory"); | ||
74 | num_regs = prom_getproperty(node, "available", | ||
75 | (char *) prom_reg_memlist, | ||
76 | sizeof(prom_reg_memlist)); | ||
77 | num_regs = (num_regs/sizeof(struct linux_prom64_registers)); | ||
78 | for(iter=0; iter<num_regs; iter++) { | ||
79 | prom_phys_avail[iter].start_adr = | ||
80 | prom_reg_memlist[iter].phys_addr; | ||
81 | prom_phys_avail[iter].num_bytes = | ||
82 | prom_reg_memlist[iter].reg_size; | ||
83 | prom_phys_avail[iter].theres_more = | ||
84 | &prom_phys_avail[iter+1]; | ||
85 | } | ||
86 | prom_phys_avail[iter-1].theres_more = NULL; | ||
87 | |||
88 | num_regs = prom_getproperty(node, "reg", | ||
89 | (char *) prom_reg_memlist, | ||
90 | sizeof(prom_reg_memlist)); | ||
91 | num_regs = (num_regs/sizeof(struct linux_prom64_registers)); | ||
92 | for(iter=0; iter<num_regs; iter++) { | ||
93 | prom_phys_total[iter].start_adr = | ||
94 | prom_reg_memlist[iter].phys_addr; | ||
95 | prom_phys_total[iter].num_bytes = | ||
96 | prom_reg_memlist[iter].reg_size; | ||
97 | prom_phys_total[iter].theres_more = | ||
98 | &prom_phys_total[iter+1]; | ||
99 | } | ||
100 | prom_phys_total[iter-1].theres_more = NULL; | ||
101 | |||
102 | node = prom_finddevice("/virtual-memory"); | ||
103 | num_regs = prom_getproperty(node, "available", | ||
104 | (char *) prom_reg_memlist, | ||
105 | sizeof(prom_reg_memlist)); | ||
106 | num_regs = (num_regs/sizeof(struct linux_prom64_registers)); | ||
107 | |||
108 | /* Convert available virtual areas to taken virtual | ||
109 | * areas. First sort, then convert. | ||
110 | */ | ||
111 | for(iter=0; iter<num_regs; iter++) { | ||
112 | prom_prom_taken[iter].start_adr = | ||
113 | prom_reg_memlist[iter].phys_addr; | ||
114 | prom_prom_taken[iter].num_bytes = | ||
115 | prom_reg_memlist[iter].reg_size; | ||
116 | prom_prom_taken[iter].theres_more = | ||
117 | &prom_prom_taken[iter+1]; | ||
118 | } | ||
119 | prom_prom_taken[iter-1].theres_more = NULL; | ||
120 | |||
121 | prom_sortmemlist(prom_prom_taken); | ||
122 | |||
123 | /* Finally, convert. */ | ||
124 | for(iter=0; iter<num_regs; iter++) { | ||
125 | prom_prom_taken[iter].start_adr = | ||
126 | prom_prom_taken[iter].start_adr + | ||
127 | prom_prom_taken[iter].num_bytes; | ||
128 | prom_prom_taken[iter].num_bytes = | ||
129 | prom_prom_taken[iter+1].start_adr - | ||
130 | prom_prom_taken[iter].start_adr; | ||
131 | } | ||
132 | prom_prom_taken[iter-1].num_bytes = | ||
133 | -1UL - prom_prom_taken[iter-1].start_adr; | ||
134 | |||
135 | /* Sort the other two lists. */ | ||
136 | prom_sortmemlist(prom_phys_total); | ||
137 | prom_sortmemlist(prom_phys_avail); | ||
138 | |||
139 | /* Link all the lists into the top-level descriptor. */ | ||
140 | prom_memlist.p1275_totphys=&prom_ptot_ptr; | ||
141 | prom_memlist.p1275_prommap=&prom_ptak_ptr; | ||
142 | prom_memlist.p1275_available=&prom_pavl_ptr; | ||
143 | } | ||
144 | |||
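For illustration only (addresses invented and kept small for readability): if the sorted "available" list were { start 0x0, 0x2000 bytes } followed by { start 0x8000, 0x1000 bytes }, the conversion loop above would turn entry 0 into { start 0x2000, 0x6000 bytes } -- the gap between the end of the first available range and the start of the second, i.e. the virtual area the PROM itself occupies there -- and the final fix-up would turn entry 1 into { start 0x9000, -1UL - 0x9000 bytes }, covering everything from the end of the last available range to the top of the address space.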
145 | /* This returns a pointer to our library's internal p1275 format | ||
146 | * memory descriptor. | ||
147 | */ | ||
148 | struct linux_mem_p1275 * | ||
149 | prom_meminfo(void) | ||
150 | { | ||
151 | return &prom_memlist; | ||
152 | } | ||
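For illustration only (not part of this patch): a sketch of how a kernel-side caller might walk the descriptor returned by prom_meminfo(). It assumes these routines and the linux_mem_p1275/linux_mlist_p1275 types are declared in <asm/oplib.h>, as elsewhere in this tree; the function name is invented.

#include <linux/init.h>
#include <asm/oplib.h>

static void __init dump_total_physical(void)
{
	struct linux_mem_p1275 *mem = prom_meminfo();
	struct linux_mlist_p1275 *mp;

	/* p1275_totphys points at the head pointer of the sorted list;
	 * theres_more chains the entries and is NULL on the last one.
	 */
	for (mp = *mem->p1275_totphys; mp; mp = mp->theres_more)
		prom_printf("physical: %lx, %lx bytes\n",
			    mp->start_adr, mp->num_bytes);
}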
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c index 19c44e97e9ee..9b895faf077b 100644 --- a/arch/sparc64/prom/misc.c +++ b/arch/sparc64/prom/misc.c | |||
@@ -17,14 +17,14 @@ | |||
17 | #include <asm/system.h> | 17 | #include <asm/system.h> |
18 | 18 | ||
19 | /* Reset and reboot the machine with the command 'bcommand'. */ | 19 | /* Reset and reboot the machine with the command 'bcommand'. */ |
20 | void prom_reboot(char *bcommand) | 20 | void prom_reboot(const char *bcommand) |
21 | { | 21 | { |
22 | p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) | | 22 | p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) | |
23 | P1275_INOUT(1, 0), bcommand); | 23 | P1275_INOUT(1, 0), bcommand); |
24 | } | 24 | } |
25 | 25 | ||
26 | /* Forth evaluate the expression contained in 'fstring'. */ | 26 | /* Forth evaluate the expression contained in 'fstring'. */ |
27 | void prom_feval(char *fstring) | 27 | void prom_feval(const char *fstring) |
28 | { | 28 | { |
29 | if (!fstring || fstring[0] == 0) | 29 | if (!fstring || fstring[0] == 0) |
30 | return; | 30 | return; |
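For illustration only (not part of this patch): a caller's view of prom_reboot() after the const-ification above. The boot command is invented, and prom_reboot() presumably does not return once the firmware accepts it.

#include <asm/oplib.h>

static void restart_with(const char *cmd)
{
	prom_printf("Restarting with command: %s\n", cmd);
	prom_reboot(cmd);	/* e.g. restart_with("boot disk") */
}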
@@ -148,21 +148,19 @@ void prom_set_trap_table(unsigned long tba) | |||
148 | p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); | 148 | p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); |
149 | } | 149 | } |
150 | 150 | ||
151 | int mmu_ihandle_cache = 0; | ||
152 | |||
153 | int prom_get_mmu_ihandle(void) | 151 | int prom_get_mmu_ihandle(void) |
154 | { | 152 | { |
155 | int node, ret; | 153 | int node, ret; |
156 | 154 | ||
157 | if (mmu_ihandle_cache != 0) | 155 | if (prom_mmu_ihandle_cache != 0) |
158 | return mmu_ihandle_cache; | 156 | return prom_mmu_ihandle_cache; |
159 | 157 | ||
160 | node = prom_finddevice("/chosen"); | 158 | node = prom_finddevice(prom_chosen_path); |
161 | ret = prom_getint(node, "mmu"); | 159 | ret = prom_getint(node, prom_mmu_name); |
162 | if (ret == -1 || ret == 0) | 160 | if (ret == -1 || ret == 0) |
163 | mmu_ihandle_cache = -1; | 161 | prom_mmu_ihandle_cache = -1; |
164 | else | 162 | else |
165 | mmu_ihandle_cache = ret; | 163 | prom_mmu_ihandle_cache = ret; |
166 | 164 | ||
167 | return ret; | 165 | return ret; |
168 | } | 166 | } |
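The hunk above replaces the file-local mmu_ihandle_cache and the literal strings ("/chosen", "mmu", and, below, "call-method", "map", "unmap") with shared symbols. Where those symbols are defined is not shown in this diff; the following declarations are a plausible guess inferred only from how the names are used here (the const-ness and the values in the comments are assumptions):

extern int prom_mmu_ihandle_cache;

extern char prom_chosen_path[];		/* presumably "/chosen" */
extern char prom_mmu_name[];		/* presumably "mmu" */
extern char prom_callmethod_name[];	/* presumably "call-method" */
extern char prom_map_name[];		/* presumably "map" */
extern char prom_unmap_name[];		/* presumably "unmap" */
extern char prom_getprop_name[];	/* presumably "getprop" */
extern char prom_finddev_name[];	/* presumably "finddevice" */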
@@ -190,7 +188,7 @@ long prom_itlb_load(unsigned long index, | |||
190 | unsigned long tte_data, | 188 | unsigned long tte_data, |
191 | unsigned long vaddr) | 189 | unsigned long vaddr) |
192 | { | 190 | { |
193 | return p1275_cmd("call-method", | 191 | return p1275_cmd(prom_callmethod_name, |
194 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | 192 | (P1275_ARG(0, P1275_ARG_IN_STRING) | |
195 | P1275_ARG(2, P1275_ARG_IN_64B) | | 193 | P1275_ARG(2, P1275_ARG_IN_64B) | |
196 | P1275_ARG(3, P1275_ARG_IN_64B) | | 194 | P1275_ARG(3, P1275_ARG_IN_64B) | |
@@ -207,7 +205,7 @@ long prom_dtlb_load(unsigned long index, | |||
207 | unsigned long tte_data, | 205 | unsigned long tte_data, |
208 | unsigned long vaddr) | 206 | unsigned long vaddr) |
209 | { | 207 | { |
210 | return p1275_cmd("call-method", | 208 | return p1275_cmd(prom_callmethod_name, |
211 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | 209 | (P1275_ARG(0, P1275_ARG_IN_STRING) | |
212 | P1275_ARG(2, P1275_ARG_IN_64B) | | 210 | P1275_ARG(2, P1275_ARG_IN_64B) | |
213 | P1275_ARG(3, P1275_ARG_IN_64B) | | 211 | P1275_ARG(3, P1275_ARG_IN_64B) | |
@@ -223,13 +221,13 @@ long prom_dtlb_load(unsigned long index, | |||
223 | int prom_map(int mode, unsigned long size, | 221 | int prom_map(int mode, unsigned long size, |
224 | unsigned long vaddr, unsigned long paddr) | 222 | unsigned long vaddr, unsigned long paddr) |
225 | { | 223 | { |
226 | int ret = p1275_cmd("call-method", | 224 | int ret = p1275_cmd(prom_callmethod_name, |
227 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | 225 | (P1275_ARG(0, P1275_ARG_IN_STRING) | |
228 | P1275_ARG(3, P1275_ARG_IN_64B) | | 226 | P1275_ARG(3, P1275_ARG_IN_64B) | |
229 | P1275_ARG(4, P1275_ARG_IN_64B) | | 227 | P1275_ARG(4, P1275_ARG_IN_64B) | |
230 | P1275_ARG(6, P1275_ARG_IN_64B) | | 228 | P1275_ARG(6, P1275_ARG_IN_64B) | |
231 | P1275_INOUT(7, 1)), | 229 | P1275_INOUT(7, 1)), |
232 | "map", | 230 | prom_map_name, |
233 | prom_get_mmu_ihandle(), | 231 | prom_get_mmu_ihandle(), |
234 | mode, | 232 | mode, |
235 | size, | 233 | size, |
@@ -244,12 +242,12 @@ int prom_map(int mode, unsigned long size, | |||
244 | 242 | ||
245 | void prom_unmap(unsigned long size, unsigned long vaddr) | 243 | void prom_unmap(unsigned long size, unsigned long vaddr) |
246 | { | 244 | { |
247 | p1275_cmd("call-method", | 245 | p1275_cmd(prom_callmethod_name, |
248 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | 246 | (P1275_ARG(0, P1275_ARG_IN_STRING) | |
249 | P1275_ARG(2, P1275_ARG_IN_64B) | | 247 | P1275_ARG(2, P1275_ARG_IN_64B) | |
250 | P1275_ARG(3, P1275_ARG_IN_64B) | | 248 | P1275_ARG(3, P1275_ARG_IN_64B) | |
251 | P1275_INOUT(4, 0)), | 249 | P1275_INOUT(4, 0)), |
252 | "unmap", | 250 | prom_unmap_name, |
253 | prom_get_mmu_ihandle(), | 251 | prom_get_mmu_ihandle(), |
254 | size, | 252 | size, |
255 | vaddr); | 253 | vaddr); |
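For illustration only (not part of this patch): how a caller might use the prom_map()/prom_unmap() wrappers above. The mode value, addresses and size are placeholders, and the failure convention of prom_map() is not visible in this hunk, so a negative return is assumed.

#include <asm/oplib.h>

static int map_scratch_page(unsigned long paddr)
{
	unsigned long vaddr = 0xfffff80000000000UL;	/* placeholder */
	unsigned long size  = 8192;			/* one 8K page */
	int ret;

	ret = prom_map(0 /* mode: placeholder */, size, vaddr, paddr);
	if (ret < 0)		/* assumed failure convention */
		return ret;

	/* ... touch the mapping ... */

	prom_unmap(size, vaddr);
	return 0;
}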
@@ -258,7 +256,7 @@ void prom_unmap(unsigned long size, unsigned long vaddr) | |||
258 | /* Set aside physical memory which is not touched or modified | 256 | /* Set aside physical memory which is not touched or modified |
259 | * across soft resets. | 257 | * across soft resets. |
260 | */ | 258 | */ |
261 | unsigned long prom_retain(char *name, | 259 | unsigned long prom_retain(const char *name, |
262 | unsigned long pa_low, unsigned long pa_high, | 260 | unsigned long pa_low, unsigned long pa_high, |
263 | long size, long align) | 261 | long size, long align) |
264 | { | 262 | { |
@@ -290,7 +288,7 @@ int prom_getunumber(int syndrome_code, | |||
290 | unsigned long phys_addr, | 288 | unsigned long phys_addr, |
291 | char *buf, int buflen) | 289 | char *buf, int buflen) |
292 | { | 290 | { |
293 | return p1275_cmd("call-method", | 291 | return p1275_cmd(prom_callmethod_name, |
294 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | 292 | (P1275_ARG(0, P1275_ARG_IN_STRING) | |
295 | P1275_ARG(3, P1275_ARG_OUT_BUF) | | 293 | P1275_ARG(3, P1275_ARG_OUT_BUF) | |
296 | P1275_ARG(6, P1275_ARG_IN_64B) | | 294 | P1275_ARG(6, P1275_ARG_IN_64B) | |
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c index 59fe38bba39e..a5a7c5712028 100644 --- a/arch/sparc64/prom/p1275.c +++ b/arch/sparc64/prom/p1275.c | |||
@@ -46,7 +46,7 @@ static inline unsigned long spitfire_get_primary_context(void) | |||
46 | */ | 46 | */ |
47 | DEFINE_SPINLOCK(prom_entry_lock); | 47 | DEFINE_SPINLOCK(prom_entry_lock); |
48 | 48 | ||
49 | long p1275_cmd (char *service, long fmt, ...) | 49 | long p1275_cmd(const char *service, long fmt, ...) |
50 | { | 50 | { |
51 | char *p, *q; | 51 | char *p, *q; |
52 | unsigned long flags; | 52 | unsigned long flags; |
diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c index a6df82cafa0d..660943ee4c2a 100644 --- a/arch/sparc64/prom/printf.c +++ b/arch/sparc64/prom/printf.c | |||
@@ -34,7 +34,7 @@ prom_write(const char *buf, unsigned int n) | |||
34 | } | 34 | } |
35 | 35 | ||
36 | void | 36 | void |
37 | prom_printf(char *fmt, ...) | 37 | prom_printf(const char *fmt, ...) |
38 | { | 38 | { |
39 | va_list args; | 39 | va_list args; |
40 | int i; | 40 | int i; |
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c index ccf73258ebf7..b1ff9e87dcc6 100644 --- a/arch/sparc64/prom/tree.c +++ b/arch/sparc64/prom/tree.c | |||
@@ -69,7 +69,7 @@ prom_getsibling(int node) | |||
69 | * Return -1 on error. | 69 | * Return -1 on error. |
70 | */ | 70 | */ |
71 | __inline__ int | 71 | __inline__ int |
72 | prom_getproplen(int node, char *prop) | 72 | prom_getproplen(int node, const char *prop) |
73 | { | 73 | { |
74 | if((!node) || (!prop)) return -1; | 74 | if((!node) || (!prop)) return -1; |
75 | return p1275_cmd ("getproplen", | 75 | return p1275_cmd ("getproplen", |
@@ -83,20 +83,20 @@ prom_getproplen(int node, char *prop) | |||
83 | * was successful the length will be returned, else -1 is returned. | 83 | * was successful the length will be returned, else -1 is returned. |
84 | */ | 84 | */ |
85 | __inline__ int | 85 | __inline__ int |
86 | prom_getproperty(int node, char *prop, char *buffer, int bufsize) | 86 | prom_getproperty(int node, const char *prop, char *buffer, int bufsize) |
87 | { | 87 | { |
88 | int plen; | 88 | int plen; |
89 | 89 | ||
90 | plen = prom_getproplen(node, prop); | 90 | plen = prom_getproplen(node, prop); |
91 | if((plen > bufsize) || (plen == 0) || (plen == -1)) | 91 | if ((plen > bufsize) || (plen == 0) || (plen == -1)) { |
92 | return -1; | 92 | return -1; |
93 | else { | 93 | } else { |
94 | /* Ok, things seem all right. */ | 94 | /* Ok, things seem all right. */ |
95 | return p1275_cmd ("getprop", | 95 | return p1275_cmd(prom_getprop_name, |
96 | P1275_ARG(1,P1275_ARG_IN_STRING)| | 96 | P1275_ARG(1,P1275_ARG_IN_STRING)| |
97 | P1275_ARG(2,P1275_ARG_OUT_BUF)| | 97 | P1275_ARG(2,P1275_ARG_OUT_BUF)| |
98 | P1275_INOUT(4, 1), | 98 | P1275_INOUT(4, 1), |
99 | node, prop, buffer, P1275_SIZE(plen)); | 99 | node, prop, buffer, P1275_SIZE(plen)); |
100 | } | 100 | } |
101 | } | 101 | } |
102 | 102 | ||
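For illustration only (not part of this patch): a typical caller of the accessors above, first sizing the property with prom_getproplen() and then copying it with prom_getproperty(). The device path and property name are standard OpenBoot conventions used purely as an example; the not-found return values of prom_finddevice() are assumed.

#include <asm/oplib.h>

static int read_obp_version(char *buf, int buflen)
{
	int node, len;

	node = prom_finddevice("/openprom");
	if (node == 0 || node == -1)		/* assumed not-found values */
		return -1;

	len = prom_getproplen(node, "version");
	if (len <= 0 || len > buflen)
		return -1;

	/* Per the comment above: returns the length on success, -1 on failure. */
	return prom_getproperty(node, "version", buf, buflen);
}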
@@ -104,7 +104,7 @@ prom_getproperty(int node, char *prop, char *buffer, int bufsize) | |||
104 | * on failure. | 104 | * on failure. |
105 | */ | 105 | */ |
106 | __inline__ int | 106 | __inline__ int |
107 | prom_getint(int node, char *prop) | 107 | prom_getint(int node, const char *prop) |
108 | { | 108 | { |
109 | int intprop; | 109 | int intprop; |
110 | 110 | ||
@@ -119,7 +119,7 @@ prom_getint(int node, char *prop) | |||
119 | */ | 119 | */ |
120 | 120 | ||
121 | int | 121 | int |
122 | prom_getintdefault(int node, char *property, int deflt) | 122 | prom_getintdefault(int node, const char *property, int deflt) |
123 | { | 123 | { |
124 | int retval; | 124 | int retval; |
125 | 125 | ||
@@ -131,7 +131,7 @@ prom_getintdefault(int node, char *property, int deflt) | |||
131 | 131 | ||
132 | /* Acquire a boolean property, 1=TRUE 0=FALSE. */ | 132 | /* Acquire a boolean property, 1=TRUE 0=FALSE. */ |
133 | int | 133 | int |
134 | prom_getbool(int node, char *prop) | 134 | prom_getbool(int node, const char *prop) |
135 | { | 135 | { |
136 | int retval; | 136 | int retval; |
137 | 137 | ||
@@ -145,7 +145,7 @@ prom_getbool(int node, char *prop) | |||
145 | * buffer. | 145 | * buffer. |
146 | */ | 146 | */ |
147 | void | 147 | void |
148 | prom_getstring(int node, char *prop, char *user_buf, int ubuf_size) | 148 | prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size) |
149 | { | 149 | { |
150 | int len; | 150 | int len; |
151 | 151 | ||
@@ -160,7 +160,7 @@ prom_getstring(int node, char *prop, char *user_buf, int ubuf_size) | |||
160 | * YES = 1 NO = 0 | 160 | * YES = 1 NO = 0 |
161 | */ | 161 | */ |
162 | int | 162 | int |
163 | prom_nodematch(int node, char *name) | 163 | prom_nodematch(int node, const char *name) |
164 | { | 164 | { |
165 | char namebuf[128]; | 165 | char namebuf[128]; |
166 | prom_getproperty(node, "name", namebuf, sizeof(namebuf)); | 166 | prom_getproperty(node, "name", namebuf, sizeof(namebuf)); |
@@ -172,7 +172,7 @@ prom_nodematch(int node, char *name) | |||
172 | * 'nodename'. Return node if successful, zero if not. | 172 | * 'nodename'. Return node if successful, zero if not. |
173 | */ | 173 | */ |
174 | int | 174 | int |
175 | prom_searchsiblings(int node_start, char *nodename) | 175 | prom_searchsiblings(int node_start, const char *nodename) |
176 | { | 176 | { |
177 | 177 | ||
178 | int thisnode, error; | 178 | int thisnode, error; |
@@ -294,7 +294,7 @@ prom_firstprop(int node, char *buffer) | |||
294 | * property types for this node. | 294 | * property types for this node. |
295 | */ | 295 | */ |
296 | __inline__ char * | 296 | __inline__ char * |
297 | prom_nextprop(int node, char *oprop, char *buffer) | 297 | prom_nextprop(int node, const char *oprop, char *buffer) |
298 | { | 298 | { |
299 | char buf[32]; | 299 | char buf[32]; |
300 | 300 | ||
@@ -314,15 +314,17 @@ prom_nextprop(int node, char *oprop, char *buffer) | |||
314 | } | 314 | } |
315 | 315 | ||
316 | int | 316 | int |
317 | prom_finddevice(char *name) | 317 | prom_finddevice(const char *name) |
318 | { | 318 | { |
319 | if(!name) return 0; | 319 | if (!name) |
320 | return p1275_cmd ("finddevice", P1275_ARG(0,P1275_ARG_IN_STRING)| | 320 | return 0; |
321 | P1275_INOUT(1, 1), | 321 | return p1275_cmd(prom_finddev_name, |
322 | name); | 322 | P1275_ARG(0,P1275_ARG_IN_STRING)| |
323 | P1275_INOUT(1, 1), | ||
324 | name); | ||
323 | } | 325 | } |
324 | 326 | ||
325 | int prom_node_has_property(int node, char *prop) | 327 | int prom_node_has_property(int node, const char *prop) |
326 | { | 328 | { |
327 | char buf [32]; | 329 | char buf [32]; |
328 | 330 | ||
@@ -339,7 +341,7 @@ int prom_node_has_property(int node, char *prop) | |||
339 | * of 'size' bytes. Return the number of bytes the prom accepted. | 341 | * of 'size' bytes. Return the number of bytes the prom accepted. |
340 | */ | 342 | */ |
341 | int | 343 | int |
342 | prom_setprop(int node, char *pname, char *value, int size) | 344 | prom_setprop(int node, const char *pname, char *value, int size) |
343 | { | 345 | { |
344 | if(size == 0) return 0; | 346 | if(size == 0) return 0; |
345 | if((pname == 0) || (value == 0)) return 0; | 347 | if((pname == 0) || (value == 0)) return 0; |
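For illustration only (not part of this patch): setting a property through prom_setprop() above. The property name and payload are invented; per the comment above, the return value is the number of bytes the PROM accepted.

#include <linux/string.h>
#include <asm/oplib.h>

static int tag_node(int node)
{
	char value[] = "example";	/* invented payload */

	return prom_setprop(node, "linux,example-prop", value,
			    strlen(value) + 1);
}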
@@ -364,7 +366,7 @@ prom_inst2pkg(int inst) | |||
364 | * FIXME: Should work for v0 as well | 366 | * FIXME: Should work for v0 as well |
365 | */ | 367 | */ |
366 | int | 368 | int |
367 | prom_pathtoinode(char *path) | 369 | prom_pathtoinode(const char *path) |
368 | { | 370 | { |
369 | int node, inst; | 371 | int node, inst; |
370 | 372 | ||