Diffstat (limited to 'arch/sparc64')
 arch/sparc64/Kconfig.debug           |   14
 arch/sparc64/kernel/cpu.c            |    4
 arch/sparc64/kernel/devices.c        |   22
 arch/sparc64/kernel/dtlb_backend.S   |   13
 arch/sparc64/kernel/dtlb_base.S      |   18
 arch/sparc64/kernel/dtlb_prot.S      |   12
 arch/sparc64/kernel/entry.S          |  263
 arch/sparc64/kernel/etrap.S          |   51
 arch/sparc64/kernel/head.S           |  748
 arch/sparc64/kernel/irq.c            |    1
 arch/sparc64/kernel/itlb_base.S      |   26
 arch/sparc64/kernel/ktlb.S           |  194
 arch/sparc64/kernel/pci_iommu.c      |  363
 arch/sparc64/kernel/pci_psycho.c     |   44
 arch/sparc64/kernel/pci_sabre.c      |   39
 arch/sparc64/kernel/pci_schizo.c     |   59
 arch/sparc64/kernel/power.c          |   64
 arch/sparc64/kernel/ptrace.c         |   21
 arch/sparc64/kernel/rtrap.S          |   30
 arch/sparc64/kernel/setup.c          |   58
 arch/sparc64/kernel/smp.c            |   28
 arch/sparc64/kernel/sparc64_ksyms.c  |    3
 arch/sparc64/kernel/sys32.S          |  170
 arch/sparc64/kernel/trampoline.S     |   31
 arch/sparc64/kernel/traps.c          |  100
 arch/sparc64/kernel/una_asm.S        |   67
 arch/sparc64/kernel/unaligned.c      |  101
 arch/sparc64/kernel/us3_cpufreq.c    |    5
 arch/sparc64/kernel/vmlinux.lds.S    |    3
 arch/sparc64/kernel/winfixup.S       |   33
 arch/sparc64/lib/Makefile            |    2
 arch/sparc64/lib/VISsave.S           |    8
 arch/sparc64/lib/dec_and_lock.S      |   80
 arch/sparc64/lib/strncpy_from_user.S |   16
 arch/sparc64/lib/user_fixup.c        |   63
 arch/sparc64/mm/Makefile             |    2
 arch/sparc64/mm/extable.c            |   80
 arch/sparc64/mm/fault.c              |   69
 arch/sparc64/mm/init.c               |  868
 arch/sparc64/mm/ultra.S              |  112
 arch/sparc64/prom/Makefile           |    4
 arch/sparc64/prom/console.c          |    2
 arch/sparc64/prom/devops.c           |    2
 arch/sparc64/prom/init.c             |    5
 arch/sparc64/prom/map.S              |   72
 arch/sparc64/prom/memory.c           |  152
 arch/sparc64/prom/misc.c             |   46
 arch/sparc64/prom/p1275.c            |    2
 arch/sparc64/prom/printf.c           |    2
 arch/sparc64/prom/tree.c             |   50
 50 files changed, 1614 insertions(+), 2608 deletions(-)
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index cd8d39fb954d..fa06ea04837b 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,13 +33,13 @@ config DEBUG_BOOTMEM
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
-# We have a custom atomic_dec_and_lock() implementation but it's not
-# compatible with spinlock debugging so we need to fall back on
-# the generic version in that case.
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP && !DEBUG_SPINLOCK
-	default y
+config DEBUG_PAGEALLOC
+	bool "Page alloc debugging"
+	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+	help
+	  Unmap pages from the kernel linear mapping after free_pages().
+	  This results in a large slowdown, but helps to find certain types
+	  of memory corruptions.
 
 config MCOUNT
 	bool
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 48756958116b..77ef5df4e5a7 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -39,6 +39,8 @@ struct cpu_fp_info linux_sparc_fpu[] = {
 	{ 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
 	{ 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
 	{ 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
+	{ 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU"},
+	{ 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"},
 };
 
 #define NSPARCFPU  (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
@@ -53,6 +55,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
 	{ 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
 	{ 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
 	{ 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
+	{ 0x3e, 0x19, "TI UltraSparc IV+ (Panther)"},
+	{ 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"},
 };
 
 #define NSPARCCHIPS  (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
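NSPARCFPU and NSPARCCHIPS use the classic element-count idiom: sizeof the whole table divided by sizeof one entry, so the count tracks the table automatically as rows like the Panther and Serrano entries above are added. A standalone C illustration (NELEMS is a hypothetical name here; later kernels spell this ARRAY_SIZE()):

    #include <stdio.h>

    struct cpu_fp_info { int manuf, impl, fpu_vers; const char *name; };

    #define NELEMS(x) (sizeof(x) / sizeof((x)[0]))  /* cf. NSPARCFPU above */

    static struct cpu_fp_info fpu_tab[] = {
            { 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU" },
            { 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU" },
    };

    int main(void)
    {
            printf("%zu entries\n", NELEMS(fpu_tab));  /* prints 2 */
            return 0;
    }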
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index d710274e516b..df9a1ca8fd77 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -135,6 +135,28 @@ void __init device_scan(void)
 		cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
 							    "clock-frequency",
 							    0);
+		cpu_data(0).dcache_size = prom_getintdefault(cpu_node,
+							     "dcache-size",
+							     16 * 1024);
+		cpu_data(0).dcache_line_size =
+			prom_getintdefault(cpu_node, "dcache-line-size", 32);
+		cpu_data(0).icache_size = prom_getintdefault(cpu_node,
+							     "icache-size",
+							     16 * 1024);
+		cpu_data(0).icache_line_size =
+			prom_getintdefault(cpu_node, "icache-line-size", 32);
+		cpu_data(0).ecache_size = prom_getintdefault(cpu_node,
+							     "ecache-size",
+							     4 * 1024 * 1024);
+		cpu_data(0).ecache_line_size =
+			prom_getintdefault(cpu_node, "ecache-line-size", 64);
+		printk("CPU[0]: Caches "
+		       "D[sz(%d):line_sz(%d)] "
+		       "I[sz(%d):line_sz(%d)] "
+		       "E[sz(%d):line_sz(%d)]\n",
+		       cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
+		       cpu_data(0).icache_size, cpu_data(0).icache_line_size,
+		       cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
 	}
 #endif
 
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
index 538522848ad4..acc889a7f9c1 100644
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -9,17 +9,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 
-#if PAGE_SHIFT == 13
-#define SZ_BITS		_PAGE_SZ8K
-#elif PAGE_SHIFT == 16
-#define SZ_BITS		_PAGE_SZ64K
-#elif PAGE_SHIFT == 19
-#define SZ_BITS		_PAGE_SZ512K
-#elif PAGE_SHIFT == 22
-#define SZ_BITS		_PAGE_SZ4MB
-#endif
-
-#define VALID_SZ_BITS	(_PAGE_VALID | SZ_BITS)
+#define VALID_SZ_BITS	(_PAGE_VALID | _PAGE_SZBITS)
 
 #define VPTE_BITS	(_PAGE_CP | _PAGE_CV | _PAGE_P )
 #define VPTE_SHIFT	(PAGE_SHIFT - 3)
@@ -163,7 +153,6 @@ sparc64_vpte_continue:
 	stxa	%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
 	retry					! Load PTE once again
 
-#undef SZ_BITS
 #undef VALID_SZ_BITS
 #undef VPTE_SHIFT
 #undef VPTE_BITS
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
index ded2fed23fcc..6528786840c0 100644
--- a/arch/sparc64/kernel/dtlb_base.S
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -53,39 +53,36 @@
  * be guaranteed to be 0 ... mmu_context.h does guarantee this
  * by only using 10 bits in the hwcontext value.
  */
-#define CREATE_VPTE_OFFSET1(r1, r2)
+#define CREATE_VPTE_OFFSET1(r1, r2)	nop
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 				srax	r1, 10, r2
-#define CREATE_VPTE_NOP		nop
 #else
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 				srax	r1, PAGE_SHIFT, r2
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 				sllx	r2, 3, r2
-#define CREATE_VPTE_NOP
 #endif
 
 /* DTLB ** ICACHE line 1: Quick user TLB misses		*/
+	mov		TLB_SFSR, %g1
 	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
 	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
 from_tl1_trap:
 	rdpr		%tl, %g5			! For TL==3 test
 	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
-	be,pn		%xcc, 3f			! Yep, special processing
+	be,pn		%xcc, kvmap			! Yep, special processing
 	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
 	cmp		%g5, 4				! Last trap level?
-	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
-	 nop						! delay slot
 
 /* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses	*/
+	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
+	 nop						! delay slot
 	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
 1:	brgez,pn	%g5, longpath			! Invalid, branch out
 	 nop						! Delay-slot
 9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
 	retry						! Trap return
-3:	brlz,pt		%g4, 9b				! Kernel virtual map?
-	 xor		%g2, %g4, %g5			! Finish bit twiddles
-	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc
+	nop
 
 /* DTLB ** ICACHE line 3: winfixups+real_faults		*/
 longpath:
@@ -106,8 +103,7 @@ longpath:
 	nop
 	nop
 	nop
-	CREATE_VPTE_NOP
+	nop
 
 #undef CREATE_VPTE_OFFSET1
 #undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
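The 8K-page special case above folds two shifts into one. The VPTE offset is (vaddr >> PAGE_SHIFT) * 8 because each PTE is 8 bytes, and for PAGE_SHIFT == 13 that collapses to a single arithmetic shift right by 13 - 3 = 10, provided bits 10..12 of the TAG_ACCESS value are clear, which the comment at the top of the hunk guarantees by restricting hardware contexts to 10 bits. A small C check of the identity, under exactly those assumptions:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* TAG_ACCESS layout: VA in the high bits, context in the
             * low 13 bits; contexts use only 10 bits, so bits 10..12
             * are zero.
             */
            int64_t tag = (int64_t)0xfffff80000346000ULL | 0x1ff;

            /* (tag >> 13) << 3 == tag >> 10 when bits 10..12 are
             * clear; the arithmetic shift also preserves the
             * sign-extended high bits of sparc64 kernel addresses.
             */
            assert(((tag >> 13) << 3) == (tag >> 10));
            return 0;
    }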
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
index d848bb7374bb..e0a920162604 100644
--- a/arch/sparc64/kernel/dtlb_prot.S
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -14,14 +14,14 @@
  */
 
 /* PROT ** ICACHE line 1: User DTLB protection trap	*/
-	stxa		%g0, [%g1] ASI_DMMU		! Clear SFSR FaultValid bit
-	membar		#Sync				! Synchronize ASI stores
-	rdpr		%pstate, %g5			! Move into alternate globals
+	mov		TLB_SFSR, %g1
+	stxa		%g0, [%g1] ASI_DMMU		! Clear FaultValid bit
+	membar		#Sync				! Synchronize stores
+	rdpr		%pstate, %g5			! Move into alt-globals
 	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
-	rdpr		%tl, %g1			! Need to do a winfixup?
+	rdpr		%tl, %g1			! Need a winfixup?
 	cmp		%g1, 1				! Trap level >1?
-	mov		TLB_TAG_ACCESS, %g4		! Prepare reload of vaddr
-	nop
+	mov		TLB_TAG_ACCESS, %g4		! For reload of vaddr
 
 /* PROT ** ICACHE line 2: More real fault processing */
 	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 3e0badb820c5..11a848402fb1 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,164 +30,10 @@
 	.text
 	.align	32
 
-	.globl	sparc64_vpte_patchme1
-	.globl	sparc64_vpte_patchme2
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP
- * range (note that this is only possible for instruction miss, data misses to
- * obp range do not use vpte). If so, go back directly to the faulting address.
- * This is because we want to read the tpc, otherwise we have no way of knowing
- * the 8k aligned faulting address if we are using >8k kernel pagesize. This
- * also ensures no vpte range addresses are dropped into tlb while obp is
- * executing (see inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
-	/* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
-	mov	0xf, %g5
-	sllx	%g5, 28, %g5
-
-	/* Is addr >= LOW_OBP_ADDRESS? */
-	cmp	%g4, %g5
-	blu,pn	%xcc, sparc64_vpte_patchme1
-	 mov	0x1, %g5
-
-	/* Load 0x100000000, which is HI_OBP_ADDRESS. */
-	sllx	%g5, 32, %g5
-
-	/* Is addr < HI_OBP_ADDRESS? */
-	cmp	%g4, %g5
-	blu,pn	%xcc, obp_iaddr_patch
-	 nop
-
-	/* These two instructions are patched by paginig_init(). */
-sparc64_vpte_patchme1:
-	sethi	%hi(0), %g5
-sparc64_vpte_patchme2:
-	or	%g5, %lo(0), %g5
-
-	/* With kernel PGD in %g5, branch back into dtlb_backend. */
-	ba,pt	%xcc, sparc64_kpte_continue
-	 andn	%g1, 0x3, %g1	/* Finish PMD offset adjustment. */
-
-vpte_noent:
-	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
-	 * skip over the trap instruction so that the top level
-	 * TLB miss handler will thing this %g5 value is just an
-	 * invalid PTE, thus branching to full fault processing.
-	 */
-	mov	TLB_SFSR, %g1
-	stxa	%g4, [%g1 + %g1] ASI_DMMU
-	done
-
-	.globl	obp_iaddr_patch
-obp_iaddr_patch:
-	/* These two instructions patched by inherit_prom_mappings(). */
-	sethi	%hi(0), %g5
-	or	%g5, %lo(0), %g5
-
-	/* Behave as if we are at TL0. */
-	wrpr	%g0, 1, %tl
-	rdpr	%tpc, %g4	/* Find original faulting iaddr */
-	srlx	%g4, 13, %g4	/* Throw out context bits */
-	sllx	%g4, 13, %g4	/* g4 has vpn + ctx0 now */
-
-	/* Restore previous TAG_ACCESS. */
-	mov	TLB_SFSR, %g1
-	stxa	%g4, [%g1 + %g1] ASI_IMMU
-
-	/* Get PMD offset. */
-	srlx	%g4, 23, %g6
-	and	%g6, 0x7ff, %g6
-	sllx	%g6, 2, %g6
-
-	/* Load PMD, is it valid? */
-	lduwa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn	%g5, longpath
-	 sllx	%g5, 11, %g5
-
-	/* Get PTE offset. */
-	srlx	%g4, 13, %g6
-	and	%g6, 0x3ff, %g6
-	sllx	%g6, 3, %g6
-
-	/* Load PTE. */
-	ldxa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn %g5, longpath
-	 nop
-
-	/* TLB load and return from trap. */
-	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
-	retry
-
-	.globl	obp_daddr_patch
-obp_daddr_patch:
-	/* These two instructions patched by inherit_prom_mappings(). */
-	sethi	%hi(0), %g5
-	or	%g5, %lo(0), %g5
-
-	/* Get PMD offset. */
-	srlx	%g4, 23, %g6
-	and	%g6, 0x7ff, %g6
-	sllx	%g6, 2, %g6
-
-	/* Load PMD, is it valid? */
-	lduwa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn	%g5, longpath
-	 sllx	%g5, 11, %g5
-
-	/* Get PTE offset. */
-	srlx	%g4, 13, %g6
-	and	%g6, 0x3ff, %g6
-	sllx	%g6, 3, %g6
-
-	/* Load PTE. */
-	ldxa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn %g5, longpath
-	 nop
-
-	/* TLB load and return from trap. */
-	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
-	retry
-
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by prom, as well as by kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use information saved during inherit_prom_mappings() using 8k
- * pagesize.
- */
-kvmap:
-	/* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
-	mov	0xf, %g5
-	sllx	%g5, 28, %g5
-
-	/* Is addr >= LOW_OBP_ADDRESS? */
-	cmp	%g4, %g5
-	blu,pn	%xcc, vmalloc_addr
-	 mov	0x1, %g5
-
-	/* Load 0x100000000, which is HI_OBP_ADDRESS. */
-	sllx	%g5, 32, %g5
-
-	/* Is addr < HI_OBP_ADDRESS? */
-	cmp	%g4, %g5
-	blu,pn	%xcc, obp_daddr_patch
-	 nop
-
-vmalloc_addr:
-	/* If we get here, a vmalloc addr accessed, load kernel VPTE. */
-	ldxa	[%g3 + %g6] ASI_N, %g5
-	brgez,pn %g5, longpath
-	 nop
-
-	/* PTE is valid, load into TLB and return from trap. */
-	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
-	retry
-
 	/* This is trivial with the new code... */
 	.globl	do_fpdis
 do_fpdis:
-	sethi	%hi(TSTATE_PEF), %g4	! IEU0
+	sethi	%hi(TSTATE_PEF), %g4
 	rdpr	%tstate, %g5
 	andcc	%g5, %g4, %g0
 	be,pt	%xcc, 1f
@@ -204,18 +50,18 @@ do_fpdis:
 	 add	%g0, %g0, %g0
 	ba,a,pt	%xcc, rtrap_clr_l6
 
-1:	ldub	[%g6 + TI_FPSAVED], %g5	! Load	Group
-	wr	%g0, FPRS_FEF, %fprs	! LSU	Group+4bubbles
-	andcc	%g5, FPRS_FEF, %g0	! IEU1	Group
-	be,a,pt	%icc, 1f		! CTI
-	 clr	%g7			! IEU0
-	ldx	[%g6 + TI_GSR], %g7	! Load	Group
-1:	andcc	%g5, FPRS_DL, %g0	! IEU1
-	bne,pn	%icc, 2f		! CTI
-	 fzero	%f0			! FPA
-	andcc	%g5, FPRS_DU, %g0	! IEU1	Group
-	bne,pn	%icc, 1f		! CTI
-	 fzero	%f2			! FPA
+1:	ldub	[%g6 + TI_FPSAVED], %g5
+	wr	%g0, FPRS_FEF, %fprs
+	andcc	%g5, FPRS_FEF, %g0
+	be,a,pt	%icc, 1f
+	 clr	%g7
+	ldx	[%g6 + TI_GSR], %g7
+1:	andcc	%g5, FPRS_DL, %g0
+	bne,pn	%icc, 2f
+	 fzero	%f0
+	andcc	%g5, FPRS_DU, %g0
+	bne,pn	%icc, 1f
+	 fzero	%f2
 	faddd	%f0, %f2, %f4
 	fmuld	%f0, %f2, %f6
 	faddd	%f0, %f2, %f8
@@ -251,15 +97,17 @@ do_fpdis:
 	faddd	%f0, %f2, %f4
 	fmuld	%f0, %f2, %f6
 	ldxa	[%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_1:
-	sethi	%hi(0), %g2
+	sethi	%hi(sparc64_kern_sec_context), %g2
+	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2
 	stxa	%g2, [%g3] ASI_DMMU
 	membar	#Sync
 	add	%g6, TI_FPREGS + 0xc0, %g2
 	faddd	%f0, %f2, %f8
 	fmuld	%f0, %f2, %f10
-	ldda	[%g1] ASI_BLK_S, %f32	! grrr, where is ASI_BLK_NUCLEUS 8-(
+	membar	#Sync
+	ldda	[%g1] ASI_BLK_S, %f32
 	ldda	[%g2] ASI_BLK_S, %f48
+	membar	#Sync
 	faddd	%f0, %f2, %f12
 	fmuld	%f0, %f2, %f14
 	faddd	%f0, %f2, %f16
@@ -270,7 +118,6 @@ cplus_fptrap_insn_1:
 	fmuld	%f0, %f2, %f26
 	faddd	%f0, %f2, %f28
 	fmuld	%f0, %f2, %f30
-	membar	#Sync
 	b,pt	%xcc, fpdis_exit
 	 nop
 2:	andcc	%g5, FPRS_DU, %g0
@@ -280,15 +127,17 @@ cplus_fptrap_insn_1:
 	 fzero	%f34
 	ldxa	[%g3] ASI_DMMU, %g5
 	add	%g6, TI_FPREGS, %g1
-cplus_fptrap_insn_2:
-	sethi	%hi(0), %g2
+	sethi	%hi(sparc64_kern_sec_context), %g2
+	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2
 	stxa	%g2, [%g3] ASI_DMMU
 	membar	#Sync
 	add	%g6, TI_FPREGS + 0x40, %g2
 	faddd	%f32, %f34, %f36
 	fmuld	%f32, %f34, %f38
-	ldda	[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
+	membar	#Sync
+	ldda	[%g1] ASI_BLK_S, %f0
 	ldda	[%g2] ASI_BLK_S, %f16
+	membar	#Sync
 	faddd	%f32, %f34, %f40
 	fmuld	%f32, %f34, %f42
 	faddd	%f32, %f34, %f44
@@ -301,18 +150,18 @@ cplus_fptrap_insn_2:
 	fmuld	%f32, %f34, %f58
 	faddd	%f32, %f34, %f60
 	fmuld	%f32, %f34, %f62
-	membar	#Sync
 	ba,pt	%xcc, fpdis_exit
 	 nop
3:	mov	SECONDARY_CONTEXT, %g3
 	add	%g6, TI_FPREGS, %g1
 	ldxa	[%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_3:
-	sethi	%hi(0), %g2
+	sethi	%hi(sparc64_kern_sec_context), %g2
+	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2
 	stxa	%g2, [%g3] ASI_DMMU
 	membar	#Sync
 	mov	0x40, %g2
-	ldda	[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
+	membar	#Sync
+	ldda	[%g1] ASI_BLK_S, %f0
 	ldda	[%g1 + %g2] ASI_BLK_S, %f16
 	add	%g1, 0x80, %g1
 	ldda	[%g1] ASI_BLK_S, %f32
@@ -473,8 +322,8 @@ do_fptrap_after_fsr:
 	stx	%g3, [%g6 + TI_GSR]
 	mov	SECONDARY_CONTEXT, %g3
 	ldxa	[%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_4:
-	sethi	%hi(0), %g2
+	sethi	%hi(sparc64_kern_sec_context), %g2
+	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2
 	stxa	%g2, [%g3] ASI_DMMU
 	membar	#Sync
 	add	%g6, TI_FPREGS, %g2
@@ -495,45 +344,17 @@ cplus_fptrap_insn_4:
 	ba,pt	%xcc, etrap
 	 wr	%g0, 0, %fprs
 
-cplus_fptrap_1:
-	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
-	.globl	cheetah_plus_patch_fpdis
-cheetah_plus_patch_fpdis:
-	/* We configure the dTLB512_0 for 4MB pages and the
-	 * dTLB512_1 for 8K pages when in context zero.
-	 */
-	sethi	%hi(cplus_fptrap_1), %o0
-	lduw	[%o0 + %lo(cplus_fptrap_1)], %o1
-
-	set	cplus_fptrap_insn_1, %o2
-	stw	%o1, [%o2]
-	flush	%o2
-	set	cplus_fptrap_insn_2, %o2
-	stw	%o1, [%o2]
-	flush	%o2
-	set	cplus_fptrap_insn_3, %o2
-	stw	%o1, [%o2]
-	flush	%o2
-	set	cplus_fptrap_insn_4, %o2
-	stw	%o1, [%o2]
-	flush	%o2
-
-	retl
-	 nop
-
 	/* The registers for cross calls will be:
 	 *
 	 * DATA 0: [low 32-bits] Address of function to call, jmp to this
 	 *         [high 32-bits] MMU Context Argument 0, place in %g5
-	 * DATA 1: Address Argument 1, place in %g6
+	 * DATA 1: Address Argument 1, place in %g1
 	 * DATA 2: Address Argument 2, place in %g7
 	 *
 	 * With this method we can do most of the cross-call tlb/cache
 	 * flushing very quickly.
 	 *
-	 * Current CPU's IRQ worklist table is locked into %g1,
-	 * don't touch.
+	 * Current CPU's IRQ worklist table is locked into %g6, don't touch.
 	 */
 	.text
 	.align	32
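The cross-call comment above fixes a small ABI: a single 64-bit mondo data word carries both the handler address and an MMU context argument. A hedged C sketch of that packing (the function name is mine, not the kernel's):

    #include <stdint.h>

    /* DATA 0 encoding from the comment above: low 32 bits hold the
     * function to jump to (sparc64 kernel text sits in the low 4GB,
     * so 32 bits suffice), high 32 bits hold MMU context argument 0,
     * which the receiving trap handler unpacks into %g5.
     */
    static inline uint64_t xcall_data0(uint32_t handler_va, uint32_t ctx)
    {
            return ((uint64_t)ctx << 32) | handler_va;
    }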
@@ -1007,13 +828,14 @@ cheetah_plus_dcpe_trap_vector:
 	 nop
 
 do_cheetah_plus_data_parity:
-	ba,pt	%xcc, etrap
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	ba,pt	%xcc, etrap_irq
 	 rd	%pc, %g7
 	mov	0x0, %o0
 	call	cheetah_plus_parity_error
 	 add	%sp, PTREGS_OFF, %o1
-	ba,pt	%xcc, rtrap
-	 clr	%l6
+	ba,a,pt	%xcc, rtrap_irq
 
 cheetah_plus_dcpe_trap_vector_tl1:
 	membar	#Sync
@@ -1037,13 +859,14 @@ cheetah_plus_icpe_trap_vector:
 	 nop
 
 do_cheetah_plus_insn_parity:
-	ba,pt	%xcc, etrap
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	ba,pt	%xcc, etrap_irq
 	 rd	%pc, %g7
 	mov	0x1, %o0
 	call	cheetah_plus_parity_error
 	 add	%sp, PTREGS_OFF, %o1
-	ba,pt	%xcc, rtrap
-	 clr	%l6
+	ba,a,pt	%xcc, rtrap_irq
 
 cheetah_plus_icpe_trap_vector_tl1:
 	membar	#Sync
@@ -1076,6 +899,10 @@ do_dcpe_tl1:
 	 nop
 	wrpr	%g1, %tl		! Restore original trap level
 do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	sethi	%hi(dcache_parity_tl1_occurred), %g2
+	lduw	[%g2 + %lo(dcache_parity_tl1_occurred)], %g1
+	add	%g1, 1, %g1
+	stw	%g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
 	/* Reset D-cache parity */
 	sethi	%hi(1 << 16), %g1	! D-cache size
 	mov	(1 << 5), %g2		! D-cache line size
@@ -1122,6 +949,10 @@ do_icpe_tl1:
 	 nop
 	wrpr	%g1, %tl		! Restore original trap level
 do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	sethi	%hi(icache_parity_tl1_occurred), %g2
+	lduw	[%g2 + %lo(icache_parity_tl1_occurred)], %g1
+	add	%g1, 1, %g1
+	stw	%g1, [%g2 + %lo(icache_parity_tl1_occurred)]
 	/* Flush I-cache */
 	sethi	%hi(1 << 15), %g1	! I-cache size
 	mov	(1 << 5), %g2		! I-cache line size
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 50d2af1d98ae..0d8eba21111b 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -68,12 +68,8 @@ etrap_irq:
 
 	wrpr	%g3, 0, %otherwin
 	wrpr	%g2, 0, %wstate
-cplus_etrap_insn_1:
-	sethi	%hi(0), %g3
-	sllx	%g3, 32, %g3
-cplus_etrap_insn_2:
-	sethi	%hi(0), %g2
-	or	%g3, %g2, %g3
+	sethi	%hi(sparc64_kern_pri_context), %g2
+	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
 	stxa	%g3, [%l4] ASI_DMMU
 	flush	%l6
 	wr	%g0, ASI_AIUS, %asi
@@ -215,12 +211,8 @@ scetrap:	rdpr	%pil, %g2
 	mov	PRIMARY_CONTEXT, %l4
 	wrpr	%g3, 0, %otherwin
 	wrpr	%g2, 0, %wstate
-cplus_etrap_insn_3:
-	sethi	%hi(0), %g3
-	sllx	%g3, 32, %g3
-cplus_etrap_insn_4:
-	sethi	%hi(0), %g2
-	or	%g3, %g2, %g3
+	sethi	%hi(sparc64_kern_pri_context), %g2
+	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
 	stxa	%g3, [%l4] ASI_DMMU
 	flush	%l6
 
@@ -264,38 +256,3 @@ cplus_etrap_insn_4:
 
 #undef TASK_REGOFF
 #undef ETRAP_PSTATE1
-
-cplus_einsn_1:
-	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %g3
-cplus_einsn_2:
-	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
-	.globl	cheetah_plus_patch_etrap
-cheetah_plus_patch_etrap:
-	/* We configure the dTLB512_0 for 4MB pages and the
-	 * dTLB512_1 for 8K pages when in context zero.
-	 */
-	sethi	%hi(cplus_einsn_1), %o0
-	sethi	%hi(cplus_etrap_insn_1), %o2
-	lduw	[%o0 + %lo(cplus_einsn_1)], %o1
-	or	%o2, %lo(cplus_etrap_insn_1), %o2
-	stw	%o1, [%o2]
-	flush	%o2
-	sethi	%hi(cplus_etrap_insn_3), %o2
-	or	%o2, %lo(cplus_etrap_insn_3), %o2
-	stw	%o1, [%o2]
-	flush	%o2
-
-	sethi	%hi(cplus_einsn_2), %o0
-	sethi	%hi(cplus_etrap_insn_2), %o2
-	lduw	[%o0 + %lo(cplus_einsn_2)], %o1
-	or	%o2, %lo(cplus_etrap_insn_2), %o2
-	stw	%o1, [%o2]
-	flush	%o2
-	sethi	%hi(cplus_etrap_insn_4), %o2
-	or	%o2, %lo(cplus_etrap_insn_4), %o2
-	stw	%o1, [%o2]
-	flush	%o2
-
-	retl
-	 nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 1fa06c4e3bdb..b49dcd4504b0 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -28,19 +28,14 @@
 #include <asm/mmu.h>
 
-/* This section from _start to sparc64_boot_end should fit into
- * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
- * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
- * 0x0000.0000.0040.6000 and empty_bad_page, which is from
- * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
+/* This section from _start to sparc64_boot_end should fit into
+ * 0x0000000000404000 to 0x0000000000408000.
  */
-
 	.text
 	.globl	start, _start, stext, _stext
 _start:
 start:
 _stext:
 stext:
-bootup_user_stack:
 ! 0x0000000000404000
 	b	sparc64_boot
 	 flushw				/* Flush register file. */
@@ -80,15 +75,169 @@ sparc_ramdisk_image64:
 	.xword	0
 	.word	_end
 
-	/* We must be careful, 32-bit OpenBOOT will get confused if it
-	 * tries to save away a register window to a 64-bit kernel
-	 * stack address.  Flush all windows, disable interrupts,
-	 * remap if necessary, jump onto kernel trap table, then kernel
-	 * stack, or else we die.
+	/* PROM cif handler code address is in %o4. */
+sparc64_boot:
+1:	rd	%pc, %g7
+	set	1b, %g1
+	cmp	%g1, %g7
+	be,pn	%xcc, sparc64_boot_after_remap
+	 mov	%o4, %l7
+
+	/* We need to remap the kernel.  Use position independent
+	 * code to remap us to KERNBASE.
 	 *
-	 * PROM entry point is on %o4
+	 * SILO can invoke us with 32-bit address masking enabled,
+	 * so make sure that's clear.
 	 */
-sparc64_boot:
+	rdpr	%pstate, %g1
+	andn	%g1, PSTATE_AM, %g1
+	wrpr	%g1, 0x0, %pstate
+	ba,a,pt	%xcc, 1f
+
+	.globl	prom_finddev_name, prom_chosen_path
+	.globl	prom_getprop_name, prom_mmu_name
+	.globl	prom_callmethod_name, prom_translate_name
+	.globl	prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
+	.globl	prom_boot_mapped_pc, prom_boot_mapping_mode
+	.globl	prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
+prom_finddev_name:
+	.asciz	"finddevice"
+prom_chosen_path:
+	.asciz	"/chosen"
+prom_getprop_name:
+	.asciz	"getprop"
+prom_mmu_name:
+	.asciz	"mmu"
+prom_callmethod_name:
+	.asciz	"call-method"
+prom_translate_name:
+	.asciz	"translate"
+prom_map_name:
+	.asciz	"map"
+prom_unmap_name:
+	.asciz	"unmap"
+	.align	4
+prom_mmu_ihandle_cache:
+	.word	0
+prom_boot_mapped_pc:
+	.word	0
+prom_boot_mapping_mode:
+	.word	0
+	.align	8
+prom_boot_mapping_phys_high:
+	.xword	0
+prom_boot_mapping_phys_low:
+	.xword	0
+1:
+	rd	%pc, %l0
+	mov	(1b - prom_finddev_name), %l1
+	mov	(1b - prom_chosen_path), %l2
+	mov	(1b - prom_boot_mapped_pc), %l3
+	sub	%l0, %l1, %l1
+	sub	%l0, %l2, %l2
+	sub	%l0, %l3, %l3
+	stw	%l0, [%l3]
+	sub	%sp, (192 + 128), %sp
+
+	/* chosen_node = prom_finddevice("/chosen") */
+	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "finddevice"
+	mov	1, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 1
+	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
+	stx	%l2, [%sp + 2047 + 128 + 0x18]	! arg1, "/chosen"
+	stx	%g0, [%sp + 2047 + 128 + 0x20]	! ret1
+	call	%l7
+	 add	%sp, (2047 + 128), %o0		! argument array
+
+	ldx	[%sp + 2047 + 128 + 0x20], %l4	! chosen device node
+
+	mov	(1b - prom_getprop_name), %l1
+	mov	(1b - prom_mmu_name), %l2
+	mov	(1b - prom_mmu_ihandle_cache), %l5
+	sub	%l0, %l1, %l1
+	sub	%l0, %l2, %l2
+	sub	%l0, %l5, %l5
+
+	/* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */
+	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "getprop"
+	mov	4, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 4
+	mov	1, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
+	stx	%l4, [%sp + 2047 + 128 + 0x18]	! arg1, chosen_node
+	stx	%l2, [%sp + 2047 + 128 + 0x20]	! arg2, "mmu"
+	stx	%l5, [%sp + 2047 + 128 + 0x28]	! arg3, &prom_mmu_ihandle_cache
+	mov	4, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4, sizeof(arg3)
+	stx	%g0, [%sp + 2047 + 128 + 0x38]	! ret1
+	call	%l7
+	 add	%sp, (2047 + 128), %o0		! argument array
+
+	mov	(1b - prom_callmethod_name), %l1
+	mov	(1b - prom_translate_name), %l2
+	sub	%l0, %l1, %l1
+	sub	%l0, %l2, %l2
+	lduw	[%l5], %l5			! prom_mmu_ihandle_cache
+
+	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "call-method"
+	mov	3, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 3
+	mov	5, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 5
+	stx	%l2, [%sp + 2047 + 128 + 0x18]	! arg1: "translate"
+	stx	%l5, [%sp + 2047 + 128 + 0x20]	! arg2: prom_mmu_ihandle_cache
+	/* PAGE align */
+	srlx	%l0, 13, %l3
+	sllx	%l3, 13, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: vaddr, our PC
+	stx	%g0, [%sp + 2047 + 128 + 0x30]	! res1
+	stx	%g0, [%sp + 2047 + 128 + 0x38]	! res2
+	stx	%g0, [%sp + 2047 + 128 + 0x40]	! res3
+	stx	%g0, [%sp + 2047 + 128 + 0x48]	! res4
+	stx	%g0, [%sp + 2047 + 128 + 0x50]	! res5
+	call	%l7
+	 add	%sp, (2047 + 128), %o0		! argument array
+
+	ldx	[%sp + 2047 + 128 + 0x40], %l1	! translation mode
+	mov	(1b - prom_boot_mapping_mode), %l4
+	sub	%l0, %l4, %l4
+	stw	%l1, [%l4]
+	mov	(1b - prom_boot_mapping_phys_high), %l4
+	sub	%l0, %l4, %l4
+	ldx	[%sp + 2047 + 128 + 0x48], %l2	! physaddr high
+	stx	%l2, [%l4 + 0x0]
+	ldx	[%sp + 2047 + 128 + 0x50], %l3	! physaddr low
+	/* 4MB align */
+	srlx	%l3, 22, %l3
+	sllx	%l3, 22, %l3
+	stx	%l3, [%l4 + 0x8]
+
+	/* Leave service as-is, "call-method" */
+	mov	7, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 7
+	mov	1, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
+	mov	(1b - prom_map_name), %l3
+	sub	%l0, %l3, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x18]	! arg1: "map"
+	/* Leave arg2 as-is, prom_mmu_ihandle_cache */
+	mov	-1, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: mode (-1 default)
+	sethi	%hi(8 * 1024 * 1024), %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: size (8MB)
+	sethi	%hi(KERNBASE), %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x38]	! arg5: vaddr (KERNBASE)
+	stx	%g0, [%sp + 2047 + 128 + 0x40]	! arg6: empty
+	mov	(1b - prom_boot_mapping_phys_low), %l3
+	sub	%l0, %l3, %l3
+	ldx	[%l3], %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x48]	! arg7: phys addr
+	call	%l7
+	 add	%sp, (2047 + 128), %o0		! argument array
+
+	add	%sp, (192 + 128), %sp
+
+sparc64_boot_after_remap:
 	BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
 	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
 	ba,pt	%xcc, spitfire_boot
@@ -125,185 +274,7 @@ cheetah_generic_boot:
 	stxa	%g0, [%g3] ASI_IMMU
 	membar	#Sync
 
-	wrpr	%g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
-	wr	%g0, 0, %fprs
-
-	/* Just like for Spitfire, we probe itlb-2 for a mapping which
-	 * matches our current %pc.  We take the physical address in
-	 * that mapping and use it to make our own.
-	 */
-
-	/* %g5 holds the tlb data */
-	sethi	%uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
-	sllx	%g5, 32, %g5
-	or	%g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
-	/* Put PADDR tlb data mask into %g3. */
-	sethi	%uhi(_PAGE_PADDR), %g3
-	or	%g3, %ulo(_PAGE_PADDR), %g3
-	sllx	%g3, 32, %g3
-	sethi	%hi(_PAGE_PADDR), %g7
-	or	%g7, %lo(_PAGE_PADDR), %g7
-	or	%g3, %g7, %g3
-
-	set	2 << 16, %l0		/* TLB entry walker. */
-	set	0x1fff, %l2		/* Page mask. */
-	rd	%pc, %l3
-	andn	%l3, %l2, %g2		/* vaddr comparator */
-
-1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
-	membar	#Sync
-	andn	%g1, %l2, %g1
-	cmp	%g1, %g2
-	be,pn	%xcc, cheetah_got_tlbentry
-	 nop
-	and	%l0, (127 << 3), %g1
-	cmp	%g1, (127 << 3)
-	blu,pt	%xcc, 1b
-	 add	%l0, (1 << 3), %l0
-
-	/* Search the small TLB.  OBP never maps us like that but
-	 * newer SILO can.
-	 */
-	clr	%l0
-
-1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
-	membar	#Sync
-	andn	%g1, %l2, %g1
-	cmp	%g1, %g2
-	be,pn	%xcc, cheetah_got_tlbentry
-	 nop
-	cmp	%l0, (15 << 3)
-	blu,pt	%xcc, 1b
-	 add	%l0, (1 << 3), %l0
-
-	/* BUG() if we get here... */
-	ta	0x5
-
-cheetah_got_tlbentry:
-	ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g0
-	ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g1
-	membar	#Sync
-	and	%g1, %g3, %g1
-	set	0x5fff, %l0
-	andn	%g1, %l0, %g1
-	or	%g5, %g1, %g5
-
-	/* Clear out any KERNBASE area entries. */
-	set	2 << 16, %l0
-	sethi	%hi(KERNBASE), %g3
-	sethi	%hi(KERNBASE<<1), %g7
-	mov	TLB_TAG_ACCESS, %l7
-
-	/* First, check ITLB */
-1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
-	membar	#Sync
-	andn	%g1, %l2, %g1
-	cmp	%g1, %g3
-	blu,pn	%xcc, 2f
-	 cmp	%g1, %g7
-	bgeu,pn	%xcc, 2f
-	 nop
-	stxa	%g0, [%l7] ASI_IMMU
-	membar	#Sync
-	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
-	membar	#Sync
-
-2:	and	%l0, (127 << 3), %g1
-	cmp	%g1, (127 << 3)
-	blu,pt	%xcc, 1b
-	 add	%l0, (1 << 3), %l0
-
-	/* Next, check DTLB */
-	set	2 << 16, %l0
-1:	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
-	membar	#Sync
-	andn	%g1, %l2, %g1
-	cmp	%g1, %g3
-	blu,pn	%xcc, 2f
-	 cmp	%g1, %g7
-	bgeu,pn	%xcc, 2f
-	 nop
-	stxa	%g0, [%l7] ASI_DMMU
-	membar	#Sync
-	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
-
-2:	and	%l0, (511 << 3), %g1
-	cmp	%g1, (511 << 3)
-	blu,pt	%xcc, 1b
-	 add	%l0, (1 << 3), %l0
-
-	/* On Cheetah+, have to check second DTLB. */
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
-	ba,pt	%xcc, 9f
-	 nop
-
-2:	set	3 << 16, %l0
-1:	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
-	membar	#Sync
-	andn	%g1, %l2, %g1
-	cmp	%g1, %g3
-	blu,pn	%xcc, 2f
-	 cmp	%g1, %g7
-	bgeu,pn	%xcc, 2f
-	 nop
-	stxa	%g0, [%l7] ASI_DMMU
-	membar	#Sync
-	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
-
-2:	and	%l0, (511 << 3), %g1
-	cmp	%g1, (511 << 3)
-	blu,pt	%xcc, 1b
-	 add	%l0, (1 << 3), %l0
-
-9:
-
-	/* Now lock the TTE we created into ITLB-0 and DTLB-0,
-	 * entry 15 (and maybe 14 too).
-	 */
-	sethi	%hi(KERNBASE), %g3
-	set	(0 << 16) | (15 << 3), %g7
-	stxa	%g3, [%l7] ASI_DMMU
-	membar	#Sync
-	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
-	stxa	%g3, [%l7] ASI_IMMU
-	membar	#Sync
-	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
-	membar	#Sync
-	flush	%g3
-	membar	#Sync
-	sethi	%hi(_end), %g3			/* Check for bigkernel case */
-	or	%g3, %lo(_end), %g3
-	srl	%g3, 23, %g3			/* Check if _end > 8M */
-	brz,pt	%g3, 1f
-	 sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
-	sethi	%hi(0x400000), %g3
-	or	%g3, %lo(0x400000), %g3
-	add	%g5, %g3, %g5			/* New tte data */
-	andn	%g5, (_PAGE_G), %g5
-	sethi	%hi(KERNBASE+0x400000), %g3
-	or	%g3, %lo(KERNBASE+0x400000), %g3
-	set	(0 << 16) | (14 << 3), %g7
-	stxa	%g3, [%l7] ASI_DMMU
-	membar	#Sync
-	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
-	stxa	%g3, [%l7] ASI_IMMU
-	membar	#Sync
-	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
-	membar	#Sync
-	flush	%g3
-	membar	#Sync
-	sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
-	ba,pt	%xcc, 1f
-	 nop
-
-1:	set	sun4u_init, %g2
-	jmpl	%g2 + %g0, %g0
-	 nop
+	ba,a,pt	%xcc, jump_to_sun4u_init
 
 spitfire_boot:
 	/* Typically PROM has already enabled both MMU's and both on-chip
@@ -313,6 +284,7 @@ spitfire_boot:
 	stxa	%g1, [%g0] ASI_LSU_CONTROL
 	membar	#Sync
 
+jump_to_sun4u_init:
 	/*
 	 * Make sure we are in privileged mode, have address masking,
 	 * using the ordinary globals and have enabled floating
@@ -324,151 +296,6 @@ spitfire_boot:
 	wrpr	%g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
 	wr	%g0, 0, %fprs
 
-spitfire_create_mappings:
-	/* %g5 holds the tlb data */
-	sethi	%uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
-	sllx	%g5, 32, %g5
-	or	%g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
-	/* Base of physical memory cannot reliably be assumed to be
-	 * at 0x0!  Figure out where it happens to be. -DaveM
-	 */
-
-	/* Put PADDR tlb data mask into %g3. */
-	sethi	%uhi(_PAGE_PADDR_SF), %g3
-	or	%g3, %ulo(_PAGE_PADDR_SF), %g3
-	sllx	%g3, 32, %g3
-	sethi	%hi(_PAGE_PADDR_SF), %g7
-	or	%g7, %lo(_PAGE_PADDR_SF), %g7
-	or	%g3, %g7, %g3
-
-	/* Walk through entire ITLB, looking for entry which maps
-	 * our %pc currently, stick PADDR from there into %g5 tlb data.
-	 */
-	clr	%l0			/* TLB entry walker. */
-	set	0x1fff, %l2		/* Page mask. */
-	rd	%pc, %l3
-	andn	%l3, %l2, %g2		/* vaddr comparator */
-1:
-	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
-	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
-	nop
-	nop
-	nop
-	andn	%g1, %l2, %g1		/* Get vaddr */
-	cmp	%g1, %g2
-	be,a,pn	%xcc, spitfire_got_tlbentry
-	 ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g1
-	cmp	%l0, (63 << 3)
-	blu,pt	%xcc, 1b
-	 add	%l0, (1 << 3), %l0
-
-	/* BUG() if we get here... */
-	ta	0x5
-
-spitfire_got_tlbentry:
-	/* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
-	nop
-	nop
-	nop
-	and	%g1, %g3, %g1		/* Mask to just get paddr bits. */
-	set	0x5fff, %l3		/* Mask offset to get phys base. */
-	andn	%g1, %l3, %g1
-
-	/* NOTE: We hold on to %g1 paddr base as we need it below to lock
-	 * NOTE: the PROM cif code into the TLB.
-	 */
-
-	or	%g5, %g1, %g5		/* Or it into TAG being built. */
-
-	clr	%l0			/* TLB entry walker. */
-	sethi	%hi(KERNBASE), %g3	/* 4M lower limit */
-	sethi	%hi(KERNBASE<<1), %g7	/* 8M upper limit */
-	mov	TLB_TAG_ACCESS, %l7
-1:
-	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
-	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
-	nop
-	nop
-	nop
-	andn	%g1, %l2, %g1		/* Get vaddr */
-	cmp	%g1, %g3
-	blu,pn	%xcc, 2f
-	 cmp	%g1, %g7
-	bgeu,pn	%xcc, 2f
-	 nop
-	stxa	%g0, [%l7] ASI_IMMU
-	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
-	membar	#Sync
-2:
-	cmp	%l0, (63 << 3)
-	blu,pt	%xcc, 1b
-	 add	%l0, (1 << 3), %l0
-
-	nop; nop; nop
-
-	clr	%l0			/* TLB entry walker. */
-1:
-	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
-	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
-	nop
-	nop
-	nop
-	andn	%g1, %l2, %g1		/* Get vaddr */
-	cmp	%g1, %g3
-	blu,pn	%xcc, 2f
-	 cmp	%g1, %g7
-	bgeu,pn	%xcc, 2f
-	 nop
-	stxa	%g0, [%l7] ASI_DMMU
-	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
-2:
-	cmp	%l0, (63 << 3)
-	blu,pt	%xcc, 1b
-	 add	%l0, (1 << 3), %l0
-
-	nop; nop; nop
-
-
-	/* PROM never puts any TLB entries into the MMU with the lock bit
-	 * set.  So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
-	 */
-
-	sethi	%hi(KERNBASE), %g3
-	mov	(63 << 3), %g7
-	stxa	%g3, [%l7] ASI_DMMU		/* KERNBASE into TLB TAG */
-	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS	/* TTE into TLB DATA */
-	membar	#Sync
-	stxa	%g3, [%l7] ASI_IMMU		/* KERNBASE into TLB TAG */
-	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS	/* TTE into TLB DATA */
-	membar	#Sync
-	flush	%g3
-	membar	#Sync
-	sethi	%hi(_end), %g3			/* Check for bigkernel case */
-	or	%g3, %lo(_end), %g3
-	srl	%g3, 23, %g3			/* Check if _end > 8M */
-	brz,pt	%g3, 2f
-	 sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
-	sethi	%hi(0x400000), %g3
-	or	%g3, %lo(0x400000), %g3
-	add	%g5, %g3, %g5			/* New tte data */
-	andn	%g5, (_PAGE_G), %g5
-	sethi	%hi(KERNBASE+0x400000), %g3
-	or	%g3, %lo(KERNBASE+0x400000), %g3
-	mov	(62 << 3), %g7
-	stxa	%g3, [%l7] ASI_DMMU
-	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
-	stxa	%g3, [%l7] ASI_IMMU
-	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
-	membar	#Sync
-	flush	%g3
-	membar	#Sync
-	sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
-2:	ba,pt	%xcc, 1f
-	 nop
-1:
 	set	sun4u_init, %g2
 	jmpl	%g2 + %g0, %g0
 	 nop
@@ -483,38 +310,12 @@ sun4u_init:
 	stxa	%g0, [%g7] ASI_DMMU
 	membar	#Sync
 
-	/* We are now safely (we hope) in Nucleus context (0), rewrite
-	 * the KERNBASE TTE's so they no longer have the global bit set.
-	 * Don't forget to setup TAG_ACCESS first 8-)
-	 */
-	mov	TLB_TAG_ACCESS, %g2
-	stxa	%g3, [%g2] ASI_IMMU
-	stxa	%g3, [%g2] ASI_DMMU
-	membar	#Sync
-
 	BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
 
 	ba,pt	%xcc, spitfire_tlb_fixup
 	 nop
 
 cheetah_tlb_fixup:
-	set	(0 << 16) | (15 << 3), %g7
-	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g0
-	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
-	andn	%g1, (_PAGE_G), %g1
-	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
-	membar	#Sync
-
-	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g0
-	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
-	andn	%g1, (_PAGE_G), %g1
-	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
-
-	/* Kill instruction prefetch queues. */
-	flush	%g3
-	membar	#Sync
-
 	mov	2, %g2		/* Set TLB type to cheetah+. */
 	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
 
@@ -523,23 +324,7 @@ cheetah_tlb_fixup:
 1:	sethi	%hi(tlb_type), %g1
 	stw	%g2, [%g1 + %lo(tlb_type)]
 
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
-	ba,pt	%xcc, 2f
-	 nop
-
-1:	/* Patch context register writes to support nucleus page
-	 * size correctly.
-	 */
-	call	cheetah_plus_patch_etrap
-	 nop
-	call	cheetah_plus_patch_rtrap
-	 nop
-	call	cheetah_plus_patch_fpdis
-	 nop
-	call	cheetah_plus_patch_winfixup
-	 nop
-
-2:	/* Patch copy/page operations to cheetah optimized versions. */
+	/* Patch copy/page operations to cheetah optimized versions. */
 	call	cheetah_patch_copyops
 	 nop
 	call	cheetah_patch_copy_page
@@ -551,21 +336,6 @@ cheetah_tlb_fixup:
 	 nop
 
 spitfire_tlb_fixup:
-	mov	(63 << 3), %g7
-	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
-	andn	%g1, (_PAGE_G), %g1
-	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
-	membar	#Sync
-
-	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
-	andn	%g1, (_PAGE_G), %g1
-	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
-
-	/* Kill instruction prefetch queues. */
-	flush	%g3
-	membar	#Sync
-
 	/* Set TLB type to spitfire. */
 	mov	0, %g2
 	sethi	%hi(tlb_type), %g1
@@ -578,24 +348,6 @@ tlb_fixup_done:
 	mov	%sp, %l6
 	mov	%o4, %l7
 
-#if 0 /* We don't do it like this anymore, but for historical hack value
-       * I leave this snippet here to show how crazy we can be sometimes. 8-)
-       */
-
-	/* Setup "Linux Current Register", thanks Sun 8-) */
-	wr	%g0, 0x1, %pcr
-
-	/* Blackbird errata workaround.  See commentary in
-	 * smp.c:smp_percpu_timer_interrupt() for more
-	 * information.
-	 */
-	ba,pt	%xcc, 99f
-	 nop
-	.align	64
-99:	wr	%g6, %g0, %pic
-	rd	%pic, %g0
-#endif
-
 	wr	%g0, ASI_P, %asi
 	mov	1, %g1
 	sllx	%g1, THREAD_SHIFT, %g1
@@ -629,32 +381,78 @@ tlb_fixup_done:
 	 nop
 	/* Not reached... */
 
-/* IMPORTANT NOTE: Whenever making changes here, check
- * trampoline.S as well. -jj */
-	.globl	setup_tba
-setup_tba:	/* i0 = is_starfire */
-	save	%sp, -160, %sp
+	/* This is meant to allow the sharing of this code between
+	 * boot processor invocation (via setup_tba() below) and
+	 * secondary processor startup (via trampoline.S).  The
+	 * former does use this code, the latter does not yet due
+	 * to some complexities.  That should be fixed up at some
+	 * point.
+	 *
+	 * There used to be enormous complexity wrt. transferring
+	 * over from the firmware's trap table to the Linux kernel's.
+	 * For example, there was a chicken & egg problem wrt. building
+	 * the OBP page tables, yet needing to be on the Linux kernel
+	 * trap table (to translate PAGE_OFFSET addresses) in order to
+	 * do that.
+	 *
+	 * We now handle OBP tlb misses differently, via linear lookups
+	 * into the prom_trans[] array.  So that specific problem no
+	 * longer exists.  Yet, unfortunately there are still some issues
+	 * preventing trampoline.S from using this code... ho hum.
+	 */
+	.globl	setup_trap_table
+setup_trap_table:
+	save	%sp, -192, %sp
 
-	rdpr	%tba, %g7
-	sethi	%hi(prom_tba), %o1
-	or	%o1, %lo(prom_tba), %o1
-	stx	%g7, [%o1]
+	/* Force interrupts to be disabled. */
+	rdpr	%pstate, %o1
+	andn	%o1, PSTATE_IE, %o1
+	wrpr	%o1, 0x0, %pstate
+	wrpr	%g0, 15, %pil
+
+	/* Make the firmware call to jump over to the Linux trap table. */
+	call	prom_set_trap_table
+	 sethi	%hi(sparc64_ttable_tl0), %o0
+
+	/* Start using proper page size encodings in ctx register. */
+	sethi	%hi(sparc64_kern_pri_context), %g3
+	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
+	mov	PRIMARY_CONTEXT, %g1
+	stxa	%g2, [%g1] ASI_DMMU
+	membar	#Sync
+
+	/* The Linux trap handlers expect various trap global registers
+	 * to be setup with some fixed values.  So here we set these
+	 * up very carefully.  These globals are:
+	 *
+	 * Alternate Globals (PSTATE_AG):
+	 *
+	 * %g6			--> current_thread_info()
+	 *
+	 * MMU Globals (PSTATE_MG):
+	 *
+	 * %g1			--> TLB_SFSR
+	 * %g2			--> ((_PAGE_VALID | _PAGE_SZ4MB |
+	 *			      _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
+	 *			     ^ 0xfffff80000000000)
+	 * (this %g2 value is used for computing the PAGE_OFFSET kernel
+	 * TLB entries quickly, the virtual address of the fault XOR'd
+	 * with this %g2 value is the PTE to load into the TLB)
+	 * %g3			--> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE
+	 *
+	 * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()):
+	 *
+	 * %g6			--> __irq_work[smp_processor_id()]
+	 */
 
-	/* Setup "Linux" globals 8-) */
 	rdpr	%pstate, %o1
 	mov	%g6, %o2
-	wrpr	%o1, (PSTATE_AG|PSTATE_IE), %pstate
-	sethi	%hi(sparc64_ttable_tl0), %g1
-	wrpr	%g1, %tba
+	wrpr	%o1, PSTATE_AG, %pstate
 	mov	%o2, %g6
 
-	/* Set up MMU globals */
-	wrpr	%o1, (PSTATE_MG|PSTATE_IE), %pstate
-
-	/* Set fixed globals used by dTLB miss handler. */
 #define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
 #define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-
+	wrpr	%o1, PSTATE_MG, %pstate
 	mov	TSB_REG, %g1
 	stxa	%g0, [%g1] ASI_DMMU
 	membar	#Sync
@@ -666,17 +464,17 @@ setup_tba:	/* i0 = is_starfire */
 	sllx	%g2, 32, %g2
 	or	%g2, KERN_LOWBITS, %g2
 
-	BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
-	ba,pt	%xcc, spitfire_vpte_base
+	BRANCH_IF_ANY_CHEETAH(g3,g7,8f)
+	ba,pt	%xcc, 9f
 	 nop
 
-cheetah_vpte_base:
+8:
 	sethi	%uhi(VPTE_BASE_CHEETAH), %g3
 	or	%g3, %ulo(VPTE_BASE_CHEETAH), %g3
 	ba,pt	%xcc, 2f
 	 sllx	%g3, 32, %g3
 
-spitfire_vpte_base:
+9:
 	sethi	%uhi(VPTE_BASE_SPITFIRE), %g3
 	or	%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
 	sllx	%g3, 32, %g3
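The %g2 comment in setup_trap_table above compresses a nice trick worth unpacking: a linear-mapping virtual address is PAGE_OFFSET | paddr, so XOR-ing the faulting address with (pte_attrs ^ PAGE_OFFSET) cancels PAGE_OFFSET and toggles in the attribute bits, yielding the TLB entry in a single xor. A C check of the algebra, using illustrative stand-in attribute bits rather than the real _PAGE_* values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint64_t PAGE_OFFSET = 0xfffff80000000000ULL;
            const uint64_t attrs = 0x8000000000000016ULL;  /* stand-ins */
            const uint64_t g2 = attrs ^ PAGE_OFFSET;  /* KERN_HIGH/LOWBITS */

            uint64_t paddr = 0x0000000007c00000ULL;   /* 4MB aligned */
            uint64_t vaddr = PAGE_OFFSET | paddr;     /* faulting VA */

            /* One xor both recovers paddr and sets the attribute
             * bits, because PAGE_OFFSET cancels itself and paddr
             * does not overlap the attribute bits.
             */
            assert((vaddr ^ g2) == (attrs | paddr));
            return 0;
    }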
@@ -702,48 +500,55 @@ spitfire_vpte_base:
 	sllx	%o2, 32, %o2
 	wr	%o2, %asr25
 
-	/* Ok, we're done setting up all the state our trap mechanims needs,
-	 * now get back into normal globals and let the PROM know what is up.
-	 */
 2:
 	wrpr	%g0, %g0, %wstate
-	wrpr	%o1, PSTATE_IE, %pstate
+	wrpr	%o1, 0x0, %pstate
 
 	call	init_irqwork_curcpu
 	 nop
 
-	call	prom_set_trap_table
-	 sethi	%hi(sparc64_ttable_tl0), %o0
-
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
-	ba,pt	%xcc, 2f
-	 nop
-
-1:	/* Start using proper page size encodings in ctx register. */
-	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %g3
-	mov	PRIMARY_CONTEXT, %g1
-	sllx	%g3, 32, %g3
-	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
-	or	%g3, %g2, %g3
-	stxa	%g3, [%g1] ASI_DMMU
-	membar	#Sync
-
-2:
+	/* Now we can turn interrupts back on. */
 	rdpr	%pstate, %o1
 	or	%o1, PSTATE_IE, %o1
 	wrpr	%o1, 0, %pstate
+	wrpr	%g0, 0x0, %pil
+
+	ret
+	 restore
+
+	.globl	setup_tba
+setup_tba:	/* i0 = is_starfire */
+	save	%sp, -192, %sp
+
+	/* The boot processor is the only cpu which invokes this
+	 * routine, the other cpus set things up via trampoline.S.
+	 * So save the OBP trap table address here.
+	 */
+	rdpr	%tba, %g7
+	sethi	%hi(prom_tba), %o1
+	or	%o1, %lo(prom_tba), %o1
+	stx	%g7, [%o1]
+
+	call	setup_trap_table
+	 nop
 
 	ret
 	 restore
+sparc64_boot_end:
+
+#include "systbls.S"
+#include "ktlb.S"
+#include "etrap.S"
+#include "rtrap.S"
+#include "winfixup.S"
+#include "entry.S"
 
 /*
- * The following skips make sure the trap table in ttable.S is aligned
+ * The following skip makes sure the trap table in ttable.S is aligned
  * on a 32K boundary as required by the v9 specs for TBA register.
  */
-sparc64_boot_end:
-	.skip	0x2000 + _start - sparc64_boot_end
-bootup_user_stack_end:
-	.skip	0x2000
+1:
+	.skip	0x4000 + _start - 1b
 
 #ifdef CONFIG_SBUS
 /* This is just a hack to fool make depend config.h discovering
@@ -755,20 +560,6 @@ bootup_user_stack_end:
 ! 0x0000000000408000
 
 #include "ttable.S"
-#include "systbls.S"
-
-	.align	1024
-	.globl	swapper_pg_dir
-swapper_pg_dir:
-	.word	0
-
-#include "etrap.S"
-#include "rtrap.S"
-#include "winfixup.S"
-#include "entry.S"
-
-	/* This is just anal retentiveness on my part... */
-	.align	16384
 
 	.data
 	.align	8
@@ -776,8 +567,11 @@ swapper_pg_dir:
 prom_tba:	.xword	0
 tlb_type:	.word	0	/* Must NOT end up in BSS */
 	.section	".fixup",#alloc,#execinstr
-	.globl	__ret_efault
+
+	.globl	__ret_efault, __retl_efault
 __ret_efault:
 	ret
 	 restore %g0, -EFAULT, %o0
-
+__retl_efault:
+	retl
+	 mov	-EFAULT, %o0
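The hand-built PROM call frames in sparc64_boot above all follow the IEEE 1275 client-interface convention. A hedged C view of the layout those stx stores fill in (field names are mine; the kernel's own marshalling lives in arch/sparc64/prom/p1275.c):

    #include <stdint.h>

    /* IEEE 1275 client-interface argument array, as built on the
     * stack by sparc64_boot: offsets 0x00/0x08/0x10 hold the service
     * name and the argument/return counts, then the slots follow.
     * The stores at [%sp + 2047 + 128 + N] match these offsets
     * (2047 is the sparc64 stack bias, 128 skips the register save
     * area).
     */
    struct p1275_cif_args {
            const char *service;  /* e.g. "finddevice", "call-method" */
            uint64_t num_args;    /* argument slots used */
            uint64_t num_rets;    /* return slots used */
            uint64_t slots[22];   /* args first, then return values */
    };

    /* The PROM entry point (saved in %l7) is called with a pointer
     * to this block as its single argument; results come back in
     * slots[].
     */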
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c9b69167632a..233526ba3abe 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -27,6 +27,7 @@
 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/irq.h>
+#include <asm/io.h>
 #include <asm/sbus.h>
 #include <asm/iommu.h>
 #include <asm/upa.h>
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
index b5e32dfa4fbc..4951ff8f6877 100644
--- a/arch/sparc64/kernel/itlb_base.S
+++ b/arch/sparc64/kernel/itlb_base.S
@@ -15,14 +15,12 @@
  */
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 				srax	r1, 10, r2
-#define CREATE_VPTE_OFFSET2(r1, r2)
-#define CREATE_VPTE_NOP		nop
+#define CREATE_VPTE_OFFSET2(r1, r2)	nop
 #else /* PAGE_SHIFT */
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 				srax	r1, PAGE_SHIFT, r2
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 				sllx	r2, 3, r2
-#define CREATE_VPTE_NOP
 #endif /* PAGE_SHIFT */
 
 
@@ -36,6 +34,7 @@
  */
 
 /* ITLB ** ICACHE line 1: Quick user TLB misses		*/
+	mov		TLB_SFSR, %g1
 	ldxa		[%g1 + %g1] ASI_IMMU, %g4	! Get TAG_ACCESS
 	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
 	CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
@@ -43,41 +42,38 @@
 1:	brgez,pn	%g5, 3f				! Not valid, branch out
 	 sethi		%hi(_PAGE_EXEC), %g4		! Delay-slot
 	andcc		%g5, %g4, %g0			! Executable?
+
+/* ITLB ** ICACHE line 2: Real faults			*/
 	be,pn		%xcc, 3f			! Nope, branch.
 	 nop						! Delay-slot
 2:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! Load PTE into TLB
 	retry						! Trap return
-3:	rdpr		%pstate, %g4			! Move into alternate globals
-
-/* ITLB ** ICACHE line 2: Real faults			*/
+3:	rdpr		%pstate, %g4			! Move into alt-globals
 	wrpr		%g4, PSTATE_AG|PSTATE_MG, %pstate
 	rdpr		%tpc, %g5			! And load faulting VA
 	mov		FAULT_CODE_ITLB, %g4		! It was read from ITLB
-sparc64_realfault_common:				! Called by TL0 dtlb_miss too
+
+/* ITLB ** ICACHE line 3: Finish faults			*/
+sparc64_realfault_common:				! Called by dtlb_miss
 	stb		%g4, [%g6 + TI_FAULT_CODE]
 	stx		%g5, [%g6 + TI_FAULT_ADDR]
 	ba,pt		%xcc, etrap			! Save state
 1:	 rd		%pc, %g7			! ...
-	nop
-
-/* ITLB ** ICACHE line 3: Finish faults + window fixups */
 	call		do_sparc64_fault		! Call fault handler
 	 add		%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
 	ba,pt		%xcc, rtrap_clr_l6		! Restore cpu state
 	 nop
+
+/* ITLB ** ICACHE line 4: Window fixups		*/
 winfix_trampoline:
 	rdpr		%tpc, %g3			! Prepare winfixup TNPC
-	or		%g3, 0x7c, %g3			! Compute offset to branch
+	or		%g3, 0x7c, %g3			! Compute branch offset
 	wrpr		%g3, %tnpc			! Write it into TNPC
 	done						! Do it to it
-
-/* ITLB ** ICACHE line 4: Unused...			*/
 	nop
 	nop
 	nop
 	nop
-	CREATE_VPTE_NOP
 
 #undef CREATE_VPTE_OFFSET1
 #undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
new file mode 100644
index 000000000000..d9244d3c9f73
--- /dev/null
+++ b/arch/sparc64/kernel/ktlb.S
@@ -0,0 +1,194 @@
1/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
2 *
3 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
4 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7*/
8
9#include <linux/config.h>
10#include <asm/head.h>
11#include <asm/asi.h>
12#include <asm/page.h>
13#include <asm/pgtable.h>
14
15 .text
16 .align 32
17
18/*
19 * On a second level vpte miss, check whether the original fault is to the OBP
20 * range (note that this is only possible for instruction miss, data misses to
21 * obp range do not use vpte). If so, go back directly to the faulting address.
22 * This is because we want to read the tpc, otherwise we have no way of knowing
23 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
24 * also ensures no vpte range addresses are dropped into tlb while obp is
25 * executing (see inherit_locked_prom_mappings() rant).
26 */
27sparc64_vpte_nucleus:
28 /* Note that kvmap below has verified that the address is
29 * in the range MODULES_VADDR --> VMALLOC_END already. So
30 * here we need only check if it is an OBP address or not.
31 */
32 sethi %hi(LOW_OBP_ADDRESS), %g5
33 cmp %g4, %g5
34 blu,pn %xcc, kern_vpte
35 mov 0x1, %g5
36 sllx %g5, 32, %g5
37 cmp %g4, %g5
38 blu,pn %xcc, vpte_insn_obp
39 nop
40
 41	/* These two instructions are patched by paging_init(). */
42kern_vpte:
43 sethi %hi(swapper_pgd_zero), %g5
44 lduw [%g5 + %lo(swapper_pgd_zero)], %g5
45
46 /* With kernel PGD in %g5, branch back into dtlb_backend. */
47 ba,pt %xcc, sparc64_kpte_continue
48 andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
49
50vpte_noent:
51 /* Restore previous TAG_ACCESS, %g5 is zero, and we will
52 * skip over the trap instruction so that the top level
 53	 * TLB miss handler will think this %g5 value is just an
54 * invalid PTE, thus branching to full fault processing.
55 */
56 mov TLB_SFSR, %g1
57 stxa %g4, [%g1 + %g1] ASI_DMMU
58 done
59
60vpte_insn_obp:
61 /* Behave as if we are at TL0. */
62 wrpr %g0, 1, %tl
63 rdpr %tpc, %g4 /* Find original faulting iaddr */
64 srlx %g4, 13, %g4 /* Throw out context bits */
65 sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
66
67 /* Restore previous TAG_ACCESS. */
68 mov TLB_SFSR, %g1
69 stxa %g4, [%g1 + %g1] ASI_IMMU
70
71 sethi %hi(prom_trans), %g5
72 or %g5, %lo(prom_trans), %g5
73
741: ldx [%g5 + 0x00], %g6 ! base
75 brz,a,pn %g6, longpath ! no more entries, fail
76 mov TLB_SFSR, %g1 ! and restore %g1
77 ldx [%g5 + 0x08], %g1 ! len
78 add %g6, %g1, %g1 ! end
79 cmp %g6, %g4
80 bgu,pt %xcc, 2f
81 cmp %g4, %g1
82 bgeu,pt %xcc, 2f
83 ldx [%g5 + 0x10], %g1 ! PTE
84
85 /* TLB load, restore %g1, and return from trap. */
86 sub %g4, %g6, %g6
87 add %g1, %g6, %g5
88 mov TLB_SFSR, %g1
89 stxa %g5, [%g0] ASI_ITLB_DATA_IN
90 retry
91
922: ba,pt %xcc, 1b
93 add %g5, (3 * 8), %g5 ! next entry
94
95kvmap_do_obp:
96 sethi %hi(prom_trans), %g5
97 or %g5, %lo(prom_trans), %g5
98 srlx %g4, 13, %g4
99 sllx %g4, 13, %g4
100
1011: ldx [%g5 + 0x00], %g6 ! base
102 brz,a,pn %g6, longpath ! no more entries, fail
103 mov TLB_SFSR, %g1 ! and restore %g1
104 ldx [%g5 + 0x08], %g1 ! len
105 add %g6, %g1, %g1 ! end
106 cmp %g6, %g4
107 bgu,pt %xcc, 2f
108 cmp %g4, %g1
109 bgeu,pt %xcc, 2f
110 ldx [%g5 + 0x10], %g1 ! PTE
111
112 /* TLB load, restore %g1, and return from trap. */
113 sub %g4, %g6, %g6
114 add %g1, %g6, %g5
115 mov TLB_SFSR, %g1
116 stxa %g5, [%g0] ASI_DTLB_DATA_IN
117 retry
118
1192: ba,pt %xcc, 1b
120 add %g5, (3 * 8), %g5 ! next entry
121
122/*
123 * On a first level data miss, check whether this is to the OBP range (note
124 * that such accesses can be made by prom, as well as by kernel using
125 * prom_getproperty on "address"), and if so, do not use vpte access ...
126 * rather, use information saved during inherit_prom_mappings() using 8k
127 * pagesize.
128 */
129 .align 32
130kvmap:
131 brgez,pn %g4, kvmap_nonlinear
132 nop
133
134#ifdef CONFIG_DEBUG_PAGEALLOC
135 .globl kvmap_linear_patch
136kvmap_linear_patch:
137#endif
138 ba,pt %xcc, kvmap_load
139 xor %g2, %g4, %g5
140
141#ifdef CONFIG_DEBUG_PAGEALLOC
142 sethi %hi(swapper_pg_dir), %g5
143 or %g5, %lo(swapper_pg_dir), %g5
144 sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
145 srlx %g6, 64 - PAGE_SHIFT, %g6
146 andn %g6, 0x3, %g6
147 lduw [%g5 + %g6], %g5
148 brz,pn %g5, longpath
149 sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
150 srlx %g6, 64 - PAGE_SHIFT, %g6
151 sllx %g5, 11, %g5
152 andn %g6, 0x3, %g6
153 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
154 brz,pn %g5, longpath
155 sllx %g4, 64 - PMD_SHIFT, %g6
156 srlx %g6, 64 - PAGE_SHIFT, %g6
157 sllx %g5, 11, %g5
158 andn %g6, 0x7, %g6
159 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
160 brz,pn %g5, longpath
161 nop
162 ba,a,pt %xcc, kvmap_load
163#endif
164
165kvmap_nonlinear:
166 sethi %hi(MODULES_VADDR), %g5
167 cmp %g4, %g5
168 blu,pn %xcc, longpath
169 mov (VMALLOC_END >> 24), %g5
170 sllx %g5, 24, %g5
171 cmp %g4, %g5
172 bgeu,pn %xcc, longpath
173 nop
174
175kvmap_check_obp:
176 sethi %hi(LOW_OBP_ADDRESS), %g5
177 cmp %g4, %g5
178 blu,pn %xcc, kvmap_vmalloc_addr
179 mov 0x1, %g5
180 sllx %g5, 32, %g5
181 cmp %g4, %g5
182 blu,pn %xcc, kvmap_do_obp
183 nop
184
185kvmap_vmalloc_addr:
186 /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
187 ldxa [%g3 + %g6] ASI_N, %g5
188 brgez,pn %g5, longpath
189 nop
190
191kvmap_load:
192 /* PTE is valid, load into TLB and return from trap. */
193 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
194 retry
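
The branch structure of kvmap above reads more easily as straight-line C. A
minimal user-space sketch of the same address classification follows; the
MODULES_VADDR/VMALLOC_END constants are illustrative placeholders (the kernel
derives the real ones from its page-table layout), and the enum values stand
in for the TLB-load and fault paths:

#include <stdint.h>
#include <stdio.h>

#define MODULES_VADDR   0x0000000010000000UL	/* illustrative */
#define VMALLOC_END     0x0000000300000000UL	/* illustrative */
#define LOW_OBP_ADDRESS 0x00000000f0000000UL
#define HI_OBP_ADDRESS  0x0000000100000000UL	/* 1UL << 32 */

enum kvmap_class { KV_LINEAR, KV_VMALLOC, KV_OBP, KV_FAULT };

/* Mirrors kvmap's branches: "negative" (top-bit-set) addresses take the
 * linear mapping, everything else must fall inside the
 * MODULES_VADDR..VMALLOC_END window, which is then split into the OBP
 * range and ordinary vmalloc space. */
static enum kvmap_class classify(uint64_t va)
{
	if ((int64_t) va < 0)			/* brgez,pn not taken */
		return KV_LINEAR;
	if (va < MODULES_VADDR || va >= VMALLOC_END)
		return KV_FAULT;		/* longpath */
	if (va >= LOW_OBP_ADDRESS && va < HI_OBP_ADDRESS)
		return KV_OBP;			/* kvmap_do_obp */
	return KV_VMALLOC;			/* kvmap_vmalloc_addr */
}

int main(void)
{
	printf("%d\n", classify(0xfffff80000000000UL));	/* KV_LINEAR  */
	printf("%d\n", classify(0x0000000020000000UL));	/* KV_VMALLOC */
	return 0;
}
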
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 425c60cfea19..a11910be1013 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -49,12 +49,6 @@ static void __iommu_flushall(struct pci_iommu *iommu)
49 49
50 /* Ensure completion of previous PIO writes. */ 50 /* Ensure completion of previous PIO writes. */
51 (void) pci_iommu_read(iommu->write_complete_reg); 51 (void) pci_iommu_read(iommu->write_complete_reg);
52
53 /* Now update everyone's flush point. */
54 for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
55 iommu->alloc_info[entry].flush =
56 iommu->alloc_info[entry].next;
57 }
58} 52}
59 53
60#define IOPTE_CONSISTENT(CTX) \ 54#define IOPTE_CONSISTENT(CTX) \
@@ -80,120 +74,117 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
80 iopte_val(*iopte) = val; 74 iopte_val(*iopte) = val;
81} 75}
82 76
83void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize) 77/* Based largely upon the ppc64 iommu allocator. */
78static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
84{ 79{
85 int i; 80 struct pci_iommu_arena *arena = &iommu->arena;
86 81 unsigned long n, i, start, end, limit;
87 tsbsize /= sizeof(iopte_t); 82 int pass;
88 83
89 for (i = 0; i < tsbsize; i++) 84 limit = arena->limit;
90 iopte_make_dummy(iommu, &iommu->page_table[i]); 85 start = arena->hint;
91} 86 pass = 0;
92 87
93static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages) 88again:
94{ 89 n = find_next_zero_bit(arena->map, limit, start);
95 iopte_t *iopte, *limit, *first; 90 end = n + npages;
96 unsigned long cnum, ent, flush_point; 91 if (unlikely(end >= limit)) {
97 92 if (likely(pass < 1)) {
98 cnum = 0; 93 limit = start;
99 while ((1UL << cnum) < npages) 94 start = 0;
100 cnum++; 95 __iommu_flushall(iommu);
101 iopte = (iommu->page_table + 96 pass++;
102 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS))); 97 goto again;
103 98 } else {
104 if (cnum == 0) 99 /* Scanned the whole thing, give up. */
105 limit = (iommu->page_table + 100 return -1;
106 iommu->lowest_consistent_map);
107 else
108 limit = (iopte +
109 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
110
111 iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
112 flush_point = iommu->alloc_info[cnum].flush;
113
114 first = iopte;
115 for (;;) {
116 if (IOPTE_IS_DUMMY(iommu, iopte)) {
117 if ((iopte + (1 << cnum)) >= limit)
118 ent = 0;
119 else
120 ent = ent + 1;
121 iommu->alloc_info[cnum].next = ent;
122 if (ent == flush_point)
123 __iommu_flushall(iommu);
124 break;
125 } 101 }
126 iopte += (1 << cnum); 102 }
127 ent++; 103
128 if (iopte >= limit) { 104 for (i = n; i < end; i++) {
129 iopte = (iommu->page_table + 105 if (test_bit(i, arena->map)) {
130 (cnum << 106 start = i + 1;
131 (iommu->page_table_sz_bits - PBM_LOGCLUSTERS))); 107 goto again;
132 ent = 0;
133 } 108 }
134 if (ent == flush_point)
135 __iommu_flushall(iommu);
136 if (iopte == first)
137 goto bad;
138 } 109 }
139 110
140 /* I've got your streaming cluster right here buddy boy... */ 111 for (i = n; i < end; i++)
141 return iopte; 112 __set_bit(i, arena->map);
142 113
143bad: 114 arena->hint = end;
144 printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n", 115
145 npages); 116 return n;
146 return NULL;
147} 117}
148 118
149static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base, 119static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
150 unsigned long npages, unsigned long ctx)
151{ 120{
152 unsigned long cnum, ent; 121 unsigned long i;
153 122
154 cnum = 0; 123 for (i = base; i < (base + npages); i++)
155 while ((1UL << cnum) < npages) 124 __clear_bit(i, arena->map);
156 cnum++; 125}
157 126
158 ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits)) 127void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
159 >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits); 128{
129 unsigned long i, tsbbase, order, sz, num_tsb_entries;
130
131 num_tsb_entries = tsbsize / sizeof(iopte_t);
132
133 /* Setup initial software IOMMU state. */
134 spin_lock_init(&iommu->lock);
135 iommu->ctx_lowest_free = 1;
136 iommu->page_table_map_base = dma_offset;
137 iommu->dma_addr_mask = dma_addr_mask;
138
139 /* Allocate and initialize the free area map. */
140 sz = num_tsb_entries / 8;
141 sz = (sz + 7UL) & ~7UL;
142 iommu->arena.map = kmalloc(sz, GFP_KERNEL);
143 if (!iommu->arena.map) {
144 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
145 prom_halt();
146 }
147 memset(iommu->arena.map, 0, sz);
148 iommu->arena.limit = num_tsb_entries;
160 149
161 /* If the global flush might not have caught this entry, 150 /* Allocate and initialize the dummy page which we
162 * adjust the flush point such that we will flush before 151 * set inactive IO PTEs to point to.
163 * ever trying to reuse it.
164 */ 152 */
165#define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y))) 153 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
166 if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush)) 154 if (!iommu->dummy_page) {
167 iommu->alloc_info[cnum].flush = ent; 155 prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
168#undef between 156 prom_halt();
157 }
158 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
159 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
160
161 /* Now allocate and setup the IOMMU page table itself. */
162 order = get_order(tsbsize);
163 tsbbase = __get_free_pages(GFP_KERNEL, order);
164 if (!tsbbase) {
165 prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
166 prom_halt();
167 }
168 iommu->page_table = (iopte_t *)tsbbase;
169
170 for (i = 0; i < num_tsb_entries; i++)
171 iopte_make_dummy(iommu, &iommu->page_table[i]);
169} 172}
170 173
171/* We allocate consistent mappings from the end of cluster zero. */ 174static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
172static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
173{ 175{
174 iopte_t *iopte; 176 long entry;
175 177
176 iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)); 178 entry = pci_arena_alloc(iommu, npages);
177 while (iopte > iommu->page_table) { 179 if (unlikely(entry < 0))
178 iopte--; 180 return NULL;
179 if (IOPTE_IS_DUMMY(iommu, iopte)) {
180 unsigned long tmp = npages;
181 181
182 while (--tmp) { 182 return iommu->page_table + entry;
183 iopte--; 183}
184 if (!IOPTE_IS_DUMMY(iommu, iopte))
185 break;
186 }
187 if (tmp == 0) {
188 u32 entry = (iopte - iommu->page_table);
189 184
190 if (entry < iommu->lowest_consistent_map) 185static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
191 iommu->lowest_consistent_map = entry; 186{
192 return iopte; 187 pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
193 }
194 }
195 }
196 return NULL;
197} 188}
198 189
199static int iommu_alloc_ctx(struct pci_iommu *iommu) 190static int iommu_alloc_ctx(struct pci_iommu *iommu)
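
The replacement allocator is a single bitmap with a hint and one retry pass:
scan forward from the hint, and only if that fails flush the IOMMU and rescan
once from the bottom before giving up. A self-contained sketch of the same
two-pass scan, with bit helpers and sizes simplified for illustration (the
kernel uses find_next_zero_bit() on unsigned longs):

#include <stdio.h>

struct arena {
	unsigned char map[128];		/* one bit per IOMMU page */
	unsigned long limit;		/* number of valid bits   */
	unsigned long hint;		/* where to search next   */
};

static int test_bit(const struct arena *a, unsigned long i)
{
	return (a->map[i >> 3] >> (i & 7)) & 1;
}

static void set_bit(struct arena *a, unsigned long i)
{
	a->map[i >> 3] |= 1 << (i & 7);
}

static long arena_alloc(struct arena *a, unsigned long npages)
{
	unsigned long start = a->hint, limit = a->limit, n, i;
	int pass = 0;

again:
	for (n = start; n < limit && test_bit(a, n); n++)
		/* find_next_zero_bit() equivalent */;
	if (n + npages > limit) {
		if (pass++ < 1) {	/* one retry from the bottom */
			limit = start;
			start = 0;
			/* __iommu_flushall(iommu) would go here */
			goto again;
		}
		return -1;		/* whole map scanned, give up */
	}
	for (i = n; i < n + npages; i++) {
		if (test_bit(a, i)) {	/* hole too small, keep going */
			start = i + 1;
			goto again;
		}
	}
	for (i = n; i < n + npages; i++)
		set_bit(a, i);
	a->hint = n + npages;
	return (long) n;
}

int main(void)
{
	struct arena a = { .limit = 1024, .hint = 0 };
	long first = arena_alloc(&a, 4);
	long second = arena_alloc(&a, 4);

	printf("%ld %ld\n", first, second);	/* 0 4 */
	return 0;
}

Note the map sizing in pci_iommu_table_init() above: one bit per iopte, so a
1MB TSB (128K ioptes of 8 bytes each) needs a 128K-bit map, i.e. 16KB,
rounded up to an 8-byte multiple.
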
@@ -233,7 +224,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
233 struct pcidev_cookie *pcp; 224 struct pcidev_cookie *pcp;
234 struct pci_iommu *iommu; 225 struct pci_iommu *iommu;
235 iopte_t *iopte; 226 iopte_t *iopte;
236 unsigned long flags, order, first_page, ctx; 227 unsigned long flags, order, first_page;
237 void *ret; 228 void *ret;
238 int npages; 229 int npages;
239 230
@@ -251,9 +242,10 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
251 iommu = pcp->pbm->iommu; 242 iommu = pcp->pbm->iommu;
252 243
253 spin_lock_irqsave(&iommu->lock, flags); 244 spin_lock_irqsave(&iommu->lock, flags);
254 iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT); 245 iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
255 if (iopte == NULL) { 246 spin_unlock_irqrestore(&iommu->lock, flags);
256 spin_unlock_irqrestore(&iommu->lock, flags); 247
248 if (unlikely(iopte == NULL)) {
257 free_pages(first_page, order); 249 free_pages(first_page, order);
258 return NULL; 250 return NULL;
259 } 251 }
@@ -262,31 +254,15 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
262 ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); 254 ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
263 ret = (void *) first_page; 255 ret = (void *) first_page;
264 npages = size >> IO_PAGE_SHIFT; 256 npages = size >> IO_PAGE_SHIFT;
265 ctx = 0;
266 if (iommu->iommu_ctxflush)
267 ctx = iommu_alloc_ctx(iommu);
268 first_page = __pa(first_page); 257 first_page = __pa(first_page);
269 while (npages--) { 258 while (npages--) {
270 iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) | 259 iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
271 IOPTE_WRITE | 260 IOPTE_WRITE |
272 (first_page & IOPTE_PAGE)); 261 (first_page & IOPTE_PAGE));
273 iopte++; 262 iopte++;
274 first_page += IO_PAGE_SIZE; 263 first_page += IO_PAGE_SIZE;
275 } 264 }
276 265
277 {
278 int i;
279 u32 daddr = *dma_addrp;
280
281 npages = size >> IO_PAGE_SHIFT;
282 for (i = 0; i < npages; i++) {
283 pci_iommu_write(iommu->iommu_flush, daddr);
284 daddr += IO_PAGE_SIZE;
285 }
286 }
287
288 spin_unlock_irqrestore(&iommu->lock, flags);
289
290 return ret; 266 return ret;
291} 267}
292 268
@@ -296,7 +272,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
296 struct pcidev_cookie *pcp; 272 struct pcidev_cookie *pcp;
297 struct pci_iommu *iommu; 273 struct pci_iommu *iommu;
298 iopte_t *iopte; 274 iopte_t *iopte;
299 unsigned long flags, order, npages, i, ctx; 275 unsigned long flags, order, npages;
300 276
301 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 277 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
302 pcp = pdev->sysdata; 278 pcp = pdev->sysdata;
@@ -306,46 +282,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
306 282
307 spin_lock_irqsave(&iommu->lock, flags); 283 spin_lock_irqsave(&iommu->lock, flags);
308 284
309 if ((iopte - iommu->page_table) == 285 free_npages(iommu, dvma, npages);
310 iommu->lowest_consistent_map) {
311 iopte_t *walk = iopte + npages;
312 iopte_t *limit;
313
314 limit = (iommu->page_table +
315 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
316 while (walk < limit) {
317 if (!IOPTE_IS_DUMMY(iommu, walk))
318 break;
319 walk++;
320 }
321 iommu->lowest_consistent_map =
322 (walk - iommu->page_table);
323 }
324
325 /* Data for consistent mappings cannot enter the streaming
326 * buffers, so we only need to update the TSB. We flush
327 * the IOMMU here as well to prevent conflicts with the
328 * streaming mapping deferred tlb flush scheme.
329 */
330
331 ctx = 0;
332 if (iommu->iommu_ctxflush)
333 ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
334
335 for (i = 0; i < npages; i++, iopte++)
336 iopte_make_dummy(iommu, iopte);
337
338 if (iommu->iommu_ctxflush) {
339 pci_iommu_write(iommu->iommu_ctxflush, ctx);
340 } else {
341 for (i = 0; i < npages; i++) {
342 u32 daddr = dvma + (i << IO_PAGE_SHIFT);
343
344 pci_iommu_write(iommu->iommu_flush, daddr);
345 }
346 }
347
348 iommu_free_ctx(iommu, ctx);
349 286
350 spin_unlock_irqrestore(&iommu->lock, flags); 287 spin_unlock_irqrestore(&iommu->lock, flags);
351 288
@@ -372,25 +309,27 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
372 iommu = pcp->pbm->iommu; 309 iommu = pcp->pbm->iommu;
373 strbuf = &pcp->pbm->stc; 310 strbuf = &pcp->pbm->stc;
374 311
375 if (direction == PCI_DMA_NONE) 312 if (unlikely(direction == PCI_DMA_NONE))
376 BUG(); 313 goto bad_no_ctx;
377 314
378 oaddr = (unsigned long)ptr; 315 oaddr = (unsigned long)ptr;
379 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 316 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
380 npages >>= IO_PAGE_SHIFT; 317 npages >>= IO_PAGE_SHIFT;
381 318
382 spin_lock_irqsave(&iommu->lock, flags); 319 spin_lock_irqsave(&iommu->lock, flags);
320 base = alloc_npages(iommu, npages);
321 ctx = 0;
322 if (iommu->iommu_ctxflush)
323 ctx = iommu_alloc_ctx(iommu);
324 spin_unlock_irqrestore(&iommu->lock, flags);
383 325
384 base = alloc_streaming_cluster(iommu, npages); 326 if (unlikely(!base))
385 if (base == NULL)
386 goto bad; 327 goto bad;
328
387 bus_addr = (iommu->page_table_map_base + 329 bus_addr = (iommu->page_table_map_base +
388 ((base - iommu->page_table) << IO_PAGE_SHIFT)); 330 ((base - iommu->page_table) << IO_PAGE_SHIFT));
389 ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 331 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
390 base_paddr = __pa(oaddr & IO_PAGE_MASK); 332 base_paddr = __pa(oaddr & IO_PAGE_MASK);
391 ctx = 0;
392 if (iommu->iommu_ctxflush)
393 ctx = iommu_alloc_ctx(iommu);
394 if (strbuf->strbuf_enabled) 333 if (strbuf->strbuf_enabled)
395 iopte_protection = IOPTE_STREAMING(ctx); 334 iopte_protection = IOPTE_STREAMING(ctx);
396 else 335 else
@@ -401,12 +340,13 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
401 for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) 340 for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
402 iopte_val(*base) = iopte_protection | base_paddr; 341 iopte_val(*base) = iopte_protection | base_paddr;
403 342
404 spin_unlock_irqrestore(&iommu->lock, flags);
405
406 return ret; 343 return ret;
407 344
408bad: 345bad:
409 spin_unlock_irqrestore(&iommu->lock, flags); 346 iommu_free_ctx(iommu, ctx);
347bad_no_ctx:
348 if (printk_ratelimit())
349 WARN_ON(1);
410 return PCI_DMA_ERROR_CODE; 350 return PCI_DMA_ERROR_CODE;
411} 351}
412 352
@@ -481,10 +421,13 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
481 struct pci_iommu *iommu; 421 struct pci_iommu *iommu;
482 struct pci_strbuf *strbuf; 422 struct pci_strbuf *strbuf;
483 iopte_t *base; 423 iopte_t *base;
484 unsigned long flags, npages, ctx; 424 unsigned long flags, npages, ctx, i;
485 425
486 if (direction == PCI_DMA_NONE) 426 if (unlikely(direction == PCI_DMA_NONE)) {
487 BUG(); 427 if (printk_ratelimit())
428 WARN_ON(1);
429 return;
430 }
488 431
489 pcp = pdev->sysdata; 432 pcp = pdev->sysdata;
490 iommu = pcp->pbm->iommu; 433 iommu = pcp->pbm->iommu;
@@ -510,13 +453,14 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
510 453
511 /* Step 1: Kick data out of streaming buffers if necessary. */ 454 /* Step 1: Kick data out of streaming buffers if necessary. */
512 if (strbuf->strbuf_enabled) 455 if (strbuf->strbuf_enabled)
513 pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); 456 pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
457 npages, direction);
514 458
515 /* Step 2: Clear out first TSB entry. */ 459 /* Step 2: Clear out TSB entries. */
516 iopte_make_dummy(iommu, base); 460 for (i = 0; i < npages; i++)
461 iopte_make_dummy(iommu, base + i);
517 462
518 free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base, 463 free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
519 npages, ctx);
520 464
521 iommu_free_ctx(iommu, ctx); 465 iommu_free_ctx(iommu, ctx);
522 466
@@ -621,6 +565,8 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
621 pci_map_single(pdev, 565 pci_map_single(pdev,
622 (page_address(sglist->page) + sglist->offset), 566 (page_address(sglist->page) + sglist->offset),
623 sglist->length, direction); 567 sglist->length, direction);
568 if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
569 return 0;
624 sglist->dma_length = sglist->length; 570 sglist->dma_length = sglist->length;
625 return 1; 571 return 1;
626 } 572 }
@@ -629,21 +575,29 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
629 iommu = pcp->pbm->iommu; 575 iommu = pcp->pbm->iommu;
630 strbuf = &pcp->pbm->stc; 576 strbuf = &pcp->pbm->stc;
631 577
632 if (direction == PCI_DMA_NONE) 578 if (unlikely(direction == PCI_DMA_NONE))
633 BUG(); 579 goto bad_no_ctx;
634 580
635 /* Step 1: Prepare scatter list. */ 581 /* Step 1: Prepare scatter list. */
636 582
637 npages = prepare_sg(sglist, nelems); 583 npages = prepare_sg(sglist, nelems);
638 584
639 /* Step 2: Allocate a cluster. */ 585 /* Step 2: Allocate a cluster and context, if necessary. */
640 586
641 spin_lock_irqsave(&iommu->lock, flags); 587 spin_lock_irqsave(&iommu->lock, flags);
642 588
643 base = alloc_streaming_cluster(iommu, npages); 589 base = alloc_npages(iommu, npages);
590 ctx = 0;
591 if (iommu->iommu_ctxflush)
592 ctx = iommu_alloc_ctx(iommu);
593
594 spin_unlock_irqrestore(&iommu->lock, flags);
595
644 if (base == NULL) 596 if (base == NULL)
645 goto bad; 597 goto bad;
646 dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT); 598
599 dma_base = iommu->page_table_map_base +
600 ((base - iommu->page_table) << IO_PAGE_SHIFT);
647 601
648 /* Step 3: Normalize DMA addresses. */ 602 /* Step 3: Normalize DMA addresses. */
649 used = nelems; 603 used = nelems;
@@ -656,30 +610,28 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
656 } 610 }
657 used = nelems - used; 611 used = nelems - used;
658 612
659 /* Step 4: Choose a context if necessary. */ 613 /* Step 4: Create the mappings. */
660 ctx = 0;
661 if (iommu->iommu_ctxflush)
662 ctx = iommu_alloc_ctx(iommu);
663
664 /* Step 5: Create the mappings. */
665 if (strbuf->strbuf_enabled) 614 if (strbuf->strbuf_enabled)
666 iopte_protection = IOPTE_STREAMING(ctx); 615 iopte_protection = IOPTE_STREAMING(ctx);
667 else 616 else
668 iopte_protection = IOPTE_CONSISTENT(ctx); 617 iopte_protection = IOPTE_CONSISTENT(ctx);
669 if (direction != PCI_DMA_TODEVICE) 618 if (direction != PCI_DMA_TODEVICE)
670 iopte_protection |= IOPTE_WRITE; 619 iopte_protection |= IOPTE_WRITE;
671 fill_sg (base, sglist, used, nelems, iopte_protection); 620
621 fill_sg(base, sglist, used, nelems, iopte_protection);
622
672#ifdef VERIFY_SG 623#ifdef VERIFY_SG
673 verify_sglist(sglist, nelems, base, npages); 624 verify_sglist(sglist, nelems, base, npages);
674#endif 625#endif
675 626
676 spin_unlock_irqrestore(&iommu->lock, flags);
677
678 return used; 627 return used;
679 628
680bad: 629bad:
681 spin_unlock_irqrestore(&iommu->lock, flags); 630 iommu_free_ctx(iommu, ctx);
682 return PCI_DMA_ERROR_CODE; 631bad_no_ctx:
632 if (printk_ratelimit())
633 WARN_ON(1);
634 return 0;
683} 635}
684 636
685/* Unmap a set of streaming mode DMA translations. */ 637/* Unmap a set of streaming mode DMA translations. */
@@ -692,8 +644,10 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
692 unsigned long flags, ctx, i, npages; 644 unsigned long flags, ctx, i, npages;
693 u32 bus_addr; 645 u32 bus_addr;
694 646
695 if (direction == PCI_DMA_NONE) 647 if (unlikely(direction == PCI_DMA_NONE)) {
696 BUG(); 648 if (printk_ratelimit())
649 WARN_ON(1);
650 }
697 651
698 pcp = pdev->sysdata; 652 pcp = pdev->sysdata;
699 iommu = pcp->pbm->iommu; 653 iommu = pcp->pbm->iommu;
@@ -705,7 +659,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
705 if (sglist[i].dma_length == 0) 659 if (sglist[i].dma_length == 0)
706 break; 660 break;
707 i--; 661 i--;
708 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT; 662 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
663 bus_addr) >> IO_PAGE_SHIFT;
709 664
710 base = iommu->page_table + 665 base = iommu->page_table +
711 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 666 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -726,11 +681,11 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
726 if (strbuf->strbuf_enabled) 681 if (strbuf->strbuf_enabled)
727 pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); 682 pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
728 683
729 /* Step 2: Clear out first TSB entry. */ 684 /* Step 2: Clear out the TSB entries. */
730 iopte_make_dummy(iommu, base); 685 for (i = 0; i < npages; i++)
686 iopte_make_dummy(iommu, base + i);
731 687
732 free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base, 688 free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
733 npages, ctx);
734 689
735 iommu_free_ctx(iommu, ctx); 690 iommu_free_ctx(iommu, ctx);
736 691
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 6ed1ef25e0ac..c03ed5f49d31 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1207,13 +1207,9 @@ static void psycho_scan_bus(struct pci_controller_info *p)
1207static void psycho_iommu_init(struct pci_controller_info *p) 1207static void psycho_iommu_init(struct pci_controller_info *p)
1208{ 1208{
1209 struct pci_iommu *iommu = p->pbm_A.iommu; 1209 struct pci_iommu *iommu = p->pbm_A.iommu;
1210 unsigned long tsbbase, i; 1210 unsigned long i;
1211 u64 control; 1211 u64 control;
1212 1212
1213 /* Setup initial software IOMMU state. */
1214 spin_lock_init(&iommu->lock);
1215 iommu->ctx_lowest_free = 1;
1216
1217 /* Register addresses. */ 1213 /* Register addresses. */
1218 iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL; 1214 iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
1219 iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE; 1215 iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
@@ -1240,40 +1236,10 @@ static void psycho_iommu_init(struct pci_controller_info *p)
1240 /* Leave diag mode enabled for full-flushing done 1236 /* Leave diag mode enabled for full-flushing done
1241 * in pci_iommu.c 1237 * in pci_iommu.c
1242 */ 1238 */
1239 pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
1243 1240
1244 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); 1241 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE,
1245 if (!iommu->dummy_page) { 1242 __pa(iommu->page_table));
1246 prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1247 prom_halt();
1248 }
1249 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1250 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1251
1252 /* Using assumed page size 8K with 128K entries we need 1MB iommu page
1253 * table (128K ioptes * 8 bytes per iopte). This is
1254 * page order 7 on UltraSparc.
1255 */
1256 tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
1257 if (!tsbbase) {
1258 prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
1259 prom_halt();
1260 }
1261 iommu->page_table = (iopte_t *)tsbbase;
1262 iommu->page_table_sz_bits = 17;
1263 iommu->page_table_map_base = 0xc0000000;
1264 iommu->dma_addr_mask = 0xffffffff;
1265 pci_iommu_table_init(iommu, IO_TSB_SIZE);
1266
1267 /* We start with no consistent mappings. */
1268 iommu->lowest_consistent_map =
1269 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1270
1271 for (i = 0; i < PBM_NCLUSTERS; i++) {
1272 iommu->alloc_info[i].flush = 0;
1273 iommu->alloc_info[i].next = 0;
1274 }
1275
1276 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
1277 1243
1278 control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL); 1244 control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
1279 control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ); 1245 control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
@@ -1281,7 +1247,7 @@ static void psycho_iommu_init(struct pci_controller_info *p)
1281 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control); 1247 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
1282 1248
1283 /* If necessary, hook us up for starfire IRQ translations. */ 1249 /* If necessary, hook us up for starfire IRQ translations. */
1284 if(this_is_starfire) 1250 if (this_is_starfire)
1285 p->starfire_cookie = starfire_hookup(p->pbm_A.portid); 1251 p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
1286 else 1252 else
1287 p->starfire_cookie = NULL; 1253 p->starfire_cookie = NULL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 0ee6bd5b9ac6..da8e1364194f 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1267,13 +1267,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1267 u32 dma_mask) 1267 u32 dma_mask)
1268{ 1268{
1269 struct pci_iommu *iommu = p->pbm_A.iommu; 1269 struct pci_iommu *iommu = p->pbm_A.iommu;
1270 unsigned long tsbbase, i, order; 1270 unsigned long i;
1271 u64 control; 1271 u64 control;
1272 1272
1273 /* Setup initial software IOMMU state. */
1274 spin_lock_init(&iommu->lock);
1275 iommu->ctx_lowest_free = 1;
1276
1277 /* Register addresses. */ 1273 /* Register addresses. */
1278 iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL; 1274 iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
1279 iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE; 1275 iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
@@ -1295,26 +1291,10 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1295 /* Leave diag mode enabled for full-flushing done 1291 /* Leave diag mode enabled for full-flushing done
1296 * in pci_iommu.c 1292 * in pci_iommu.c
1297 */ 1293 */
1294 pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);
1298 1295
1299 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); 1296 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE,
1300 if (!iommu->dummy_page) { 1297 __pa(iommu->page_table));
1301 prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1302 prom_halt();
1303 }
1304 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1305 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1306
1307 tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
1308 if (!tsbbase) {
1309 prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
1310 prom_halt();
1311 }
1312 iommu->page_table = (iopte_t *)tsbbase;
1313 iommu->page_table_map_base = dvma_offset;
1314 iommu->dma_addr_mask = dma_mask;
1315 pci_iommu_table_init(iommu, PAGE_SIZE << order);
1316
1317 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
1318 1298
1319 control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL); 1299 control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
1320 control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ); 1300 control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
@@ -1322,11 +1302,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1322 switch(tsbsize) { 1302 switch(tsbsize) {
1323 case 64: 1303 case 64:
1324 control |= SABRE_IOMMU_TSBSZ_64K; 1304 control |= SABRE_IOMMU_TSBSZ_64K;
1325 iommu->page_table_sz_bits = 16;
1326 break; 1305 break;
1327 case 128: 1306 case 128:
1328 control |= SABRE_IOMMU_TSBSZ_128K; 1307 control |= SABRE_IOMMU_TSBSZ_128K;
1329 iommu->page_table_sz_bits = 17;
1330 break; 1308 break;
1331 default: 1309 default:
1332 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize); 1310 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
@@ -1334,15 +1312,6 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1334 break; 1312 break;
1335 } 1313 }
1336 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control); 1314 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
1337
1338 /* We start with no consistent mappings. */
1339 iommu->lowest_consistent_map =
1340 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1341
1342 for (i = 0; i < PBM_NCLUSTERS; i++) {
1343 iommu->alloc_info[i].flush = 0;
1344 iommu->alloc_info[i].next = 0;
1345 }
1346} 1315}
1347 1316
1348static void pbm_register_toplevel_resources(struct pci_controller_info *p, 1317static void pbm_register_toplevel_resources(struct pci_controller_info *p,
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 331382e1a75d..d8c4e0919b4e 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -330,7 +330,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
330static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) 330static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
331{ 331{
332 unsigned long sync_reg = (unsigned long) _arg2; 332 unsigned long sync_reg = (unsigned long) _arg2;
333 u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO); 333 u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO);
334 u64 val; 334 u64 val;
335 int limit; 335 int limit;
336 336
@@ -1765,7 +1765,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
1765static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) 1765static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1766{ 1766{
1767 struct pci_iommu *iommu = pbm->iommu; 1767 struct pci_iommu *iommu = pbm->iommu;
1768 unsigned long tsbbase, i, tagbase, database, order; 1768 unsigned long i, tagbase, database;
1769 u32 vdma[2], dma_mask; 1769 u32 vdma[2], dma_mask;
1770 u64 control; 1770 u64 control;
1771 int err, tsbsize; 1771 int err, tsbsize;
@@ -1800,10 +1800,6 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1800 prom_halt(); 1800 prom_halt();
1801 }; 1801 };
1802 1802
1803 /* Setup initial software IOMMU state. */
1804 spin_lock_init(&iommu->lock);
1805 iommu->ctx_lowest_free = 1;
1806
1807 /* Register addresses, SCHIZO has iommu ctx flushing. */ 1803 /* Register addresses, SCHIZO has iommu ctx flushing. */
1808 iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL; 1804 iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
1809 iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE; 1805 iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
@@ -1832,56 +1828,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1832 /* Leave diag mode enabled for full-flushing done 1828 /* Leave diag mode enabled for full-flushing done
1833 * in pci_iommu.c 1829 * in pci_iommu.c
1834 */ 1830 */
1831 pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
1835 1832
1836 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); 1833 schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
1837 if (!iommu->dummy_page) {
1838 prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1839 prom_halt();
1840 }
1841 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1842 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1843
1844 /* Using assumed page size 8K with 128K entries we need 1MB iommu page
1845 * table (128K ioptes * 8 bytes per iopte). This is
1846 * page order 7 on UltraSparc.
1847 */
1848 order = get_order(tsbsize * 8 * 1024);
1849 tsbbase = __get_free_pages(GFP_KERNEL, order);
1850 if (!tsbbase) {
1851 prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
1852 prom_halt();
1853 }
1854
1855 iommu->page_table = (iopte_t *)tsbbase;
1856 iommu->page_table_map_base = vdma[0];
1857 iommu->dma_addr_mask = dma_mask;
1858 pci_iommu_table_init(iommu, PAGE_SIZE << order);
1859
1860 switch (tsbsize) {
1861 case 64:
1862 iommu->page_table_sz_bits = 16;
1863 break;
1864
1865 case 128:
1866 iommu->page_table_sz_bits = 17;
1867 break;
1868
1869 default:
1870 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
1871 prom_halt();
1872 break;
1873 };
1874
1875 /* We start with no consistent mappings. */
1876 iommu->lowest_consistent_map =
1877 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1878
1879 for (i = 0; i < PBM_NCLUSTERS; i++) {
1880 iommu->alloc_info[i].flush = 0;
1881 iommu->alloc_info[i].next = 0;
1882 }
1883
1884 schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
1885 1834
1886 control = schizo_read(iommu->iommu_control); 1835 control = schizo_read(iommu->iommu_control);
1887 control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ); 1836 control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 946cee0257ea..9e8362ea3104 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -17,6 +17,7 @@
17 17
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/ebus.h> 19#include <asm/ebus.h>
20#include <asm/isa.h>
20#include <asm/auxio.h> 21#include <asm/auxio.h>
21 22
22#include <linux/unistd.h> 23#include <linux/unistd.h>
@@ -100,46 +101,83 @@ again:
100 return 0; 101 return 0;
101} 102}
102 103
103static int __init has_button_interrupt(struct linux_ebus_device *edev) 104static int __init has_button_interrupt(unsigned int irq, int prom_node)
104{ 105{
105 if (edev->irqs[0] == PCI_IRQ_NONE) 106 if (irq == PCI_IRQ_NONE)
106 return 0; 107 return 0;
107 if (!prom_node_has_property(edev->prom_node, "button")) 108 if (!prom_node_has_property(prom_node, "button"))
108 return 0; 109 return 0;
109 110
110 return 1; 111 return 1;
111} 112}
112 113
113void __init power_init(void) 114static int __init power_probe_ebus(struct resource **resp, unsigned int *irq_p, int *prom_node_p)
114{ 115{
115 struct linux_ebus *ebus; 116 struct linux_ebus *ebus;
116 struct linux_ebus_device *edev; 117 struct linux_ebus_device *edev;
118
119 for_each_ebus(ebus) {
120 for_each_ebusdev(edev, ebus) {
121 if (!strcmp(edev->prom_name, "power")) {
122 *resp = &edev->resource[0];
123 *irq_p = edev->irqs[0];
124 *prom_node_p = edev->prom_node;
125 return 0;
126 }
127 }
128 }
129 return -ENODEV;
130}
131
132static int __init power_probe_isa(struct resource **resp, unsigned int *irq_p, int *prom_node_p)
133{
134 struct sparc_isa_bridge *isa_bus;
135 struct sparc_isa_device *isa_dev;
136
137 for_each_isa(isa_bus) {
138 for_each_isadev(isa_dev, isa_bus) {
139 if (!strcmp(isa_dev->prom_name, "power")) {
140 *resp = &isa_dev->resource;
141 *irq_p = isa_dev->irq;
142 *prom_node_p = isa_dev->prom_node;
143 return 0;
144 }
145 }
146 }
147 return -ENODEV;
148}
149
150void __init power_init(void)
151{
152 struct resource *res = NULL;
153 unsigned int irq;
154 int prom_node;
117 static int invoked; 155 static int invoked;
118 156
119 if (invoked) 157 if (invoked)
120 return; 158 return;
121 invoked = 1; 159 invoked = 1;
122 160
123 for_each_ebus(ebus) { 161 if (!power_probe_ebus(&res, &irq, &prom_node))
124 for_each_ebusdev(edev, ebus) { 162 goto found;
125 if (!strcmp(edev->prom_name, "power")) 163
126 goto found; 164 if (!power_probe_isa(&res, &irq, &prom_node))
127 } 165 goto found;
128 } 166
129 return; 167 return;
130 168
131found: 169found:
132 power_reg = ioremap(edev->resource[0].start, 0x4); 170 power_reg = ioremap(res->start, 0x4);
133 printk("power: Control reg at %p ... ", power_reg); 171 printk("power: Control reg at %p ... ", power_reg);
134 poweroff_method = machine_halt; /* able to use the standard halt */ 172 poweroff_method = machine_halt; /* able to use the standard halt */
135 if (has_button_interrupt(edev)) { 173 if (has_button_interrupt(irq, prom_node)) {
136 if (kernel_thread(powerd, NULL, CLONE_FS) < 0) { 174 if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
137 printk("Failed to start power daemon.\n"); 175 printk("Failed to start power daemon.\n");
138 return; 176 return;
139 } 177 }
140 printk("powerd running.\n"); 178 printk("powerd running.\n");
141 179
142 if (request_irq(edev->irqs[0], 180 if (request_irq(irq,
143 power_handler, SA_SHIRQ, "power", NULL) < 0) 181 power_handler, SA_SHIRQ, "power", NULL) < 0)
144 printk("power: Error, cannot register IRQ handler.\n"); 182 printk("power: Error, cannot register IRQ handler.\n");
145 } else { 183 } else {
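
power_init() now walks a probe chain rather than assuming EBUS: each
bus-specific probe fills the same out-parameters and returns 0 on a match, so
supporting a new bus only adds one call. A stand-in sketch of the pattern
(device tables and values invented; the kernel iterates the real EBUS/ISA
device lists):

#include <stdio.h>

struct probe_result {
	unsigned long res_start;
	unsigned int irq;
	int node;
};

static int probe_ebus(struct probe_result *r)
{
	(void) r;
	return -1;		/* pretend no "power" device on EBUS */
}

static int probe_isa(struct probe_result *r)
{
	r->res_start = 0x7fe0000UL;	/* invented register address */
	r->irq = 5;
	r->node = 42;
	return 0;			/* found on ISA */
}

int main(void)
{
	struct probe_result r;

	/* First probe that succeeds wins, as in power_init(). */
	if (!probe_ebus(&r) || !probe_isa(&r))
		printf("power: reg %#lx irq %u node %d\n",
		       r.res_start, r.irq, r.node);
	return 0;
}
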
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 23ad839d113f..774ecbb8a031 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -30,6 +30,8 @@
30#include <asm/psrcompat.h> 30#include <asm/psrcompat.h>
31#include <asm/visasm.h> 31#include <asm/visasm.h>
32#include <asm/spitfire.h> 32#include <asm/spitfire.h>
33#include <asm/page.h>
34#include <asm/cpudata.h>
33 35
34/* Returning from ptrace is a bit tricky because the syscall return 36/* Returning from ptrace is a bit tricky because the syscall return
35 * low level code assumes any value returned which is negative and 37 * low level code assumes any value returned which is negative and
@@ -128,20 +130,24 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
128 * is mapped to in the user's address space, we can skip the 130 * is mapped to in the user's address space, we can skip the
129 * D-cache flush. 131 * D-cache flush.
130 */ 132 */
131 if ((uaddr ^ kaddr) & (1UL << 13)) { 133 if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
132 unsigned long start = __pa(kaddr); 134 unsigned long start = __pa(kaddr);
133 unsigned long end = start + len; 135 unsigned long end = start + len;
136 unsigned long dcache_line_size;
137
138 dcache_line_size = local_cpu_data().dcache_line_size;
134 139
135 if (tlb_type == spitfire) { 140 if (tlb_type == spitfire) {
136 for (; start < end; start += 32) 141 for (; start < end; start += dcache_line_size)
137 spitfire_put_dcache_tag(va & 0x3fe0, 0x0); 142 spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
138 } else { 143 } else {
139 for (; start < end; start += 32) 144 start &= ~(dcache_line_size - 1);
145 for (; start < end; start += dcache_line_size)
140 __asm__ __volatile__( 146 __asm__ __volatile__(
141 "stxa %%g0, [%0] %1\n\t" 147 "stxa %%g0, [%0] %1\n\t"
142 "membar #Sync" 148 "membar #Sync"
143 : /* no outputs */ 149 : /* no outputs */
144 : "r" (va), 150 : "r" (start),
145 "i" (ASI_DCACHE_INVALIDATE)); 151 "i" (ASI_DCACHE_INVALIDATE));
146 } 152 }
147 } 153 }
@@ -149,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
149 if (write && tlb_type == spitfire) { 155 if (write && tlb_type == spitfire) {
150 unsigned long start = (unsigned long) kaddr; 156 unsigned long start = (unsigned long) kaddr;
151 unsigned long end = start + len; 157 unsigned long end = start + len;
158 unsigned long icache_line_size;
159
160 icache_line_size = local_cpu_data().icache_line_size;
152 161
153 for (; start < end; start += 32) 162 for (; start < end; start += icache_line_size)
154 flushi(start); 163 flushi(start);
155 } 164 }
156} 165}
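
The flush test above hinges on D-cache color. Assuming the usual sun4u
arrangement of 8K pages and a 16K direct-mapped, virtually-indexed D-cache,
bit 13 of a virtual address selects which half of the cache a page lands in;
user and kernel views of the same page only alias into different lines when
that bit differs. A small sketch of the test with a worked pair of addresses
(addresses invented):

#include <stdio.h>

#define DCACHE_COLOR_BIT (1UL << 13)	/* 8K page, 16K VIPT D-cache */

/* A flush is only needed when the user and kernel mappings land in
 * different cache colors, i.e. when bit 13 differs. */
static int needs_dcache_flush(unsigned long uaddr, unsigned long kaddr)
{
	return ((uaddr ^ kaddr) & DCACHE_COLOR_BIT) != 0;
}

int main(void)
{
	/* bit 13 clear in both -> same color  -> prints 0 */
	printf("%d\n", needs_dcache_flush(0x10000UL, 0xfffff80000000000UL));
	/* bit 13 set in one    -> colors differ -> prints 1 */
	printf("%d\n", needs_dcache_flush(0x12000UL, 0xfffff80000000000UL));
	return 0;
}
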
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index fafd227735fa..090dcca00d2a 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -256,9 +256,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
256 brnz,pn %l3, kern_rtt 256 brnz,pn %l3, kern_rtt
257 mov PRIMARY_CONTEXT, %l7 257 mov PRIMARY_CONTEXT, %l7
258 ldxa [%l7 + %l7] ASI_DMMU, %l0 258 ldxa [%l7 + %l7] ASI_DMMU, %l0
259cplus_rtrap_insn_1: 259 sethi %hi(sparc64_kern_pri_nuc_bits), %l1
260 sethi %hi(0), %l1 260 ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
261 sllx %l1, 32, %l1
262 or %l0, %l1, %l0 261 or %l0, %l1, %l0
263 stxa %l0, [%l7] ASI_DMMU 262 stxa %l0, [%l7] ASI_DMMU
264 flush %g6 263 flush %g6
@@ -313,53 +312,36 @@ kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
313 wr %g1, FPRS_FEF, %fprs 312 wr %g1, FPRS_FEF, %fprs
314 ldx [%o1 + %o5], %g1 313 ldx [%o1 + %o5], %g1
315 add %g6, TI_XFSR, %o1 314 add %g6, TI_XFSR, %o1
316 membar #StoreLoad | #LoadLoad
317 sll %o0, 8, %o2 315 sll %o0, 8, %o2
318 add %g6, TI_FPREGS, %o3 316 add %g6, TI_FPREGS, %o3
319 brz,pn %l6, 1f 317 brz,pn %l6, 1f
320 add %g6, TI_FPREGS+0x40, %o4 318 add %g6, TI_FPREGS+0x40, %o4
321 319
320 membar #Sync
322 ldda [%o3 + %o2] ASI_BLK_P, %f0 321 ldda [%o3 + %o2] ASI_BLK_P, %f0
323 ldda [%o4 + %o2] ASI_BLK_P, %f16 322 ldda [%o4 + %o2] ASI_BLK_P, %f16
323 membar #Sync
3241: andcc %l2, FPRS_DU, %g0 3241: andcc %l2, FPRS_DU, %g0
325 be,pn %icc, 1f 325 be,pn %icc, 1f
326 wr %g1, 0, %gsr 326 wr %g1, 0, %gsr
327 add %o2, 0x80, %o2 327 add %o2, 0x80, %o2
328 membar #Sync
328 ldda [%o3 + %o2] ASI_BLK_P, %f32 329 ldda [%o3 + %o2] ASI_BLK_P, %f32
329 ldda [%o4 + %o2] ASI_BLK_P, %f48 330 ldda [%o4 + %o2] ASI_BLK_P, %f48
330
3311: membar #Sync 3311: membar #Sync
332 ldx [%o1 + %o5], %fsr 332 ldx [%o1 + %o5], %fsr
3332: stb %l5, [%g6 + TI_FPDEPTH] 3332: stb %l5, [%g6 + TI_FPDEPTH]
334 ba,pt %xcc, rt_continue 334 ba,pt %xcc, rt_continue
335 nop 335 nop
3365: wr %g0, FPRS_FEF, %fprs 3365: wr %g0, FPRS_FEF, %fprs
337 membar #StoreLoad | #LoadLoad
338 sll %o0, 8, %o2 337 sll %o0, 8, %o2
339 338
340 add %g6, TI_FPREGS+0x80, %o3 339 add %g6, TI_FPREGS+0x80, %o3
341 add %g6, TI_FPREGS+0xc0, %o4 340 add %g6, TI_FPREGS+0xc0, %o4
341 membar #Sync
342 ldda [%o3 + %o2] ASI_BLK_P, %f32 342 ldda [%o3 + %o2] ASI_BLK_P, %f32
343 ldda [%o4 + %o2] ASI_BLK_P, %f48 343 ldda [%o4 + %o2] ASI_BLK_P, %f48
344 membar #Sync 344 membar #Sync
345 wr %g0, FPRS_DU, %fprs 345 wr %g0, FPRS_DU, %fprs
346 ba,pt %xcc, rt_continue 346 ba,pt %xcc, rt_continue
347 stb %l5, [%g6 + TI_FPDEPTH] 347 stb %l5, [%g6 + TI_FPDEPTH]
348
349cplus_rinsn_1:
350 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
351
352 .globl cheetah_plus_patch_rtrap
353cheetah_plus_patch_rtrap:
354 /* We configure the dTLB512_0 for 4MB pages and the
355 * dTLB512_1 for 8K pages when in context zero.
356 */
357 sethi %hi(cplus_rinsn_1), %o0
358 sethi %hi(cplus_rtrap_insn_1), %o2
359 lduw [%o0 + %lo(cplus_rinsn_1)], %o1
360 or %o2, %lo(cplus_rtrap_insn_1), %o2
361 stw %o1, [%o2]
362 flush %o2
363
364 retl
365 nop
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index ddbed3341a23..c1f34237cdf2 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -187,17 +187,13 @@ int prom_callback(long *args)
187 } 187 }
188 188
189 if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { 189 if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
190 unsigned long kernel_pctx = 0; 190 extern unsigned long sparc64_kern_pri_context;
191
192 if (tlb_type == cheetah_plus)
193 kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
194 CTX_CHEETAH_PLUS_CTX0);
195 191
196 /* Spitfire Errata #32 workaround */ 192 /* Spitfire Errata #32 workaround */
197 __asm__ __volatile__("stxa %0, [%1] %2\n\t" 193 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
198 "flush %%g6" 194 "flush %%g6"
199 : /* No outputs */ 195 : /* No outputs */
200 : "r" (kernel_pctx), 196 : "r" (sparc64_kern_pri_context),
201 "r" (PRIMARY_CONTEXT), 197 "r" (PRIMARY_CONTEXT),
202 "i" (ASI_DMMU)); 198 "i" (ASI_DMMU));
203 199
@@ -464,8 +460,6 @@ static void __init boot_flags_init(char *commands)
464 } 460 }
465} 461}
466 462
467extern int prom_probe_memory(void);
468extern unsigned long start, end;
469extern void panic_setup(char *, int *); 463extern void panic_setup(char *, int *);
470 464
471extern unsigned short root_flags; 465extern unsigned short root_flags;
@@ -492,13 +486,8 @@ void register_prom_callbacks(void)
492 "' linux-.soft2 to .soft2"); 486 "' linux-.soft2 to .soft2");
493} 487}
494 488
495extern void paging_init(void);
496
497void __init setup_arch(char **cmdline_p) 489void __init setup_arch(char **cmdline_p)
498{ 490{
499 unsigned long highest_paddr;
500 int i;
501
502 /* Initialize PROM console and command line. */ 491 /* Initialize PROM console and command line. */
503 *cmdline_p = prom_getbootargs(); 492 *cmdline_p = prom_getbootargs();
504 strcpy(saved_command_line, *cmdline_p); 493 strcpy(saved_command_line, *cmdline_p);
@@ -517,40 +506,6 @@ void __init setup_arch(char **cmdline_p)
517 boot_flags_init(*cmdline_p); 506 boot_flags_init(*cmdline_p);
518 507
519 idprom_init(); 508 idprom_init();
520 (void) prom_probe_memory();
521
522 /* In paging_init() we tip off this value to see if we need
523 * to change init_mm.pgd to point to the real alias mapping.
524 */
525 phys_base = 0xffffffffffffffffUL;
526 highest_paddr = 0UL;
527 for (i = 0; sp_banks[i].num_bytes != 0; i++) {
528 unsigned long top;
529
530 if (sp_banks[i].base_addr < phys_base)
531 phys_base = sp_banks[i].base_addr;
532 top = sp_banks[i].base_addr +
533 sp_banks[i].num_bytes;
534 if (highest_paddr < top)
535 highest_paddr = top;
536 }
537 pfn_base = phys_base >> PAGE_SHIFT;
538
539 switch (tlb_type) {
540 default:
541 case spitfire:
542 kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
543 kern_base &= _PAGE_PADDR_SF;
544 break;
545
546 case cheetah:
547 case cheetah_plus:
548 kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
549 kern_base &= _PAGE_PADDR;
550 break;
551 };
552
553 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
554 509
555 if (!root_flags) 510 if (!root_flags)
556 root_mountflags &= ~MS_RDONLY; 511 root_mountflags &= ~MS_RDONLY;
@@ -625,6 +580,9 @@ extern void smp_info(struct seq_file *);
625extern void smp_bogo(struct seq_file *); 580extern void smp_bogo(struct seq_file *);
626extern void mmu_info(struct seq_file *); 581extern void mmu_info(struct seq_file *);
627 582
583unsigned int dcache_parity_tl1_occurred;
584unsigned int icache_parity_tl1_occurred;
585
628static int show_cpuinfo(struct seq_file *m, void *__unused) 586static int show_cpuinfo(struct seq_file *m, void *__unused)
629{ 587{
630 seq_printf(m, 588 seq_printf(m,
@@ -635,6 +593,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
635 "type\t\t: sun4u\n" 593 "type\t\t: sun4u\n"
636 "ncpus probed\t: %ld\n" 594 "ncpus probed\t: %ld\n"
637 "ncpus active\t: %ld\n" 595 "ncpus active\t: %ld\n"
596 "D$ parity tl1\t: %u\n"
597 "I$ parity tl1\t: %u\n"
638#ifndef CONFIG_SMP 598#ifndef CONFIG_SMP
639 "Cpu0Bogo\t: %lu.%02lu\n" 599 "Cpu0Bogo\t: %lu.%02lu\n"
640 "Cpu0ClkTck\t: %016lx\n" 600 "Cpu0ClkTck\t: %016lx\n"
@@ -647,7 +607,9 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
647 (prom_prev >> 8) & 0xff, 607 (prom_prev >> 8) & 0xff,
648 prom_prev & 0xff, 608 prom_prev & 0xff,
649 (long)num_possible_cpus(), 609 (long)num_possible_cpus(),
650 (long)num_online_cpus() 610 (long)num_online_cpus(),
611 dcache_parity_tl1_occurred,
612 icache_parity_tl1_occurred
651#ifndef CONFIG_SMP 613#ifndef CONFIG_SMP
652 , cpu_data(0).udelay_val/(500000/HZ), 614 , cpu_data(0).udelay_val/(500000/HZ),
653 (cpu_data(0).udelay_val/(5000/HZ)) % 100, 615 (cpu_data(0).udelay_val/(5000/HZ)) % 100,
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b4fc6a5462b2..b137fd63f5e1 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -93,6 +93,27 @@ void __init smp_store_cpu_info(int id)
93 cpu_data(id).pte_cache[1] = NULL; 93 cpu_data(id).pte_cache[1] = NULL;
94 cpu_data(id).pgd_cache = NULL; 94 cpu_data(id).pgd_cache = NULL;
95 cpu_data(id).idle_volume = 1; 95 cpu_data(id).idle_volume = 1;
96
97 cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
98 16 * 1024);
99 cpu_data(id).dcache_line_size =
100 prom_getintdefault(cpu_node, "dcache-line-size", 32);
101 cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
102 16 * 1024);
103 cpu_data(id).icache_line_size =
104 prom_getintdefault(cpu_node, "icache-line-size", 32);
105 cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
106 4 * 1024 * 1024);
107 cpu_data(id).ecache_line_size =
108 prom_getintdefault(cpu_node, "ecache-line-size", 64);
109 printk("CPU[%d]: Caches "
110 "D[sz(%d):line_sz(%d)] "
111 "I[sz(%d):line_sz(%d)] "
112 "E[sz(%d):line_sz(%d)]\n",
113 id,
114 cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
115 cpu_data(id).icache_size, cpu_data(id).icache_line_size,
116 cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
96} 117}
97 118
98static void smp_setup_percpu_timer(void); 119static void smp_setup_percpu_timer(void);
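
smp_store_cpu_info() now records cache geometry per CPU, which is what lets
flush_ptrace_access() above stop hard-coding 32-byte lines. The lookups all
follow one pattern: take the OBP property if the CPU node has it, otherwise a
conservative default. A stand-in sketch (the property table is invented;
prom_getintdefault() is the real firmware call):

#include <stdio.h>
#include <string.h>

struct prop {
	const char *name;
	int val;
};

/* Simplified prom_getintdefault(): return the named property's value,
 * or the supplied default when the node does not carry it. */
static int getintdefault(const struct prop *node, int nprops,
			 const char *name, int def)
{
	int i;

	for (i = 0; i < nprops; i++)
		if (!strcmp(node[i].name, name))
			return node[i].val;
	return def;
}

int main(void)
{
	static const struct prop cpu_node[] = {
		{ "dcache-size", 16384 },
		/* no "dcache-line-size" property on this node */
	};

	printf("%d\n", getintdefault(cpu_node, 1, "dcache-size", 8192));	/* 16384 */
	printf("%d\n", getintdefault(cpu_node, 1, "dcache-line-size", 32));	/* 32 */
	return 0;
}
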
@@ -980,13 +1001,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
980 preempt_enable(); 1001 preempt_enable();
981} 1002}
982 1003
983extern unsigned long xcall_promstop;
984
985void smp_promstop_others(void)
986{
987 smp_cross_call(&xcall_promstop, 0, 0, 0);
988}
989
990#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier 1004#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
991#define prof_counter(__cpu) cpu_data(__cpu).counter 1005#define prof_counter(__cpu) cpu_data(__cpu).counter
992 1006
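
The smp_store_cpu_info() hunk above replaces guesswork about cache geometry with values probed from the firmware device tree; prom_getintdefault() returns the named OBP property or the listed fallback when it is absent. A quick self-contained check (plain C, constants taken straight from the hunk) of what those fallbacks imply for the tag-walking flush loops converted in traps.c further down:

#include <stdio.h>

int main(void)
{
	int dcache_size = 16 * 1024;		/* "dcache-size" default */
	int dcache_line_size = 32;		/* "dcache-line-size" default */
	int ecache_size = 4 * 1024 * 1024;	/* "ecache-size" default */
	int ecache_line_size = 64;		/* "ecache-line-size" default */

	/* Number of lines a flush loop must touch per cache. */
	printf("D$ lines: %d\n", dcache_size / dcache_line_size);  /* 512 */
	printf("E$ lines: %d\n", ecache_size / ecache_line_size);  /* 65536 */
	return 0;
}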
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index cbb5e59824e5..fb7a5370dbfc 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -163,9 +163,6 @@ EXPORT_SYMBOL(atomic64_add);
163EXPORT_SYMBOL(atomic64_add_ret); 163EXPORT_SYMBOL(atomic64_add_ret);
164EXPORT_SYMBOL(atomic64_sub); 164EXPORT_SYMBOL(atomic64_sub);
165EXPORT_SYMBOL(atomic64_sub_ret); 165EXPORT_SYMBOL(atomic64_sub_ret);
166#ifdef CONFIG_SMP
167EXPORT_SYMBOL(_atomic_dec_and_lock);
168#endif
169 166
170/* Atomic bit operations. */ 167/* Atomic bit operations. */
171EXPORT_SYMBOL(test_and_set_bit); 168EXPORT_SYMBOL(test_and_set_bit);
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index 5f9e4fae612e..9cd272ac3ac1 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -157,173 +157,199 @@ sys32_socketcall: /* %o0=call, %o1=args */
157 or %g2, %lo(__socketcall_table_begin), %g2 157 or %g2, %lo(__socketcall_table_begin), %g2
158 jmpl %g2 + %o0, %g0 158 jmpl %g2 + %o0, %g0
159 nop 159 nop
160do_einval:
161 retl
162 mov -EINVAL, %o0
160 163
161 /* Each entry is exactly 32 bytes. */
162 .align 32 164 .align 32
163__socketcall_table_begin: 165__socketcall_table_begin:
166
167 /* Each entry is exactly 32 bytes. */
164do_sys_socket: /* sys_socket(int, int, int) */ 168do_sys_socket: /* sys_socket(int, int, int) */
165 ldswa [%o1 + 0x0] %asi, %o0 1691: ldswa [%o1 + 0x0] %asi, %o0
166 sethi %hi(sys_socket), %g1 170 sethi %hi(sys_socket), %g1
167 ldswa [%o1 + 0x8] %asi, %o2 1712: ldswa [%o1 + 0x8] %asi, %o2
168 jmpl %g1 + %lo(sys_socket), %g0 172 jmpl %g1 + %lo(sys_socket), %g0
169 ldswa [%o1 + 0x4] %asi, %o1 1733: ldswa [%o1 + 0x4] %asi, %o1
170 nop 174 nop
171 nop 175 nop
172 nop 176 nop
173do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ 177do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
174 ldswa [%o1 + 0x0] %asi, %o0 1784: ldswa [%o1 + 0x0] %asi, %o0
175 sethi %hi(sys_bind), %g1 179 sethi %hi(sys_bind), %g1
176 ldswa [%o1 + 0x8] %asi, %o2 1805: ldswa [%o1 + 0x8] %asi, %o2
177 jmpl %g1 + %lo(sys_bind), %g0 181 jmpl %g1 + %lo(sys_bind), %g0
178 lduwa [%o1 + 0x4] %asi, %o1 1826: lduwa [%o1 + 0x4] %asi, %o1
179 nop 183 nop
180 nop 184 nop
181 nop 185 nop
182do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ 186do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
183 ldswa [%o1 + 0x0] %asi, %o0 1877: ldswa [%o1 + 0x0] %asi, %o0
184 sethi %hi(sys_connect), %g1 188 sethi %hi(sys_connect), %g1
185 ldswa [%o1 + 0x8] %asi, %o2 1898: ldswa [%o1 + 0x8] %asi, %o2
186 jmpl %g1 + %lo(sys_connect), %g0 190 jmpl %g1 + %lo(sys_connect), %g0
187 lduwa [%o1 + 0x4] %asi, %o1 1919: lduwa [%o1 + 0x4] %asi, %o1
188 nop 192 nop
189 nop 193 nop
190 nop 194 nop
191do_sys_listen: /* sys_listen(int, int) */ 195do_sys_listen: /* sys_listen(int, int) */
192 ldswa [%o1 + 0x0] %asi, %o0 19610: ldswa [%o1 + 0x0] %asi, %o0
193 sethi %hi(sys_listen), %g1 197 sethi %hi(sys_listen), %g1
194 jmpl %g1 + %lo(sys_listen), %g0 198 jmpl %g1 + %lo(sys_listen), %g0
195 ldswa [%o1 + 0x4] %asi, %o1 19911: ldswa [%o1 + 0x4] %asi, %o1
196 nop 200 nop
197 nop 201 nop
198 nop 202 nop
199 nop 203 nop
200do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ 204do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
201 ldswa [%o1 + 0x0] %asi, %o0 20512: ldswa [%o1 + 0x0] %asi, %o0
202 sethi %hi(sys_accept), %g1 206 sethi %hi(sys_accept), %g1
203 lduwa [%o1 + 0x8] %asi, %o2 20713: lduwa [%o1 + 0x8] %asi, %o2
204 jmpl %g1 + %lo(sys_accept), %g0 208 jmpl %g1 + %lo(sys_accept), %g0
205 lduwa [%o1 + 0x4] %asi, %o1 20914: lduwa [%o1 + 0x4] %asi, %o1
206 nop 210 nop
207 nop 211 nop
208 nop 212 nop
209do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ 213do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
210 ldswa [%o1 + 0x0] %asi, %o0 21415: ldswa [%o1 + 0x0] %asi, %o0
211 sethi %hi(sys_getsockname), %g1 215 sethi %hi(sys_getsockname), %g1
212 lduwa [%o1 + 0x8] %asi, %o2 21616: lduwa [%o1 + 0x8] %asi, %o2
213 jmpl %g1 + %lo(sys_getsockname), %g0 217 jmpl %g1 + %lo(sys_getsockname), %g0
214 lduwa [%o1 + 0x4] %asi, %o1 21817: lduwa [%o1 + 0x4] %asi, %o1
215 nop 219 nop
216 nop 220 nop
217 nop 221 nop
218do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ 222do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
219 ldswa [%o1 + 0x0] %asi, %o0 22318: ldswa [%o1 + 0x0] %asi, %o0
220 sethi %hi(sys_getpeername), %g1 224 sethi %hi(sys_getpeername), %g1
221 lduwa [%o1 + 0x8] %asi, %o2 22519: lduwa [%o1 + 0x8] %asi, %o2
222 jmpl %g1 + %lo(sys_getpeername), %g0 226 jmpl %g1 + %lo(sys_getpeername), %g0
223 lduwa [%o1 + 0x4] %asi, %o1 22720: lduwa [%o1 + 0x4] %asi, %o1
224 nop 228 nop
225 nop 229 nop
226 nop 230 nop
227do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ 231do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
228 ldswa [%o1 + 0x0] %asi, %o0 23221: ldswa [%o1 + 0x0] %asi, %o0
229 sethi %hi(sys_socketpair), %g1 233 sethi %hi(sys_socketpair), %g1
230 ldswa [%o1 + 0x8] %asi, %o2 23422: ldswa [%o1 + 0x8] %asi, %o2
231 lduwa [%o1 + 0xc] %asi, %o3 23523: lduwa [%o1 + 0xc] %asi, %o3
232 jmpl %g1 + %lo(sys_socketpair), %g0 236 jmpl %g1 + %lo(sys_socketpair), %g0
233 ldswa [%o1 + 0x4] %asi, %o1 23724: ldswa [%o1 + 0x4] %asi, %o1
234 nop 238 nop
235 nop 239 nop
236do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ 240do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
237 ldswa [%o1 + 0x0] %asi, %o0 24125: ldswa [%o1 + 0x0] %asi, %o0
238 sethi %hi(sys_send), %g1 242 sethi %hi(sys_send), %g1
239 lduwa [%o1 + 0x8] %asi, %o2 24326: lduwa [%o1 + 0x8] %asi, %o2
240 lduwa [%o1 + 0xc] %asi, %o3 24427: lduwa [%o1 + 0xc] %asi, %o3
241 jmpl %g1 + %lo(sys_send), %g0 245 jmpl %g1 + %lo(sys_send), %g0
242 lduwa [%o1 + 0x4] %asi, %o1 24628: lduwa [%o1 + 0x4] %asi, %o1
243 nop 247 nop
244 nop 248 nop
245do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ 249do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
246 ldswa [%o1 + 0x0] %asi, %o0 25029: ldswa [%o1 + 0x0] %asi, %o0
247 sethi %hi(sys_recv), %g1 251 sethi %hi(sys_recv), %g1
248 lduwa [%o1 + 0x8] %asi, %o2 25230: lduwa [%o1 + 0x8] %asi, %o2
249 lduwa [%o1 + 0xc] %asi, %o3 25331: lduwa [%o1 + 0xc] %asi, %o3
250 jmpl %g1 + %lo(sys_recv), %g0 254 jmpl %g1 + %lo(sys_recv), %g0
251 lduwa [%o1 + 0x4] %asi, %o1 25532: lduwa [%o1 + 0x4] %asi, %o1
252 nop 256 nop
253 nop 257 nop
254do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ 258do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
255 ldswa [%o1 + 0x0] %asi, %o0 25933: ldswa [%o1 + 0x0] %asi, %o0
256 sethi %hi(sys_sendto), %g1 260 sethi %hi(sys_sendto), %g1
257 lduwa [%o1 + 0x8] %asi, %o2 26134: lduwa [%o1 + 0x8] %asi, %o2
258 lduwa [%o1 + 0xc] %asi, %o3 26235: lduwa [%o1 + 0xc] %asi, %o3
259 lduwa [%o1 + 0x10] %asi, %o4 26336: lduwa [%o1 + 0x10] %asi, %o4
260 ldswa [%o1 + 0x14] %asi, %o5 26437: ldswa [%o1 + 0x14] %asi, %o5
261 jmpl %g1 + %lo(sys_sendto), %g0 265 jmpl %g1 + %lo(sys_sendto), %g0
262 lduwa [%o1 + 0x4] %asi, %o1 26638: lduwa [%o1 + 0x4] %asi, %o1
263do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ 267do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
264 ldswa [%o1 + 0x0] %asi, %o0 26839: ldswa [%o1 + 0x0] %asi, %o0
265 sethi %hi(sys_recvfrom), %g1 269 sethi %hi(sys_recvfrom), %g1
266 lduwa [%o1 + 0x8] %asi, %o2 27040: lduwa [%o1 + 0x8] %asi, %o2
267 lduwa [%o1 + 0xc] %asi, %o3 27141: lduwa [%o1 + 0xc] %asi, %o3
268 lduwa [%o1 + 0x10] %asi, %o4 27242: lduwa [%o1 + 0x10] %asi, %o4
269 lduwa [%o1 + 0x14] %asi, %o5 27343: lduwa [%o1 + 0x14] %asi, %o5
270 jmpl %g1 + %lo(sys_recvfrom), %g0 274 jmpl %g1 + %lo(sys_recvfrom), %g0
271 lduwa [%o1 + 0x4] %asi, %o1 27544: lduwa [%o1 + 0x4] %asi, %o1
272do_sys_shutdown: /* sys_shutdown(int, int) */ 276do_sys_shutdown: /* sys_shutdown(int, int) */
273 ldswa [%o1 + 0x0] %asi, %o0 27745: ldswa [%o1 + 0x0] %asi, %o0
274 sethi %hi(sys_shutdown), %g1 278 sethi %hi(sys_shutdown), %g1
275 jmpl %g1 + %lo(sys_shutdown), %g0 279 jmpl %g1 + %lo(sys_shutdown), %g0
276 ldswa [%o1 + 0x4] %asi, %o1 28046: ldswa [%o1 + 0x4] %asi, %o1
277 nop 281 nop
278 nop 282 nop
279 nop 283 nop
280 nop 284 nop
281do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ 285do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
282 ldswa [%o1 + 0x0] %asi, %o0 28647: ldswa [%o1 + 0x0] %asi, %o0
283 sethi %hi(compat_sys_setsockopt), %g1 287 sethi %hi(compat_sys_setsockopt), %g1
284 ldswa [%o1 + 0x8] %asi, %o2 28848: ldswa [%o1 + 0x8] %asi, %o2
285 lduwa [%o1 + 0xc] %asi, %o3 28949: lduwa [%o1 + 0xc] %asi, %o3
286 ldswa [%o1 + 0x10] %asi, %o4 29050: ldswa [%o1 + 0x10] %asi, %o4
287 jmpl %g1 + %lo(compat_sys_setsockopt), %g0 291 jmpl %g1 + %lo(compat_sys_setsockopt), %g0
288 ldswa [%o1 + 0x4] %asi, %o1 29251: ldswa [%o1 + 0x4] %asi, %o1
289 nop 293 nop
290do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ 294do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
291 ldswa [%o1 + 0x0] %asi, %o0 29552: ldswa [%o1 + 0x0] %asi, %o0
292 sethi %hi(compat_sys_getsockopt), %g1 296 sethi %hi(compat_sys_getsockopt), %g1
293 ldswa [%o1 + 0x8] %asi, %o2 29753: ldswa [%o1 + 0x8] %asi, %o2
294 lduwa [%o1 + 0xc] %asi, %o3 29854: lduwa [%o1 + 0xc] %asi, %o3
295 lduwa [%o1 + 0x10] %asi, %o4 29955: lduwa [%o1 + 0x10] %asi, %o4
296 jmpl %g1 + %lo(compat_sys_getsockopt), %g0 300 jmpl %g1 + %lo(compat_sys_getsockopt), %g0
297 ldswa [%o1 + 0x4] %asi, %o1 30156: ldswa [%o1 + 0x4] %asi, %o1
298 nop 302 nop
299do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ 303do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
300 ldswa [%o1 + 0x0] %asi, %o0 30457: ldswa [%o1 + 0x0] %asi, %o0
301 sethi %hi(compat_sys_sendmsg), %g1 305 sethi %hi(compat_sys_sendmsg), %g1
302 lduwa [%o1 + 0x8] %asi, %o2 30658: lduwa [%o1 + 0x8] %asi, %o2
303 jmpl %g1 + %lo(compat_sys_sendmsg), %g0 307 jmpl %g1 + %lo(compat_sys_sendmsg), %g0
304 lduwa [%o1 + 0x4] %asi, %o1 30859: lduwa [%o1 + 0x4] %asi, %o1
305 nop 309 nop
306 nop 310 nop
307 nop 311 nop
308do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ 312do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
309 ldswa [%o1 + 0x0] %asi, %o0 31360: ldswa [%o1 + 0x0] %asi, %o0
310 sethi %hi(compat_sys_recvmsg), %g1 314 sethi %hi(compat_sys_recvmsg), %g1
311 lduwa [%o1 + 0x8] %asi, %o2 31561: lduwa [%o1 + 0x8] %asi, %o2
312 jmpl %g1 + %lo(compat_sys_recvmsg), %g0 316 jmpl %g1 + %lo(compat_sys_recvmsg), %g0
313 lduwa [%o1 + 0x4] %asi, %o1 31762: lduwa [%o1 + 0x4] %asi, %o1
314 nop 318 nop
315 nop 319 nop
316 nop 320 nop
317__socketcall_table_end:
318
319do_einval:
320 retl
321 mov -EINVAL, %o0
322do_efault:
323 retl
324 mov -EFAULT, %o0
325 321
326 .section __ex_table 322 .section __ex_table
327 .align 4 323 .align 4
328 .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault 324 .word 1b, __retl_efault, 2b, __retl_efault
325 .word 3b, __retl_efault, 4b, __retl_efault
326 .word 5b, __retl_efault, 6b, __retl_efault
327 .word 7b, __retl_efault, 8b, __retl_efault
328 .word 9b, __retl_efault, 10b, __retl_efault
329 .word 11b, __retl_efault, 12b, __retl_efault
330 .word 13b, __retl_efault, 14b, __retl_efault
331 .word 15b, __retl_efault, 16b, __retl_efault
332 .word 17b, __retl_efault, 18b, __retl_efault
333 .word 19b, __retl_efault, 20b, __retl_efault
334 .word 21b, __retl_efault, 22b, __retl_efault
335 .word 23b, __retl_efault, 24b, __retl_efault
336 .word 25b, __retl_efault, 26b, __retl_efault
337 .word 27b, __retl_efault, 28b, __retl_efault
338 .word 29b, __retl_efault, 30b, __retl_efault
339 .word 31b, __retl_efault, 32b, __retl_efault
340 .word 33b, __retl_efault, 34b, __retl_efault
341 .word 35b, __retl_efault, 36b, __retl_efault
342 .word 37b, __retl_efault, 38b, __retl_efault
343 .word 39b, __retl_efault, 40b, __retl_efault
344 .word 41b, __retl_efault, 42b, __retl_efault
345 .word 43b, __retl_efault, 44b, __retl_efault
346 .word 45b, __retl_efault, 46b, __retl_efault
347 .word 47b, __retl_efault, 48b, __retl_efault
348 .word 49b, __retl_efault, 50b, __retl_efault
349 .word 51b, __retl_efault, 52b, __retl_efault
350 .word 53b, __retl_efault, 54b, __retl_efault
351 .word 55b, __retl_efault, 56b, __retl_efault
352 .word 57b, __retl_efault, 58b, __retl_efault
353 .word 59b, __retl_efault, 60b, __retl_efault
354 .word 61b, __retl_efault, 62b, __retl_efault
329 .previous 355 .previous
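
The rework above trades one range entry covering the whole socketcall table for an explicit pair per user-space load: each numeric label 1: through 62: gets its own ".word Nb, __retl_efault" row, so the generic kernel exception search can be used and the sparc64-only range walk (deleted in mm/extable.c below) becomes unnecessary. A rough C model of the table those directives emit; the two-word layout is inferred from the ".word" pairs themselves, and find_fixup() is only a stand-in for the kernel's sorted search_exception_tables():

/* One entry per potentially faulting instruction, matching the
 * ".word Nb, __retl_efault" pairs above: the instruction's address and
 * the address to resume at (here always the shared -EFAULT stub). */
struct exception_table_entry {
	unsigned int insn;
	unsigned int fixup;
};

/* Stand-in lookup; the real kernel keeps the table sorted and binary
 * searches it. */
static const struct exception_table_entry *
find_fixup(const struct exception_table_entry *tbl, int n, unsigned int tpc)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].insn == tpc)
			return &tbl[i];
	return 0;
}

On a hit the trap handlers simply set regs->tpc to entry->fixup; no %g2 side channel is needed any more, which is what the traps.c and fault.c hunks below rely on.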
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 3a145fc39cf2..9478551cb020 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -119,8 +119,8 @@ startup_continue:
119 sethi %hi(itlb_load), %g2 119 sethi %hi(itlb_load), %g2
120 or %g2, %lo(itlb_load), %g2 120 or %g2, %lo(itlb_load), %g2
121 stx %g2, [%sp + 2047 + 128 + 0x18] 121 stx %g2, [%sp + 2047 + 128 + 0x18]
122 sethi %hi(mmu_ihandle_cache), %g2 122 sethi %hi(prom_mmu_ihandle_cache), %g2
123 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 123 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
124 stx %g2, [%sp + 2047 + 128 + 0x20] 124 stx %g2, [%sp + 2047 + 128 + 0x20]
125 sethi %hi(KERNBASE), %g2 125 sethi %hi(KERNBASE), %g2
126 stx %g2, [%sp + 2047 + 128 + 0x28] 126 stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -156,8 +156,8 @@ startup_continue:
156 sethi %hi(itlb_load), %g2 156 sethi %hi(itlb_load), %g2
157 or %g2, %lo(itlb_load), %g2 157 or %g2, %lo(itlb_load), %g2
158 stx %g2, [%sp + 2047 + 128 + 0x18] 158 stx %g2, [%sp + 2047 + 128 + 0x18]
159 sethi %hi(mmu_ihandle_cache), %g2 159 sethi %hi(prom_mmu_ihandle_cache), %g2
160 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 160 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
161 stx %g2, [%sp + 2047 + 128 + 0x20] 161 stx %g2, [%sp + 2047 + 128 + 0x20]
162 sethi %hi(KERNBASE + 0x400000), %g2 162 sethi %hi(KERNBASE + 0x400000), %g2
163 stx %g2, [%sp + 2047 + 128 + 0x28] 163 stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -190,8 +190,8 @@ do_dtlb:
190 sethi %hi(dtlb_load), %g2 190 sethi %hi(dtlb_load), %g2
191 or %g2, %lo(dtlb_load), %g2 191 or %g2, %lo(dtlb_load), %g2
192 stx %g2, [%sp + 2047 + 128 + 0x18] 192 stx %g2, [%sp + 2047 + 128 + 0x18]
193 sethi %hi(mmu_ihandle_cache), %g2 193 sethi %hi(prom_mmu_ihandle_cache), %g2
194 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 194 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
195 stx %g2, [%sp + 2047 + 128 + 0x20] 195 stx %g2, [%sp + 2047 + 128 + 0x20]
196 sethi %hi(KERNBASE), %g2 196 sethi %hi(KERNBASE), %g2
197 stx %g2, [%sp + 2047 + 128 + 0x28] 197 stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -228,8 +228,8 @@ do_dtlb:
228 sethi %hi(dtlb_load), %g2 228 sethi %hi(dtlb_load), %g2
229 or %g2, %lo(dtlb_load), %g2 229 or %g2, %lo(dtlb_load), %g2
230 stx %g2, [%sp + 2047 + 128 + 0x18] 230 stx %g2, [%sp + 2047 + 128 + 0x18]
231 sethi %hi(mmu_ihandle_cache), %g2 231 sethi %hi(prom_mmu_ihandle_cache), %g2
232 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 232 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
233 stx %g2, [%sp + 2047 + 128 + 0x20] 233 stx %g2, [%sp + 2047 + 128 + 0x20]
234 sethi %hi(KERNBASE + 0x400000), %g2 234 sethi %hi(KERNBASE + 0x400000), %g2
235 stx %g2, [%sp + 2047 + 128 + 0x28] 235 stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -336,20 +336,13 @@ do_unlock:
336 call init_irqwork_curcpu 336 call init_irqwork_curcpu
337 nop 337 nop
338 338
339 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) 339 /* Start using proper page size encodings in ctx register. */
340 ba,pt %xcc, 2f 340 sethi %hi(sparc64_kern_pri_context), %g3
341 nop 341 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
342
3431: /* Start using proper page size encodings in ctx register. */
344 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
345 mov PRIMARY_CONTEXT, %g1 342 mov PRIMARY_CONTEXT, %g1
346 sllx %g3, 32, %g3 343 stxa %g2, [%g1] ASI_DMMU
347 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
348 or %g3, %g2, %g3
349 stxa %g3, [%g1] ASI_DMMU
350 membar #Sync 344 membar #Sync
351 345
3522:
353 rdpr %pstate, %o1 346 rdpr %pstate, %o1
354 or %o1, PSTATE_IE, %o1 347 or %o1, PSTATE_IE, %o1
355 wrpr %o1, 0, %pstate 348 wrpr %o1, 0, %pstate
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index b280b2ef674f..5570e7bb22bb 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un
189 189
190 if (regs->tstate & TSTATE_PRIV) { 190 if (regs->tstate & TSTATE_PRIV) {
191 /* Test if this comes from uaccess places. */ 191 /* Test if this comes from uaccess places. */
192 unsigned long fixup; 192 const struct exception_table_entry *entry;
193 unsigned long g2 = regs->u_regs[UREG_G2];
194 193
195 if ((fixup = search_extables_range(regs->tpc, &g2))) { 194 entry = search_exception_tables(regs->tpc);
196 /* Ouch, somebody is trying ugly VM hole tricks on us... */ 195 if (entry) {
196 /* Ouch, somebody is trying VM hole tricks on us... */
197#ifdef DEBUG_EXCEPTIONS 197#ifdef DEBUG_EXCEPTIONS
198 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); 198 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
199 printk("EX_TABLE: insn<%016lx> fixup<%016lx> " 199 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
200 "g2<%016lx>\n", regs->tpc, fixup, g2); 200 regs->tpc, entry->fixup);
201#endif 201#endif
202 regs->tpc = fixup; 202 regs->tpc = entry->fixup;
203 regs->tnpc = regs->tpc + 4; 203 regs->tnpc = regs->tpc + 4;
204 regs->u_regs[UREG_G2] = g2;
205 return; 204 return;
206 } 205 }
207 /* Shit... */ 206 /* Shit... */
@@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void)
758 ecache_flush_size = (2 * largest_size); 757 ecache_flush_size = (2 * largest_size);
759 ecache_flush_linesize = smallest_linesize; 758 ecache_flush_linesize = smallest_linesize;
760 759
761 /* Discover a physically contiguous chunk of physical 760 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
762 * memory in 'sp_banks' of size ecache_flush_size calculated
763 * above. Store the physical base of this area at
764 * ecache_flush_physbase.
765 */
766 for (node = 0; ; node++) {
767 if (sp_banks[node].num_bytes == 0)
768 break;
769 if (sp_banks[node].num_bytes >= ecache_flush_size) {
770 ecache_flush_physbase = sp_banks[node].base_addr;
771 break;
772 }
773 }
774 761
775 /* Note: Zero would be a valid value of ecache_flush_physbase so 762 if (ecache_flush_physbase == ~0UL) {
776 * don't use that as the success test. :-)
777 */
778 if (sp_banks[node].num_bytes == 0) {
779 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " 763 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
780 "contiguous physical memory.\n", ecache_flush_size); 764 "contiguous physical memory.\n",
765 ecache_flush_size);
781 prom_halt(); 766 prom_halt();
782 } 767 }
783 768
@@ -869,14 +854,19 @@ static void cheetah_flush_ecache_line(unsigned long physaddr)
869 */ 854 */
870static void __cheetah_flush_icache(void) 855static void __cheetah_flush_icache(void)
871{ 856{
872 unsigned long i; 857 unsigned int icache_size, icache_line_size;
858 unsigned long addr;
859
860 icache_size = local_cpu_data().icache_size;
861 icache_line_size = local_cpu_data().icache_line_size;
873 862
874 /* Clear the valid bits in all the tags. */ 863 /* Clear the valid bits in all the tags. */
875 for (i = 0; i < (1 << 15); i += (1 << 5)) { 864 for (addr = 0; addr < icache_size; addr += icache_line_size) {
876 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 865 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
877 "membar #Sync" 866 "membar #Sync"
878 : /* no outputs */ 867 : /* no outputs */
879 : "r" (i | (2 << 3)), "i" (ASI_IC_TAG)); 868 : "r" (addr | (2 << 3)),
869 "i" (ASI_IC_TAG));
880 } 870 }
881} 871}
882 872
@@ -904,13 +894,17 @@ static void cheetah_flush_icache(void)
904 894
905static void cheetah_flush_dcache(void) 895static void cheetah_flush_dcache(void)
906{ 896{
907 unsigned long i; 897 unsigned int dcache_size, dcache_line_size;
898 unsigned long addr;
908 899
909 for (i = 0; i < (1 << 16); i += (1 << 5)) { 900 dcache_size = local_cpu_data().dcache_size;
901 dcache_line_size = local_cpu_data().dcache_line_size;
902
903 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
910 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 904 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
911 "membar #Sync" 905 "membar #Sync"
912 : /* no outputs */ 906 : /* no outputs */
913 : "r" (i), "i" (ASI_DCACHE_TAG)); 907 : "r" (addr), "i" (ASI_DCACHE_TAG));
914 } 908 }
915} 909}
916 910
@@ -921,24 +915,29 @@ static void cheetah_flush_dcache(void)
921 */ 915 */
922static void cheetah_plus_zap_dcache_parity(void) 916static void cheetah_plus_zap_dcache_parity(void)
923{ 917{
924 unsigned long i; 918 unsigned int dcache_size, dcache_line_size;
919 unsigned long addr;
920
921 dcache_size = local_cpu_data().dcache_size;
922 dcache_line_size = local_cpu_data().dcache_line_size;
925 923
926 for (i = 0; i < (1 << 16); i += (1 << 5)) { 924 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
927 unsigned long tag = (i >> 14); 925 unsigned long tag = (addr >> 14);
928 unsigned long j; 926 unsigned long line;
929 927
930 __asm__ __volatile__("membar #Sync\n\t" 928 __asm__ __volatile__("membar #Sync\n\t"
931 "stxa %0, [%1] %2\n\t" 929 "stxa %0, [%1] %2\n\t"
932 "membar #Sync" 930 "membar #Sync"
933 : /* no outputs */ 931 : /* no outputs */
934 : "r" (tag), "r" (i), 932 : "r" (tag), "r" (addr),
935 "i" (ASI_DCACHE_UTAG)); 933 "i" (ASI_DCACHE_UTAG));
936 for (j = i; j < i + (1 << 5); j += (1 << 3)) 934 for (line = addr; line < addr + dcache_line_size; line += 8)
937 __asm__ __volatile__("membar #Sync\n\t" 935 __asm__ __volatile__("membar #Sync\n\t"
938 "stxa %%g0, [%0] %1\n\t" 936 "stxa %%g0, [%0] %1\n\t"
939 "membar #Sync" 937 "membar #Sync"
940 : /* no outputs */ 938 : /* no outputs */
941 : "r" (j), "i" (ASI_DCACHE_DATA)); 939 : "r" (line),
940 "i" (ASI_DCACHE_DATA));
942 } 941 }
943} 942}
944 943
@@ -1332,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr)
1332/* Return non-zero if PADDR is a valid physical memory address. */ 1331/* Return non-zero if PADDR is a valid physical memory address. */
1333static int cheetah_check_main_memory(unsigned long paddr) 1332static int cheetah_check_main_memory(unsigned long paddr)
1334{ 1333{
1335 int i; 1334 unsigned long vaddr = PAGE_OFFSET + paddr;
1336 1335
1337 for (i = 0; ; i++) { 1336 if (vaddr > (unsigned long) high_memory)
1338 if (sp_banks[i].num_bytes == 0) 1337 return 0;
1339 break; 1338
1340 if (paddr >= sp_banks[i].base_addr && 1339 return kern_addr_valid(vaddr);
1341 paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1342 return 1;
1343 }
1344 return 0;
1345} 1340}
1346 1341
1347void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) 1342void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
@@ -1596,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
1596 /* OK, usermode access. */ 1591 /* OK, usermode access. */
1597 recoverable = 1; 1592 recoverable = 1;
1598 } else { 1593 } else {
1599 unsigned long g2 = regs->u_regs[UREG_G2]; 1594 const struct exception_table_entry *entry;
1600 unsigned long fixup = search_extables_range(regs->tpc, &g2);
1601 1595
1602 if (fixup != 0UL) { 1596 entry = search_exception_tables(regs->tpc);
1597 if (entry) {
1603 /* OK, kernel access to userspace. */ 1598 /* OK, kernel access to userspace. */
1604 recoverable = 1; 1599 recoverable = 1;
1605 1600
@@ -1618,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
1618 * recoverable condition. 1613 * recoverable condition.
1619 */ 1614 */
1620 if (recoverable) { 1615 if (recoverable) {
1621 regs->tpc = fixup; 1616 regs->tpc = entry->fixup;
1622 regs->tnpc = regs->tpc + 4; 1617 regs->tnpc = regs->tpc + 4;
1623 regs->u_regs[UREG_G2] = g2;
1624 } 1618 }
1625 } 1619 }
1626 } 1620 }
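
Alongside the exception-table conversion, the traps.c hunks above stop hard-coding cache geometry in the flush loops: the fixed (1 << 15)/(1 << 16) totals with (1 << 5) strides become the per-CPU sizes probed into cpu_data by the smp.c change earlier. The shape of the change, as a sketch (clear_tag() is a hypothetical stand-in for the stxa/membar inline asm):

/* Before: one fixed geometry, silently wrong on CPUs whose caches
 * differ from 64K with 32-byte lines. */
unsigned long i;
for (i = 0; i < (1 << 16); i += (1 << 5))
	clear_tag(i);

/* After: walk exactly the cache the firmware described. */
unsigned int dcache_size = local_cpu_data().dcache_size;
unsigned int dcache_line_size = local_cpu_data().dcache_line_size;
unsigned long addr;
for (addr = 0; addr < dcache_size; addr += dcache_line_size)
	clear_tag(addr);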
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
index cbb40585253c..1f5b5b708ce7 100644
--- a/arch/sparc64/kernel/una_asm.S
+++ b/arch/sparc64/kernel/una_asm.S
@@ -6,18 +6,11 @@
6 6
7 .text 7 .text
8 8
9kernel_unaligned_trap_fault:
10 call kernel_mna_trap_fault
11 nop
12 retl
13 nop
14 .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault
15
16 .globl __do_int_store 9 .globl __do_int_store
17__do_int_store: 10__do_int_store:
18 rd %asi, %o4 11 rd %asi, %o4
19 wr %o3, 0, %asi 12 wr %o3, 0, %asi
20 ldx [%o2], %g3 13 mov %o2, %g3
21 cmp %o1, 2 14 cmp %o1, 2
22 be,pn %icc, 2f 15 be,pn %icc, 2f
23 cmp %o1, 4 16 cmp %o1, 4
@@ -51,24 +44,24 @@ __do_int_store:
510: 440:
52 wr %o4, 0x0, %asi 45 wr %o4, 0x0, %asi
53 retl 46 retl
54 nop 47 mov 0, %o0
55 .size __do_int_store, .-__do_int_store 48 .size __do_int_store, .-__do_int_store
56 49
57 .section __ex_table 50 .section __ex_table
58 .word 4b, kernel_unaligned_trap_fault 51 .word 4b, __retl_efault
59 .word 5b, kernel_unaligned_trap_fault 52 .word 5b, __retl_efault
60 .word 6b, kernel_unaligned_trap_fault 53 .word 6b, __retl_efault
61 .word 7b, kernel_unaligned_trap_fault 54 .word 7b, __retl_efault
62 .word 8b, kernel_unaligned_trap_fault 55 .word 8b, __retl_efault
63 .word 9b, kernel_unaligned_trap_fault 56 .word 9b, __retl_efault
64 .word 10b, kernel_unaligned_trap_fault 57 .word 10b, __retl_efault
65 .word 11b, kernel_unaligned_trap_fault 58 .word 11b, __retl_efault
66 .word 12b, kernel_unaligned_trap_fault 59 .word 12b, __retl_efault
67 .word 13b, kernel_unaligned_trap_fault 60 .word 13b, __retl_efault
68 .word 14b, kernel_unaligned_trap_fault 61 .word 14b, __retl_efault
69 .word 15b, kernel_unaligned_trap_fault 62 .word 15b, __retl_efault
70 .word 16b, kernel_unaligned_trap_fault 63 .word 16b, __retl_efault
71 .word 17b, kernel_unaligned_trap_fault 64 .word 17b, __retl_efault
72 .previous 65 .previous
73 66
74 .globl do_int_load 67 .globl do_int_load
@@ -133,21 +126,21 @@ do_int_load:
1330: 1260:
134 wr %o5, 0x0, %asi 127 wr %o5, 0x0, %asi
135 retl 128 retl
136 nop 129 mov 0, %o0
137 .size __do_int_load, .-__do_int_load 130 .size __do_int_load, .-__do_int_load
138 131
139 .section __ex_table 132 .section __ex_table
140 .word 4b, kernel_unaligned_trap_fault 133 .word 4b, __retl_efault
141 .word 5b, kernel_unaligned_trap_fault 134 .word 5b, __retl_efault
142 .word 6b, kernel_unaligned_trap_fault 135 .word 6b, __retl_efault
143 .word 7b, kernel_unaligned_trap_fault 136 .word 7b, __retl_efault
144 .word 8b, kernel_unaligned_trap_fault 137 .word 8b, __retl_efault
145 .word 9b, kernel_unaligned_trap_fault 138 .word 9b, __retl_efault
146 .word 10b, kernel_unaligned_trap_fault 139 .word 10b, __retl_efault
147 .word 11b, kernel_unaligned_trap_fault 140 .word 11b, __retl_efault
148 .word 12b, kernel_unaligned_trap_fault 141 .word 12b, __retl_efault
149 .word 13b, kernel_unaligned_trap_fault 142 .word 13b, __retl_efault
150 .word 14b, kernel_unaligned_trap_fault 143 .word 14b, __retl_efault
151 .word 15b, kernel_unaligned_trap_fault 144 .word 15b, __retl_efault
152 .word 16b, kernel_unaligned_trap_fault 145 .word 16b, __retl_efault
153 .previous 146 .previous
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index da9739f0d437..70faf630603b 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -180,17 +180,18 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
180 die_if_kernel(str, regs); 180 die_if_kernel(str, regs);
181} 181}
182 182
183extern void do_int_load(unsigned long *dest_reg, int size, 183extern int do_int_load(unsigned long *dest_reg, int size,
184 unsigned long *saddr, int is_signed, int asi); 184 unsigned long *saddr, int is_signed, int asi);
185 185
186extern void __do_int_store(unsigned long *dst_addr, int size, 186extern int __do_int_store(unsigned long *dst_addr, int size,
187 unsigned long *src_val, int asi); 187 unsigned long src_val, int asi);
188 188
189static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, 189static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
190 struct pt_regs *regs, int asi) 190 struct pt_regs *regs, int asi, int orig_asi)
191{ 191{
192 unsigned long zero = 0; 192 unsigned long zero = 0;
193 unsigned long *src_val = &zero; 193 unsigned long *src_val_p = &zero;
194 unsigned long src_val;
194 195
195 if (size == 16) { 196 if (size == 16) {
196 size = 8; 197 size = 8;
@@ -198,9 +199,27 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
198 (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | 199 (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
199 (unsigned)fetch_reg(reg_num + 1, regs); 200 (unsigned)fetch_reg(reg_num + 1, regs);
200 } else if (reg_num) { 201 } else if (reg_num) {
201 src_val = fetch_reg_addr(reg_num, regs); 202 src_val_p = fetch_reg_addr(reg_num, regs);
202 } 203 }
203 __do_int_store(dst_addr, size, src_val, asi); 204 src_val = *src_val_p;
205 if (unlikely(asi != orig_asi)) {
206 switch (size) {
207 case 2:
208 src_val = swab16(src_val);
209 break;
210 case 4:
211 src_val = swab32(src_val);
212 break;
213 case 8:
214 src_val = swab64(src_val);
215 break;
216 case 16:
217 default:
218 BUG();
219 break;
220 };
221 }
222 return __do_int_store(dst_addr, size, src_val, asi);
204} 223}
205 224
206static inline void advance(struct pt_regs *regs) 225static inline void advance(struct pt_regs *regs)
@@ -223,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn)
223 return !floating_point_load_or_store_p(insn); 242 return !floating_point_load_or_store_p(insn);
224} 243}
225 244
226void kernel_mna_trap_fault(void) 245static void kernel_mna_trap_fault(void)
227{ 246{
228 struct pt_regs *regs = current_thread_info()->kern_una_regs; 247 struct pt_regs *regs = current_thread_info()->kern_una_regs;
229 unsigned int insn = current_thread_info()->kern_una_insn; 248 unsigned int insn = current_thread_info()->kern_una_insn;
230 unsigned long g2 = regs->u_regs[UREG_G2]; 249 const struct exception_table_entry *entry;
231 unsigned long fixup = search_extables_range(regs->tpc, &g2);
232 250
233 if (!fixup) { 251 entry = search_exception_tables(regs->tpc);
252 if (!entry) {
234 unsigned long address; 253 unsigned long address;
235 254
236 address = compute_effective_address(regs, insn, 255 address = compute_effective_address(regs, insn,
@@ -251,9 +270,8 @@ void kernel_mna_trap_fault(void)
251 die_if_kernel("Oops", regs); 270 die_if_kernel("Oops", regs);
252 /* Not reached */ 271 /* Not reached */
253 } 272 }
254 regs->tpc = fixup; 273 regs->tpc = entry->fixup;
255 regs->tnpc = regs->tpc + 4; 274 regs->tnpc = regs->tpc + 4;
256 regs->u_regs [UREG_G2] = g2;
257 275
258 regs->tstate &= ~TSTATE_ASI; 276 regs->tstate &= ~TSTATE_ASI;
259 regs->tstate |= (ASI_AIUS << 24UL); 277 regs->tstate |= (ASI_AIUS << 24UL);
@@ -275,7 +293,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
275 293
276 kernel_mna_trap_fault(); 294 kernel_mna_trap_fault();
277 } else { 295 } else {
278 unsigned long addr; 296 unsigned long addr, *reg_addr;
297 int orig_asi, asi, err;
279 298
280 addr = compute_effective_address(regs, insn, 299 addr = compute_effective_address(regs, insn,
281 ((insn >> 25) & 0x1f)); 300 ((insn >> 25) & 0x1f));
@@ -285,25 +304,59 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
285 regs->tpc, dirstrings[dir], addr, size, 304 regs->tpc, dirstrings[dir], addr, size,
286 regs->u_regs[UREG_RETPC]); 305 regs->u_regs[UREG_RETPC]);
287#endif 306#endif
307 orig_asi = asi = decode_asi(insn, regs);
308 switch (asi) {
309 case ASI_NL:
310 case ASI_AIUPL:
311 case ASI_AIUSL:
312 case ASI_PL:
313 case ASI_SL:
314 case ASI_PNFL:
315 case ASI_SNFL:
316 asi &= ~0x08;
317 break;
318 };
288 switch (dir) { 319 switch (dir) {
289 case load: 320 case load:
290 do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), 321 reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
291 size, (unsigned long *) addr, 322 err = do_int_load(reg_addr, size,
292 decode_signedness(insn), 323 (unsigned long *) addr,
293 decode_asi(insn, regs)); 324 decode_signedness(insn), asi);
325 if (likely(!err) && unlikely(asi != orig_asi)) {
326 unsigned long val_in = *reg_addr;
327 switch (size) {
328 case 2:
329 val_in = swab16(val_in);
330 break;
331 case 4:
332 val_in = swab32(val_in);
333 break;
334 case 8:
335 val_in = swab64(val_in);
336 break;
337 case 16:
338 default:
339 BUG();
340 break;
341 };
342 *reg_addr = val_in;
343 }
294 break; 344 break;
295 345
296 case store: 346 case store:
297 do_int_store(((insn>>25)&0x1f), size, 347 err = do_int_store(((insn>>25)&0x1f), size,
298 (unsigned long *) addr, regs, 348 (unsigned long *) addr, regs,
299 decode_asi(insn, regs)); 349 asi, orig_asi);
300 break; 350 break;
301 351
302 default: 352 default:
303 panic("Impossible kernel unaligned trap."); 353 panic("Impossible kernel unaligned trap.");
304 /* Not reached... */ 354 /* Not reached... */
305 } 355 }
306 advance(regs); 356 if (unlikely(err))
357 kernel_mna_trap_fault();
358 else
359 advance(regs);
307 } 360 }
308} 361}
309 362
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 9080e7cd4bb0..0340041f6143 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -208,7 +208,10 @@ static int __init us3_freq_init(void)
208 impl = ((ver >> 32) & 0xffff); 208 impl = ((ver >> 32) & 0xffff);
209 209
210 if (manuf == CHEETAH_MANUF && 210 if (manuf == CHEETAH_MANUF &&
211 (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) { 211 (impl == CHEETAH_IMPL ||
212 impl == CHEETAH_PLUS_IMPL ||
213 impl == JAGUAR_IMPL ||
214 impl == PANTHER_IMPL)) {
212 struct cpufreq_driver *driver; 215 struct cpufreq_driver *driver;
213 216
214 ret = -ENOMEM; 217 ret = -ENOMEM;
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index f47d0be39378..2af0cf0a8640 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -9,8 +9,7 @@ ENTRY(_start)
9jiffies = jiffies_64; 9jiffies = jiffies_64;
10SECTIONS 10SECTIONS
11{ 11{
12 swapper_pmd_dir = 0x0000000000402000; 12 swapper_low_pmd_dir = 0x0000000000402000;
13 empty_pg_dir = 0x0000000000403000;
14 . = 0x4000; 13 . = 0x4000;
15 .text 0x0000000000404000 : 14 .text 0x0000000000404000 :
16 { 15 {
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 99c809a1e5ac..39160926267b 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -16,23 +16,14 @@
16 .text 16 .text
17 17
18set_pcontext: 18set_pcontext:
19cplus_winfixup_insn_1: 19 sethi %hi(sparc64_kern_pri_context), %l1
20 sethi %hi(0), %l1 20 ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1
21 mov PRIMARY_CONTEXT, %g1 21 mov PRIMARY_CONTEXT, %g1
22 sllx %l1, 32, %l1
23cplus_winfixup_insn_2:
24 sethi %hi(0), %g2
25 or %l1, %g2, %l1
26 stxa %l1, [%g1] ASI_DMMU 22 stxa %l1, [%g1] ASI_DMMU
27 flush %g6 23 flush %g6
28 retl 24 retl
29 nop 25 nop
30 26
31cplus_wfinsn_1:
32 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
33cplus_wfinsn_2:
34 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
35
36 .align 32 27 .align 32
37 28
38 /* Here are the rules, pay attention. 29 /* Here are the rules, pay attention.
@@ -395,23 +386,3 @@ window_dax_from_user_common:
395 add %sp, PTREGS_OFF, %o0 386 add %sp, PTREGS_OFF, %o0
396 ba,pt %xcc, rtrap 387 ba,pt %xcc, rtrap
397 clr %l6 388 clr %l6
398
399
400 .globl cheetah_plus_patch_winfixup
401cheetah_plus_patch_winfixup:
402 sethi %hi(cplus_wfinsn_1), %o0
403 sethi %hi(cplus_winfixup_insn_1), %o2
404 lduw [%o0 + %lo(cplus_wfinsn_1)], %o1
405 or %o2, %lo(cplus_winfixup_insn_1), %o2
406 stw %o1, [%o2]
407 flush %o2
408
409 sethi %hi(cplus_wfinsn_2), %o0
410 sethi %hi(cplus_winfixup_insn_2), %o2
411 lduw [%o0 + %lo(cplus_wfinsn_2)], %o1
412 or %o2, %lo(cplus_winfixup_insn_2), %o2
413 stw %o1, [%o2]
414 flush %o2
415
416 retl
417 nop
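
Both trampoline.S above and this file drop their Cheetah+ special cases, a branch-selected constant in one, instruction patching via cheetah_plus_patch_winfixup() in the other, in favor of a single load of sparc64_kern_pri_context, which mm/init.c (below) declares __read_mostly and fills in at boot. A sketch of the idea only; the boot hook and CPU test named here are hypothetical, and the value merely mirrors what the deleted sethi/%uhi sequence used to assemble:

/* Data instead of patched code: compute the PRIMARY_CONTEXT value once
 * and let every trap path load it. Not the real init code. */
unsigned long sparc64_kern_pri_context __read_mostly;

void __init sketch_init_kern_context(void)	/* hypothetical hook */
{
	if (cpu_is_cheetah_plus())		/* hypothetical test */
		sparc64_kern_pri_context =
			CTX_CHEETAH_PLUS_NUC | CTX_CHEETAH_PLUS_CTX0;
	else
		sparc64_kern_pri_context = 0;	/* older encoding */
}

The payoff is that neither the SMP trampoline nor the window fixup paths need CPU-type branches or self-modifying code any longer.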
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index d968aebe83b2..c295806500f7 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
14 copy_in_user.o user_fixup.o memmove.o \ 14 copy_in_user.o user_fixup.o memmove.o \
15 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o 15 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
16 16
17lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
18
19obj-y += iomap.o 17obj-y += iomap.o
diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S
index 4e18989bd602..a0ded5c5aa5c 100644
--- a/arch/sparc64/lib/VISsave.S
+++ b/arch/sparc64/lib/VISsave.S
@@ -59,15 +59,17 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
59 be,pn %icc, 9b 59 be,pn %icc, 9b
60 add %g6, TI_FPREGS, %g2 60 add %g6, TI_FPREGS, %g2
61 andcc %o5, FPRS_DL, %g0 61 andcc %o5, FPRS_DL, %g0
62 membar #StoreStore | #LoadStore
63 62
64 be,pn %icc, 4f 63 be,pn %icc, 4f
65 add %g6, TI_FPREGS+0x40, %g3 64 add %g6, TI_FPREGS+0x40, %g3
65 membar #Sync
66 stda %f0, [%g2 + %g1] ASI_BLK_P 66 stda %f0, [%g2 + %g1] ASI_BLK_P
67 stda %f16, [%g3 + %g1] ASI_BLK_P 67 stda %f16, [%g3 + %g1] ASI_BLK_P
68 membar #Sync
68 andcc %o5, FPRS_DU, %g0 69 andcc %o5, FPRS_DU, %g0
69 be,pn %icc, 5f 70 be,pn %icc, 5f
704: add %g1, 128, %g1 714: add %g1, 128, %g1
72 membar #Sync
71 stda %f32, [%g2 + %g1] ASI_BLK_P 73 stda %f32, [%g2 + %g1] ASI_BLK_P
72 74
73 stda %f48, [%g3 + %g1] ASI_BLK_P 75 stda %f48, [%g3 + %g1] ASI_BLK_P
@@ -87,7 +89,7 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
87 sll %g1, 5, %g1 89 sll %g1, 5, %g1
88 add %g6, TI_FPREGS+0xc0, %g3 90 add %g6, TI_FPREGS+0xc0, %g3
89 wr %g0, FPRS_FEF, %fprs 91 wr %g0, FPRS_FEF, %fprs
90 membar #StoreStore | #LoadStore 92 membar #Sync
91 stda %f32, [%g2 + %g1] ASI_BLK_P 93 stda %f32, [%g2 + %g1] ASI_BLK_P
92 stda %f48, [%g3 + %g1] ASI_BLK_P 94 stda %f48, [%g3 + %g1] ASI_BLK_P
93 membar #Sync 95 membar #Sync
@@ -128,8 +130,8 @@ VISenterhalf:
128 be,pn %icc, 4f 130 be,pn %icc, 4f
129 add %g6, TI_FPREGS, %g2 131 add %g6, TI_FPREGS, %g2
130 132
131 membar #StoreStore | #LoadStore
132 add %g6, TI_FPREGS+0x40, %g3 133 add %g6, TI_FPREGS+0x40, %g3
134 membar #Sync
133 stda %f0, [%g2 + %g1] ASI_BLK_P 135 stda %f0, [%g2 + %g1] ASI_BLK_P
134 stda %f16, [%g3 + %g1] ASI_BLK_P 136 stda %f16, [%g3 + %g1] ASI_BLK_P
135 membar #Sync 137 membar #Sync
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
deleted file mode 100644
index 8ee288dd0afc..000000000000
--- a/arch/sparc64/lib/dec_and_lock.S
+++ /dev/null
@@ -1,80 +0,0 @@
1/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
2 * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
3 * using cas and ldstub instructions.
4 *
5 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
6 */
7#include <linux/config.h>
8#include <asm/thread_info.h>
9
10 .text
11 .align 64
12
13 /* CAS basically works like this:
14 *
15 * void CAS(MEM, REG1, REG2)
16 * {
17 * START_ATOMIC();
18 * if (*(MEM) == REG1) {
19 * TMP = *(MEM);
20 * *(MEM) = REG2;
21 * REG2 = TMP;
22 * } else
23 * REG2 = *(MEM);
24 * END_ATOMIC();
25 * }
26 */
27
28 .globl _atomic_dec_and_lock
29_atomic_dec_and_lock: /* %o0 = counter, %o1 = lock */
30loop1: lduw [%o0], %g2
31 subcc %g2, 1, %g7
32 be,pn %icc, start_to_zero
33 nop
34nzero: cas [%o0], %g2, %g7
35 cmp %g2, %g7
36 bne,pn %icc, loop1
37 mov 0, %g1
38
39out:
40 membar #StoreLoad | #StoreStore
41 retl
42 mov %g1, %o0
43start_to_zero:
44#ifdef CONFIG_PREEMPT
45 ldsw [%g6 + TI_PRE_COUNT], %g3
46 add %g3, 1, %g3
47 stw %g3, [%g6 + TI_PRE_COUNT]
48#endif
49to_zero:
50 ldstub [%o1], %g3
51 membar #StoreLoad | #StoreStore
52 brnz,pn %g3, spin_on_lock
53 nop
54loop2: cas [%o0], %g2, %g7 /* ASSERT(g7 == 0) */
55 cmp %g2, %g7
56
57 be,pt %icc, out
58 mov 1, %g1
59 lduw [%o0], %g2
60 subcc %g2, 1, %g7
61 be,pn %icc, loop2
62 nop
63 membar #StoreStore | #LoadStore
64 stb %g0, [%o1]
65#ifdef CONFIG_PREEMPT
66 ldsw [%g6 + TI_PRE_COUNT], %g3
67 sub %g3, 1, %g3
68 stw %g3, [%g6 + TI_PRE_COUNT]
69#endif
70
71 b,pt %xcc, nzero
72 nop
73spin_on_lock:
74 ldub [%o1], %g3
75 membar #LoadLoad
76 brnz,pt %g3, spin_on_lock
77 nop
78 ba,pt %xcc, to_zero
79 nop
80 nop
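
The deleted dec_and_lock.S, CAS pseudocode comment and all, was sparc64's hand-written atomic_dec_and_lock(); its export disappears from sparc64_ksyms.c above and the lib/Makefile rule goes below, leaving the kernel's generic C implementation in charge. That generic version is essentially the following, which doubles as a statement of the contract (return 1 with the lock held exactly when the counter reaches zero); the function name is ours, and this is a sketch rather than a copy of kernel sources:

int atomic_dec_and_lock_sketch(atomic_t *counter, spinlock_t *lock)
{
	/* The decrement-to-zero case must be atomic with respect to the
	 * lock, so take the lock first and decrement under it. */
	spin_lock(lock);
	if (atomic_dec_and_test(counter))
		return 1;	/* hit zero: caller unlocks when done */
	spin_unlock(lock);
	return 0;		/* not zero: lock already released */
}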
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
index 09cbbaa0ebf4..e1264650ca7a 100644
--- a/arch/sparc64/lib/strncpy_from_user.S
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -125,15 +125,11 @@ __strncpy_from_user:
125 add %o2, %o3, %o0 125 add %o2, %o3, %o0
126 .size __strncpy_from_user, .-__strncpy_from_user 126 .size __strncpy_from_user, .-__strncpy_from_user
127 127
128 .section .fixup,#alloc,#execinstr
129 .align 4
1304: retl
131 mov -EFAULT, %o0
132
133 .section __ex_table,#alloc 128 .section __ex_table,#alloc
134 .align 4 129 .align 4
135 .word 60b, 4b 130 .word 60b, __retl_efault
136 .word 61b, 4b 131 .word 61b, __retl_efault
137 .word 62b, 4b 132 .word 62b, __retl_efault
138 .word 63b, 4b 133 .word 63b, __retl_efault
139 .word 64b, 4b 134 .word 64b, __retl_efault
135 .previous
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
index 0278e34125db..19d1fdb17d0e 100644
--- a/arch/sparc64/lib/user_fixup.c
+++ b/arch/sparc64/lib/user_fixup.c
@@ -11,61 +11,56 @@
11 11
12/* Calculating the exact fault address when using 12/* Calculating the exact fault address when using
13 * block loads and stores can be very complicated. 13 * block loads and stores can be very complicated.
14 *
14 * Instead of trying to be clever and handling all 15 * Instead of trying to be clever and handling all
15 * of the cases, just fix things up simply here. 16 * of the cases, just fix things up simply here.
16 */ 17 */
17 18
18unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) 19static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
19{ 20{
20 char *dst = to; 21 unsigned long fault_addr = current_thread_info()->fault_address;
21 const char __user *src = from; 22 unsigned long end = start + size;
22 23
23 while (size) { 24 if (fault_addr < start || fault_addr >= end) {
24 if (__get_user(*dst, src)) 25 *offset = 0;
25 break; 26 } else {
26 dst++; 27 *offset = fault_addr - start;
27 src++; 28 size = end - fault_addr;
28 size--;
29 } 29 }
30 return size;
31}
30 32
31 if (size) 33unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
32 memset(dst, 0, size); 34{
35 unsigned long offset;
36
37 size = compute_size((unsigned long) from, size, &offset);
38 if (likely(size))
39 memset(to + offset, 0, size);
33 40
34 return size; 41 return size;
35} 42}
36 43
37unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) 44unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
38{ 45{
39 char __user *dst = to; 46 unsigned long offset;
40 const char *src = from;
41
42 while (size) {
43 if (__put_user(*src, dst))
44 break;
45 dst++;
46 src++;
47 size--;
48 }
49 47
50 return size; 48 return compute_size((unsigned long) to, size, &offset);
51} 49}
52 50
53unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) 51unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
54{ 52{
55 char __user *dst = to; 53 unsigned long fault_addr = current_thread_info()->fault_address;
56 char __user *src = from; 54 unsigned long start = (unsigned long) to;
55 unsigned long end = start + size;
57 56
58 while (size) { 57 if (fault_addr >= start && fault_addr < end)
59 char tmp; 58 return end - fault_addr;
60 59
61 if (__get_user(tmp, src)) 60 start = (unsigned long) from;
62 break; 61 end = start + size;
63 if (__put_user(tmp, dst)) 62 if (fault_addr >= start && fault_addr < end)
64 break; 63 return end - fault_addr;
65 dst++;
66 src++;
67 size--;
68 }
69 64
70 return size; 65 return size;
71} 66}
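
The rewritten fixups above stop replaying the copy byte by byte with __get_user()/__put_user(); instead they use the faulting address recorded in thread_info to compute the residue arithmetically. A standalone model of compute_size() with a worked case (fault_addr is passed as a parameter here instead of being read from thread_info; the offset is measured forward from the buffer start, matching the memset(to + offset, ...) call in copy_from_user_fixup()):

#include <stdio.h>

static unsigned long compute_size(unsigned long start, unsigned long size,
				  unsigned long fault_addr,
				  unsigned long *offset)
{
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end) {
		*offset = 0;		/* fault elsewhere: nothing done */
		return size;
	}
	*offset = fault_addr - start;	/* bytes that were transferred */
	return end - fault_addr;	/* bytes left untransferred */
}

int main(void)
{
	unsigned long off;
	/* A 256-byte copy from 0x1000 faulting at 0x1040: 64 bytes
	 * landed, 192 remain, so copy_from_user_fixup() zeroes to+64. */
	unsigned long left = compute_size(0x1000, 256, 0x1040, &off);

	printf("left=%lu offset=%lu\n", left, off); /* left=192 offset=64 */
	return 0;
}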
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
index cda87333a77b..9d0960e69f48 100644
--- a/arch/sparc64/mm/Makefile
+++ b/arch/sparc64/mm/Makefile
@@ -5,6 +5,6 @@
5EXTRA_AFLAGS := -ansi 5EXTRA_AFLAGS := -ansi
6EXTRA_CFLAGS := -Werror 6EXTRA_CFLAGS := -Werror
7 7
8obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o 8obj-y := ultra.o tlb.o fault.o init.o generic.o
9 9
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
deleted file mode 100644
index ec334297ff4f..000000000000
--- a/arch/sparc64/mm/extable.c
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * linux/arch/sparc64/mm/extable.c
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <asm/uaccess.h>
8
9extern const struct exception_table_entry __start___ex_table[];
10extern const struct exception_table_entry __stop___ex_table[];
11
12void sort_extable(struct exception_table_entry *start,
13 struct exception_table_entry *finish)
14{
15}
16
17/* Caller knows they are in a range if ret->fixup == 0 */
18const struct exception_table_entry *
19search_extable(const struct exception_table_entry *start,
20 const struct exception_table_entry *last,
21 unsigned long value)
22{
23 const struct exception_table_entry *walk;
24
25 /* Single insn entries are encoded as:
26 * word 1: insn address
27 * word 2: fixup code address
28 *
29 * Range entries are encoded as:
30 * word 1: first insn address
31 * word 2: 0
32 * word 3: last insn address + 4 bytes
33 * word 4: fixup code address
34 *
35 * See asm/uaccess.h for more details.
36 */
37
38 /* 1. Try to find an exact match. */
39 for (walk = start; walk <= last; walk++) {
40 if (walk->fixup == 0) {
41 /* A range entry, skip both parts. */
42 walk++;
43 continue;
44 }
45
46 if (walk->insn == value)
47 return walk;
48 }
49
50 /* 2. Try to find a range match. */
51 for (walk = start; walk <= (last - 1); walk++) {
52 if (walk->fixup)
53 continue;
54
55 if (walk[0].insn <= value && walk[1].insn > value)
56 return walk;
57
58 walk++;
59 }
60
61 return NULL;
62}
63
64/* Special extable search, which handles ranges. Returns fixup */
65unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
66{
67 const struct exception_table_entry *entry;
68
69 entry = search_exception_tables(addr);
70 if (!entry)
71 return 0;
72
73 /* Inside range? Fix g2 and return correct fixup */
74 if (!entry->fixup) {
75 *g2 = (addr - entry->insn) / 4;
76 return (entry + 1)->fixup;
77 }
78
79 return entry->fixup;
80}
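
For the record, the comment block in this deleted file is the best description of the old encoding: single entries are (insn, fixup) pairs, and a range is two consecutive entries whose first half carries fixup == 0, with %g2 receiving the instruction index on a range hit. A compact C rendering of the deleted range resolution, handy when reading older sparc64 fixup code; it assumes the entry was already matched by address, as search_extables_range() did via search_exception_tables():

struct old_extable_entry {
	unsigned long insn;
	unsigned long fixup;	/* 0 marks the first half of a range */
};

static unsigned long old_range_fixup(const struct old_extable_entry *entry,
				     unsigned long addr, unsigned long *g2)
{
	if (!entry->fixup) {
		/* Range entry: record the insn index for the handler
		 * and take the fixup from the second half. */
		*g2 = (addr - entry->insn) / 4;
		return (entry + 1)->fixup;
	}
	return entry->fixup;	/* ordinary single-instruction entry */
}

With sys32.S, una_asm.S and strncpy_from_user.S converted to one entry per instruction, nothing generates ranges any more, so the file goes away and mm/Makefile drops extable.o.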
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index db1e3310e907..31fbc67719a1 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -32,8 +32,6 @@
32 32
33#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0])) 33#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
34 34
35extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
36
37/* 35/*
38 * To debug kernel to catch accesses to certain virtual/physical addresses. 36 * To debug kernel to catch accesses to certain virtual/physical addresses.
39 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. 37 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
@@ -71,53 +69,6 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
71 : "memory"); 69 : "memory");
72} 70}
73 71
74/* Nice, simple, prom library does all the sweating for us. ;) */
75unsigned long __init prom_probe_memory (void)
76{
77 register struct linux_mlist_p1275 *mlist;
78 register unsigned long bytes, base_paddr, tally;
79 register int i;
80
81 i = 0;
82 mlist = *prom_meminfo()->p1275_available;
83 bytes = tally = mlist->num_bytes;
84 base_paddr = mlist->start_adr;
85
86 sp_banks[0].base_addr = base_paddr;
87 sp_banks[0].num_bytes = bytes;
88
89 while (mlist->theres_more != (void *) 0) {
90 i++;
91 mlist = mlist->theres_more;
92 bytes = mlist->num_bytes;
93 tally += bytes;
94 if (i >= SPARC_PHYS_BANKS-1) {
95 printk ("The machine has more banks than "
96 "this kernel can support\n"
97 "Increase the SPARC_PHYS_BANKS "
98 "setting (currently %d)\n",
99 SPARC_PHYS_BANKS);
100 i = SPARC_PHYS_BANKS-1;
101 break;
102 }
103
104 sp_banks[i].base_addr = mlist->start_adr;
105 sp_banks[i].num_bytes = mlist->num_bytes;
106 }
107
108 i++;
109 sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
110 sp_banks[i].num_bytes = 0;
111
112 /* Now mask all bank sizes on a page boundary, it is all we can
113 * use anyways.
114 */
115 for (i = 0; sp_banks[i].num_bytes != 0; i++)
116 sp_banks[i].num_bytes &= PAGE_MASK;
117
118 return tally;
119}
120
121static void __kprobes unhandled_fault(unsigned long address, 72static void __kprobes unhandled_fault(unsigned long address,
122 struct task_struct *tsk, 73 struct task_struct *tsk,
123 struct pt_regs *regs) 74 struct pt_regs *regs)
@@ -242,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
242static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, 193static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
243 unsigned int insn, unsigned long address) 194 unsigned int insn, unsigned long address)
244{ 195{
245 unsigned long g2;
246 unsigned char asi = ASI_P; 196 unsigned char asi = ASI_P;
247 197
248 if ((!insn) && (regs->tstate & TSTATE_PRIV)) 198 if ((!insn) && (regs->tstate & TSTATE_PRIV))
@@ -273,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
273 } 223 }
274 } 224 }
275 225
276 g2 = regs->u_regs[UREG_G2];
277
278 /* Is this in ex_table? */ 226 /* Is this in ex_table? */
279 if (regs->tstate & TSTATE_PRIV) { 227 if (regs->tstate & TSTATE_PRIV) {
280 unsigned long fixup; 228 const struct exception_table_entry *entry;
281 229
282 if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) { 230 if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
283 if (insn & 0x2000) 231 if (insn & 0x2000)
@@ -288,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
288 236
289 /* Look in asi.h: All _S asis have LS bit set */ 237 /* Look in asi.h: All _S asis have LS bit set */
290 if ((asi & 0x1) && 238 if ((asi & 0x1) &&
291 (fixup = search_extables_range(regs->tpc, &g2))) { 239 (entry = search_exception_tables(regs->tpc))) {
292 regs->tpc = fixup; 240 regs->tpc = entry->fixup;
293 regs->tnpc = regs->tpc + 4; 241 regs->tnpc = regs->tpc + 4;
294 regs->u_regs[UREG_G2] = g2;
295 return; 242 return;
296 } 243 }
297 } else { 244 } else {
@@ -461,7 +408,7 @@ good_area:
461 } 408 }
462 409
463 up_read(&mm->mmap_sem); 410 up_read(&mm->mmap_sem);
464 goto fault_done; 411 return;
465 412
466 /* 413 /*
467 * Something tried to access memory that isn't in our memory map.. 414 * Something tried to access memory that isn't in our memory map..
@@ -473,8 +420,7 @@ bad_area:
473 420
474handle_kernel_fault: 421handle_kernel_fault:
475 do_kernel_fault(regs, si_code, fault_code, insn, address); 422 do_kernel_fault(regs, si_code, fault_code, insn, address);
476 423 return;
477 goto fault_done;
478 424
479/* 425/*
480 * We ran out of memory, or some other thing happened to us that made 426 * We ran out of memory, or some other thing happened to us that made
@@ -505,9 +451,4 @@ do_sigbus:
505 /* Kernel mode? Handle exceptions or die */ 451 /* Kernel mode? Handle exceptions or die */
506 if (regs->tstate & TSTATE_PRIV) 452 if (regs->tstate & TSTATE_PRIV)
507 goto handle_kernel_fault; 453 goto handle_kernel_fault;
508
509fault_done:
510 /* These values are no longer needed, clear them. */
511 set_thread_fault_code(0);
512 current_thread_info()->fault_address = 0;
513} 454}
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index fdb1ebb308c9..1e44ee26cee8 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -20,6 +20,8 @@
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/kprobes.h> 22#include <linux/kprobes.h>
23#include <linux/cache.h>
24#include <linux/sort.h>
23 25
24#include <asm/head.h> 26#include <asm/head.h>
25#include <asm/system.h> 27#include <asm/system.h>
@@ -40,24 +42,80 @@
40 42
41extern void device_scan(void); 43extern void device_scan(void);
42 44
43struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; 45#define MAX_BANKS 32
44 46
45unsigned long *sparc64_valid_addr_bitmap; 47static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
48static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
49static int pavail_ents __initdata;
50static int pavail_rescan_ents __initdata;
51
52static int cmp_p64(const void *a, const void *b)
53{
54 const struct linux_prom64_registers *x = a, *y = b;
55
56 if (x->phys_addr > y->phys_addr)
57 return 1;
58 if (x->phys_addr < y->phys_addr)
59 return -1;
60 return 0;
61}
62
63static void __init read_obp_memory(const char *property,
64 struct linux_prom64_registers *regs,
65 int *num_ents)
66{
67 int node = prom_finddevice("/memory");
68 int prop_size = prom_getproplen(node, property);
69 int ents, ret, i;
70
71 ents = prop_size / sizeof(struct linux_prom64_registers);
72 if (ents > MAX_BANKS) {
73 prom_printf("The machine has more %s property entries than "
74 "this kernel can support (%d).\n",
75 property, MAX_BANKS);
76 prom_halt();
77 }
78
79 ret = prom_getproperty(node, property, (char *) regs, prop_size);
80 if (ret == -1) {
 81 prom_printf("Couldn't get %s property from /memory.\n", property);
82 prom_halt();
83 }
84
85 *num_ents = ents;
86
87 /* Sanitize what we got from the firmware, by page aligning
88 * everything.
89 */
90 for (i = 0; i < ents; i++) {
91 unsigned long base, size;
92
93 base = regs[i].phys_addr;
94 size = regs[i].reg_size;
95
96 size &= PAGE_MASK;
97 if (base & ~PAGE_MASK) {
98 unsigned long new_base = PAGE_ALIGN(base);
99
100 size -= new_base - base;
101 if ((long) size < 0L)
102 size = 0UL;
103 base = new_base;
104 }
105 regs[i].phys_addr = base;
106 regs[i].reg_size = size;
107 }
108 sort(regs, ents, sizeof(struct linux_prom64_registers),
109 cmp_p64, NULL);
110}
111
112unsigned long *sparc64_valid_addr_bitmap __read_mostly;
46 113
47/* Ugly, but necessary... -DaveM */ 114/* Ugly, but necessary... -DaveM */
48unsigned long phys_base; 115unsigned long phys_base __read_mostly;
49unsigned long kern_base; 116unsigned long kern_base __read_mostly;
50unsigned long kern_size; 117unsigned long kern_size __read_mostly;
51unsigned long pfn_base; 118unsigned long pfn_base __read_mostly;
52
53/* This is even uglier. We have a problem where the kernel may not be
54 * located at phys_base. However, initial __alloc_bootmem() calls need to
55 * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
56 * those page mappings wont work. Things are ok after inherit_prom_mappings
57 * is called though. Dave says he'll clean this up some other time.
58 * -- BenC
59 */
60static unsigned long bootmap_base;
61 119
62/* get_new_mmu_context() uses "cache + 1". */ 120/* get_new_mmu_context() uses "cache + 1". */
63DEFINE_SPINLOCK(ctx_alloc_lock); 121DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -73,7 +131,13 @@ extern unsigned long sparc_ramdisk_image64;
73extern unsigned int sparc_ramdisk_image; 131extern unsigned int sparc_ramdisk_image;
74extern unsigned int sparc_ramdisk_size; 132extern unsigned int sparc_ramdisk_size;
75 133
76struct page *mem_map_zero; 134struct page *mem_map_zero __read_mostly;
135
136unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
137
138unsigned long sparc64_kern_pri_context __read_mostly;
139unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
140unsigned long sparc64_kern_sec_context __read_mostly;
77 141
78int bigkernel = 0; 142int bigkernel = 0;
79 143
@@ -179,8 +243,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
179 : "g1", "g7"); 243 : "g1", "g7");
180} 244}
181 245
182extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
183
184void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) 246void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
185{ 247{
186 struct page *page; 248 struct page *page;
@@ -207,10 +269,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
207 269
208 put_cpu(); 270 put_cpu();
209 } 271 }
210
211 if (get_thread_fault_code())
212 __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
213 address, pte, get_thread_fault_code());
214} 272}
215 273
216void flush_dcache_page(struct page *page) 274void flush_dcache_page(struct page *page)
@@ -310,6 +368,11 @@ struct linux_prom_translation {
310 unsigned long data; 368 unsigned long data;
311}; 369};
312 370
371/* Exported for kernel TLB miss handling in ktlb.S */
372struct linux_prom_translation prom_trans[512] __read_mostly;
373unsigned int prom_trans_ents __read_mostly;
374unsigned int swapper_pgd_zero __read_mostly;
375
313extern unsigned long prom_boot_page; 376extern unsigned long prom_boot_page;
314extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); 377extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
315extern int prom_get_mmu_ihandle(void); 378extern int prom_get_mmu_ihandle(void);
@@ -318,297 +381,162 @@ extern void register_prom_callbacks(void);
318/* Exported for SMP bootup purposes. */ 381/* Exported for SMP bootup purposes. */
319unsigned long kern_locked_tte_data; 382unsigned long kern_locked_tte_data;
320 383
321void __init early_pgtable_allocfail(char *type)
322{
323 prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
324 prom_halt();
325}
326
327#define BASE_PAGE_SIZE 8192
328static pmd_t *prompmd;
329
330/* 384/*
331 * Translate PROM's mapping we capture at boot time into physical address. 385 * Translate PROM's mapping we capture at boot time into physical address.
332 * The second parameter is only set from prom_callback() invocations. 386 * The second parameter is only set from prom_callback() invocations.
333 */ 387 */
334unsigned long prom_virt_to_phys(unsigned long promva, int *error) 388unsigned long prom_virt_to_phys(unsigned long promva, int *error)
335{ 389{
336 pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff); 390 int i;
337 pte_t *ptep; 391
338 unsigned long base; 392 for (i = 0; i < prom_trans_ents; i++) {
339 393 struct linux_prom_translation *p = &prom_trans[i];
340 if (pmd_none(*pmdp)) { 394
341 if (error) 395 if (promva >= p->virt &&
342 *error = 1; 396 promva < (p->virt + p->size)) {
343 return(0); 397 unsigned long base = p->data & _PAGE_PADDR;
344 } 398
345 ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff); 399 if (error)
346 if (!pte_present(*ptep)) { 400 *error = 0;
347 if (error) 401 return base + (promva & (8192 - 1));
348 *error = 1; 402 }
349 return(0);
350 }
351 if (error) {
352 *error = 0;
353 return(pte_val(*ptep));
354 } 403 }
355 base = pte_val(*ptep) & _PAGE_PADDR; 404 if (error)
356 return(base + (promva & (BASE_PAGE_SIZE - 1))); 405 *error = 1;
406 return 0UL;
357} 407}
358 408
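
A standalone model of the new linear lookup; with at most 512 sorted entries in prom_trans[], a linear scan is cheap. The physical-address mask value below is illustrative, the real one comes from asm/pgtable.h:

#include <stdio.h>

#define PADDR_MASK 0x000001ffffffe000UL	/* illustrative _PAGE_PADDR */
#define PAGE_SIZE  8192UL

struct xlat { unsigned long virt, size, data; };

static unsigned long virt_to_phys_xlat(const struct xlat *t, int ents,
				       unsigned long va, int *error)
{
	int i;

	for (i = 0; i < ents; i++) {
		if (va >= t[i].virt && va < t[i].virt + t[i].size) {
			if (error)
				*error = 0;
			/* Base physical page plus the in-page offset. */
			return (t[i].data & PADDR_MASK) +
			       (va & (PAGE_SIZE - 1));
		}
	}
	if (error)
		*error = 1;
	return 0UL;
}

int main(void)
{
	struct xlat t[1] = { { 0xf0000000UL, 0x10000UL, 0x40000000UL } };
	int err;

	printf("%lx err=%d\n",
	       virt_to_phys_xlat(t, 1, 0xf0003456UL, &err), err);
	return 0;
}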
 359static void inherit_prom_mappings(void) 409/* The obp translations are saved in 8K-page units, since obp can
 410 * use a mixture of page sizes. Misses to the LOW_OBP_ADDRESS ->
411 * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte
412 * scheme (also, see rant in inherit_locked_prom_mappings()).
413 */
414static inline int in_obp_range(unsigned long vaddr)
360{ 415{
361 struct linux_prom_translation *trans; 416 return (vaddr >= LOW_OBP_ADDRESS &&
362 unsigned long phys_page, tte_vaddr, tte_data; 417 vaddr < HI_OBP_ADDRESS);
363 void (*remap_func)(unsigned long, unsigned long, int); 418}
364 pmd_t *pmdp; 419
365 pte_t *ptep; 420static int cmp_ptrans(const void *a, const void *b)
366 int node, n, i, tsz; 421{
367 extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2]; 422 const struct linux_prom_translation *x = a, *y = b;
423
424 if (x->virt > y->virt)
425 return 1;
426 if (x->virt < y->virt)
427 return -1;
428 return 0;
429}
430
431/* Read OBP translations property into 'prom_trans[]'. */
432static void __init read_obp_translations(void)
433{
434 int n, node, ents, first, last, i;
368 435
369 node = prom_finddevice("/virtual-memory"); 436 node = prom_finddevice("/virtual-memory");
370 n = prom_getproplen(node, "translations"); 437 n = prom_getproplen(node, "translations");
371 if (n == 0 || n == -1) { 438 if (unlikely(n == 0 || n == -1)) {
372 prom_printf("Couldn't get translation property\n"); 439 prom_printf("prom_mappings: Couldn't get size.\n");
373 prom_halt(); 440 prom_halt();
374 } 441 }
375 n += 5 * sizeof(struct linux_prom_translation); 442 if (unlikely(n > sizeof(prom_trans))) {
 376 for (tsz = 1; tsz < n; tsz <<= 1) 443 prom_printf("prom_mappings: Size %d is too big.\n", n);
377 /* empty */;
378 trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
379 if (trans == NULL) {
380 prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
381 prom_halt(); 444 prom_halt();
382 } 445 }
383 memset(trans, 0, tsz);
384 446
385 if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { 447 if ((n = prom_getproperty(node, "translations",
386 prom_printf("Couldn't get translation property\n"); 448 (char *)&prom_trans[0],
449 sizeof(prom_trans))) == -1) {
450 prom_printf("prom_mappings: Couldn't get property.\n");
387 prom_halt(); 451 prom_halt();
388 } 452 }
389 n = n / sizeof(*trans);
390 453
391 /* 454 n = n / sizeof(struct linux_prom_translation);
392 * The obp translations are saved based on 8k pagesize, since obp can
393 * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
394 * ie obp range, are handled in entry.S and do not use the vpte scheme
395 * (see rant in inherit_locked_prom_mappings()).
396 */
397#define OBP_PMD_SIZE 2048
398 prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
399 if (prompmd == NULL)
400 early_pgtable_allocfail("pmd");
401 memset(prompmd, 0, OBP_PMD_SIZE);
402 for (i = 0; i < n; i++) {
403 unsigned long vaddr;
404
405 if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
406 for (vaddr = trans[i].virt;
407 ((vaddr < trans[i].virt + trans[i].size) &&
408 (vaddr < HI_OBP_ADDRESS));
409 vaddr += BASE_PAGE_SIZE) {
410 unsigned long val;
411
412 pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
413 if (pmd_none(*pmdp)) {
414 ptep = __alloc_bootmem(BASE_PAGE_SIZE,
415 BASE_PAGE_SIZE,
416 bootmap_base);
417 if (ptep == NULL)
418 early_pgtable_allocfail("pte");
419 memset(ptep, 0, BASE_PAGE_SIZE);
420 pmd_set(pmdp, ptep);
421 }
422 ptep = (pte_t *)__pmd_page(*pmdp) +
423 ((vaddr >> 13) & 0x3ff);
424 455
425 val = trans[i].data; 456 ents = n;
426 457
427 /* Clear diag TTE bits. */ 458 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
428 if (tlb_type == spitfire) 459 cmp_ptrans, NULL);
429 val &= ~0x0003fe0000000000UL;
430 460
431 set_pte_at(&init_mm, vaddr, 461 /* Now kick out all the non-OBP entries. */
432 ptep, __pte(val | _PAGE_MODIFIED)); 462 for (i = 0; i < ents; i++) {
433 trans[i].data += BASE_PAGE_SIZE; 463 if (in_obp_range(prom_trans[i].virt))
434 } 464 break;
435 } 465 }
466 first = i;
467 for (; i < ents; i++) {
468 if (!in_obp_range(prom_trans[i].virt))
469 break;
436 } 470 }
437 phys_page = __pa(prompmd); 471 last = i;
438 obp_iaddr_patch[0] |= (phys_page >> 10);
439 obp_iaddr_patch[1] |= (phys_page & 0x3ff);
440 flushi((long)&obp_iaddr_patch[0]);
441 obp_daddr_patch[0] |= (phys_page >> 10);
442 obp_daddr_patch[1] |= (phys_page & 0x3ff);
443 flushi((long)&obp_daddr_patch[0]);
444 472
445 /* Now fixup OBP's idea about where we really are mapped. */ 473 for (i = 0; i < (last - first); i++) {
446 prom_printf("Remapping the kernel... "); 474 struct linux_prom_translation *src = &prom_trans[i + first];
475 struct linux_prom_translation *dest = &prom_trans[i];
447 476
448 /* Spitfire Errata #32 workaround */ 477 *dest = *src;
449 /* NOTE: Using plain zero for the context value is 478 }
450 * correct here, we are not using the Linux trap 479 for (; i < ents; i++) {
451 * tables yet so we should not use the special 480 struct linux_prom_translation *dest = &prom_trans[i];
452 * UltraSPARC-III+ page size encodings yet. 481 dest->virt = dest->size = dest->data = 0x0UL;
453 */ 482 }
454 __asm__ __volatile__("stxa %0, [%1] %2\n\t" 483
455 "flush %%g6" 484 prom_trans_ents = last - first;
456 : /* No outputs */
457 : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
458
459 switch (tlb_type) {
460 default:
461 case spitfire:
462 phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
463 break;
464
465 case cheetah:
466 case cheetah_plus:
467 phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
468 break;
469 };
470
471 phys_page &= _PAGE_PADDR;
472 phys_page += ((unsigned long)&prom_boot_page -
473 (unsigned long)KERNBASE);
474 485
475 if (tlb_type == spitfire) { 486 if (tlb_type == spitfire) {
476 /* Lock this into i/d tlb entry 59 */ 487 /* Clear diag TTE bits. */
477 __asm__ __volatile__( 488 for (i = 0; i < prom_trans_ents; i++)
478 "stxa %%g0, [%2] %3\n\t" 489 prom_trans[i].data &= ~0x0003fe0000000000UL;
479 "stxa %0, [%1] %4\n\t"
480 "membar #Sync\n\t"
481 "flush %%g6\n\t"
482 "stxa %%g0, [%2] %5\n\t"
483 "stxa %0, [%1] %6\n\t"
484 "membar #Sync\n\t"
485 "flush %%g6"
486 : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
487 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
488 "r" (59 << 3), "r" (TLB_TAG_ACCESS),
489 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
490 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
491 : "memory");
492 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
493 /* Lock this into i/d tlb-0 entry 11 */
494 __asm__ __volatile__(
495 "stxa %%g0, [%2] %3\n\t"
496 "stxa %0, [%1] %4\n\t"
497 "membar #Sync\n\t"
498 "flush %%g6\n\t"
499 "stxa %%g0, [%2] %5\n\t"
500 "stxa %0, [%1] %6\n\t"
501 "membar #Sync\n\t"
502 "flush %%g6"
503 : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
504 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
505 "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
506 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
507 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
508 : "memory");
509 } else {
510 /* Implement me :-) */
511 BUG();
512 } 490 }
491}
513 492
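
A standalone model of the filter-and-compact step in read_obp_translations(): after the sort, the in-range entries form one contiguous run, so one scan finds it and a memmove()-style copy slides it to the front. The 0xf0000000 to 0x100000000 window is the OBP range the old comment cites, named LOW_OBP_ADDRESS/HI_OBP_ADDRESS in the kernel:

#include <stdio.h>
#include <string.h>

#define LOW_OBP 0xf0000000UL
#define HI_OBP  0x100000000UL

struct tr { unsigned long virt, size, data; };

static int in_obp_range(unsigned long va)
{
	return va >= LOW_OBP && va < HI_OBP;
}

/* Assumes the array is sorted by virt, so the in-range entries form one
 * contiguous run [first, last); slide them to the front, zero the tail,
 * and return the new entry count. */
static int keep_obp_entries(struct tr *t, int ents)
{
	int first = 0, last, kept;

	while (first < ents && !in_obp_range(t[first].virt))
		first++;
	last = first;
	while (last < ents && in_obp_range(t[last].virt))
		last++;

	kept = last - first;
	memmove(t, t + first, kept * sizeof(*t));
	memset(t + kept, 0, (ents - kept) * sizeof(*t));
	return kept;
}

int main(void)
{
	struct tr t[3] = {
		{ 0x10000UL,    0x2000UL, 0 },
		{ 0xf0000000UL, 0x2000UL, 0 },
		{ 0xf0002000UL, 0x2000UL, 0 },
	};

	printf("kept %d, first virt %lx\n", keep_obp_entries(t, 3), t[0].virt);
	return 0;
}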
514 tte_vaddr = (unsigned long) KERNBASE; 493static void __init remap_kernel(void)
494{
495 unsigned long phys_page, tte_vaddr, tte_data;
496 int tlb_ent = sparc64_highest_locked_tlbent();
515 497
516 /* Spitfire Errata #32 workaround */ 498 tte_vaddr = (unsigned long) KERNBASE;
517 /* NOTE: Using plain zero for the context value is 499 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
518 * correct here, we are not using the Linux trap 500 tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
519 * tables yet so we should not use the special 501 _PAGE_CP | _PAGE_CV | _PAGE_P |
520 * UltraSPARC-III+ page size encodings yet. 502 _PAGE_L | _PAGE_W));
521 */
522 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
523 "flush %%g6"
524 : /* No outputs */
525 : "r" (0),
526 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
527
528 if (tlb_type == spitfire)
529 tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
530 else
531 tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
532 503
533 kern_locked_tte_data = tte_data; 504 kern_locked_tte_data = tte_data;
534 505
535 remap_func = (void *) ((unsigned long) &prom_remap - 506 /* Now lock us into the TLBs via OBP. */
536 (unsigned long) &prom_boot_page); 507 prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
537 508 prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
538
539 /* Spitfire Errata #32 workaround */
540 /* NOTE: Using plain zero for the context value is
541 * correct here, we are not using the Linux trap
542 * tables yet so we should not use the special
543 * UltraSPARC-III+ page size encodings yet.
544 */
545 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
546 "flush %%g6"
547 : /* No outputs */
548 : "r" (0),
549 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
550
551 remap_func((tlb_type == spitfire ?
552 (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
553 (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
554 (unsigned long) KERNBASE,
555 prom_get_mmu_ihandle());
556
557 if (bigkernel)
558 remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
559 (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
560
561 /* Flush out that temporary mapping. */
562 spitfire_flush_dtlb_nucleus_page(0x0);
563 spitfire_flush_itlb_nucleus_page(0x0);
564
565 /* Now lock us back into the TLBs via OBP. */
566 prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
567 prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
568 if (bigkernel) { 509 if (bigkernel) {
569 prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, 510 tlb_ent -= 1;
570 tte_vaddr + 0x400000); 511 prom_dtlb_load(tlb_ent,
571 prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, 512 tte_data + 0x400000,
572 tte_vaddr + 0x400000); 513 tte_vaddr + 0x400000);
514 prom_itlb_load(tlb_ent,
515 tte_data + 0x400000,
516 tte_vaddr + 0x400000);
573 } 517 }
574 518 sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
575 /* Re-read translations property. */ 519 if (tlb_type == cheetah_plus) {
576 if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { 520 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
577 prom_printf("Couldn't get translation property\n"); 521 CTX_CHEETAH_PLUS_NUC);
578 prom_halt(); 522 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
523 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
579 } 524 }
580 n = n / sizeof(*trans); 525}
581
582 for (i = 0; i < n; i++) {
583 unsigned long vaddr = trans[i].virt;
584 unsigned long size = trans[i].size;
585
586 if (vaddr < 0xf0000000UL) {
587 unsigned long avoid_start = (unsigned long) KERNBASE;
588 unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
589
590 if (bigkernel)
591 avoid_end += (4 * 1024 * 1024);
592 if (vaddr < avoid_start) {
593 unsigned long top = vaddr + size;
594 526
595 if (top > avoid_start)
596 top = avoid_start;
597 prom_unmap(top - vaddr, vaddr);
598 }
599 if ((vaddr + size) > avoid_end) {
600 unsigned long bottom = vaddr;
601 527
602 if (bottom < avoid_end) 528static void __init inherit_prom_mappings(void)
603 bottom = avoid_end; 529{
604 prom_unmap((vaddr + size) - bottom, bottom); 530 read_obp_translations();
605 }
606 }
607 }
608 531
532 /* Now fixup OBP's idea about where we really are mapped. */
533 prom_printf("Remapping the kernel... ");
534 remap_kernel();
609 prom_printf("done.\n"); 535 prom_printf("done.\n");
610 536
537 prom_printf("Registering callbacks... ");
611 register_prom_callbacks(); 538 register_prom_callbacks();
539 prom_printf("done.\n");
612} 540}
613 541
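
The shift pair in remap_kernel() simply rounds the physical address down to a 4 MB boundary, matching the _PAGE_SZ4MB locked mapping whose attribute bits come from pgtable.h. A minimal check with an example value:

#include <stdio.h>

int main(void)
{
	/* Example value; the kernel reads prom_boot_mapping_phys_low
	 * from the boot-time OBP mapping of the kernel image. */
	unsigned long phys_low = 0x0000000040404000UL;
	unsigned long phys_page = (phys_low >> 22UL) << 22UL;

	/* Rounds down to the 4 MB boundary: 0x40400000 here. */
	printf("phys_page = %lx\n", phys_page);
	return 0;
}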
614/* The OBP specifications for sun4u mark 0xfffffffc00000000 and 542/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
@@ -792,8 +720,8 @@ void inherit_locked_prom_mappings(int save_p)
792 } 720 }
793 } 721 }
794 if (tlb_type == spitfire) { 722 if (tlb_type == spitfire) {
795 int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel; 723 int high = sparc64_highest_unlocked_tlb_ent;
796 for (i = 0; i < high; i++) { 724 for (i = 0; i <= high; i++) {
797 unsigned long data; 725 unsigned long data;
798 726
799 /* Spitfire Errata #32 workaround */ 727 /* Spitfire Errata #32 workaround */
@@ -881,9 +809,9 @@ void inherit_locked_prom_mappings(int save_p)
881 } 809 }
882 } 810 }
883 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 811 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
884 int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel; 812 int high = sparc64_highest_unlocked_tlb_ent;
885 813
886 for (i = 0; i < high; i++) { 814 for (i = 0; i <= high; i++) {
887 unsigned long data; 815 unsigned long data;
888 816
889 data = cheetah_get_ldtlb_data(i); 817 data = cheetah_get_ldtlb_data(i);
@@ -1276,14 +1204,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1276 int i; 1204 int i;
1277 1205
1278#ifdef CONFIG_DEBUG_BOOTMEM 1206#ifdef CONFIG_DEBUG_BOOTMEM
1279 prom_printf("bootmem_init: Scan sp_banks, "); 1207 prom_printf("bootmem_init: Scan pavail, ");
1280#endif 1208#endif
1281 1209
1282 bytes_avail = 0UL; 1210 bytes_avail = 0UL;
1283 for (i = 0; sp_banks[i].num_bytes != 0; i++) { 1211 for (i = 0; i < pavail_ents; i++) {
1284 end_of_phys_memory = sp_banks[i].base_addr + 1212 end_of_phys_memory = pavail[i].phys_addr +
1285 sp_banks[i].num_bytes; 1213 pavail[i].reg_size;
1286 bytes_avail += sp_banks[i].num_bytes; 1214 bytes_avail += pavail[i].reg_size;
1287 if (cmdline_memory_size) { 1215 if (cmdline_memory_size) {
1288 if (bytes_avail > cmdline_memory_size) { 1216 if (bytes_avail > cmdline_memory_size) {
1289 unsigned long slack = bytes_avail - cmdline_memory_size; 1217 unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1291,12 +1219,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1291 bytes_avail -= slack; 1219 bytes_avail -= slack;
1292 end_of_phys_memory -= slack; 1220 end_of_phys_memory -= slack;
1293 1221
1294 sp_banks[i].num_bytes -= slack; 1222 pavail[i].reg_size -= slack;
1295 if (sp_banks[i].num_bytes == 0) { 1223 if ((long)pavail[i].reg_size <= 0L) {
1296 sp_banks[i].base_addr = 0xdeadbeef; 1224 pavail[i].phys_addr = 0xdeadbeefUL;
1225 pavail[i].reg_size = 0UL;
1226 pavail_ents = i;
1297 } else { 1227 } else {
1298 sp_banks[i+1].num_bytes = 0; 1228 pavail[i+1].reg_size = 0UL;
1299 sp_banks[i+1].base_addr = 0xdeadbeef; 1229 pavail[i+1].phys_addr = 0xdeadbeefUL;
1230 pavail_ents = i + 1;
1300 } 1231 }
1301 break; 1232 break;
1302 } 1233 }
@@ -1347,17 +1278,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1347#endif 1278#endif
1348 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); 1279 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
1349 1280
1350 bootmap_base = bootmap_pfn << PAGE_SHIFT;
1351
1352 /* Now register the available physical memory with the 1281 /* Now register the available physical memory with the
1353 * allocator. 1282 * allocator.
1354 */ 1283 */
1355 for (i = 0; sp_banks[i].num_bytes != 0; i++) { 1284 for (i = 0; i < pavail_ents; i++) {
1356#ifdef CONFIG_DEBUG_BOOTMEM 1285#ifdef CONFIG_DEBUG_BOOTMEM
1357 prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n", 1286 prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
1358 i, sp_banks[i].base_addr, sp_banks[i].num_bytes); 1287 i, pavail[i].phys_addr, pavail[i].reg_size);
1359#endif 1288#endif
1360 free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes); 1289 free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
1361 } 1290 }
1362 1291
1363#ifdef CONFIG_BLK_DEV_INITRD 1292#ifdef CONFIG_BLK_DEV_INITRD
@@ -1398,121 +1327,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1398 return end_pfn; 1327 return end_pfn;
1399} 1328}
1400 1329
1330#ifdef CONFIG_DEBUG_PAGEALLOC
1331static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
1332{
1333 unsigned long vstart = PAGE_OFFSET + pstart;
1334 unsigned long vend = PAGE_OFFSET + pend;
1335 unsigned long alloc_bytes = 0UL;
1336
1337 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1338 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1339 vstart, vend);
1340 prom_halt();
1341 }
1342
1343 while (vstart < vend) {
1344 unsigned long this_end, paddr = __pa(vstart);
1345 pgd_t *pgd = pgd_offset_k(vstart);
1346 pud_t *pud;
1347 pmd_t *pmd;
1348 pte_t *pte;
1349
1350 pud = pud_offset(pgd, vstart);
1351 if (pud_none(*pud)) {
1352 pmd_t *new;
1353
1354 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1355 alloc_bytes += PAGE_SIZE;
1356 pud_populate(&init_mm, pud, new);
1357 }
1358
1359 pmd = pmd_offset(pud, vstart);
1360 if (!pmd_present(*pmd)) {
1361 pte_t *new;
1362
1363 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1364 alloc_bytes += PAGE_SIZE;
1365 pmd_populate_kernel(&init_mm, pmd, new);
1366 }
1367
1368 pte = pte_offset_kernel(pmd, vstart);
1369 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1370 if (this_end > vend)
1371 this_end = vend;
1372
1373 while (vstart < this_end) {
1374 pte_val(*pte) = (paddr | pgprot_val(prot));
1375
1376 vstart += PAGE_SIZE;
1377 paddr += PAGE_SIZE;
1378 pte++;
1379 }
1380 }
1381
1382 return alloc_bytes;
1383}
1384
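
The PMD clamping in kernel_map_range() advances in aligned chunks and trims the last one to vend. A standalone model of just that iteration (PMD_SHIFT = 23 assumed, i.e. 8K pages and 1024-entry PTE tables; the kernel's value comes from asm/pgtable.h):

#include <stdio.h>

#define PMD_SHIFT 23
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

int main(void)
{
	unsigned long vstart = 0x10000UL, vend = 0x1800000UL;

	while (vstart < vend) {
		/* End of the PMD that contains vstart... */
		unsigned long this_end = (vstart + PMD_SIZE) & PMD_MASK;

		/* ...clamped so the final chunk stops at vend. */
		if (this_end > vend)
			this_end = vend;
		printf("map [%lx,%lx)\n", vstart, this_end);
		vstart = this_end;
	}
	return 0;
}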
1385static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1386static int pall_ents __initdata;
1387
1388extern unsigned int kvmap_linear_patch[1];
1389
1390static void __init kernel_physical_mapping_init(void)
1391{
1392 unsigned long i, mem_alloced = 0UL;
1393
1394 read_obp_memory("reg", &pall[0], &pall_ents);
1395
1396 for (i = 0; i < pall_ents; i++) {
1397 unsigned long phys_start, phys_end;
1398
1399 phys_start = pall[i].phys_addr;
1400 phys_end = phys_start + pall[i].reg_size;
1401 mem_alloced += kernel_map_range(phys_start, phys_end,
1402 PAGE_KERNEL);
1403 }
1404
1405 printk("Allocated %ld bytes for kernel page tables.\n",
1406 mem_alloced);
1407
1408 kvmap_linear_patch[0] = 0x01000000; /* nop */
1409 flushi(&kvmap_linear_patch[0]);
1410
1411 __flush_tlb_all();
1412}
1413
1414void kernel_map_pages(struct page *page, int numpages, int enable)
1415{
1416 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1417 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1418
1419 kernel_map_range(phys_start, phys_end,
1420 (enable ? PAGE_KERNEL : __pgprot(0)));
1421
1422 /* We should perform an IPI and flush all TLBs,
1423 * but that can deadlock, so we flush only the current cpu's TLB.
1424 */
1425 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1426 PAGE_OFFSET + phys_end);
1427}
1428#endif
1429
1430unsigned long __init find_ecache_flush_span(unsigned long size)
1431{
1432 int i;
1433
1434 for (i = 0; i < pavail_ents; i++) {
1435 if (pavail[i].reg_size >= size)
1436 return pavail[i].phys_addr;
1437 }
1438
1439 return ~0UL;
1440}
1441
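
find_ecache_flush_span() is a plain first-fit search over the available banks; a standalone model:

#include <stdio.h>

struct range { unsigned long phys_addr, reg_size; };

/* First-fit: return the base of the first bank big enough to hold a
 * flush span of 'size' bytes, or ~0UL if none qualifies. */
static unsigned long find_span(const struct range *pavail, int ents,
			       unsigned long size)
{
	int i;

	for (i = 0; i < ents; i++)
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	return ~0UL;
}

int main(void)
{
	struct range pavail[2] = { { 0x2000UL,   0x4000UL   },
				   { 0x100000UL, 0x800000UL } };

	printf("%lx\n", find_span(pavail, 2, 0x400000UL));	/* 100000 */
	return 0;
}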
1401/* paging_init() sets up the page tables */ 1442/* paging_init() sets up the page tables */
1402 1443
1403extern void cheetah_ecache_flush_init(void); 1444extern void cheetah_ecache_flush_init(void);
1404 1445
1405static unsigned long last_valid_pfn; 1446static unsigned long last_valid_pfn;
1447pgd_t swapper_pg_dir[2048];
1406 1448
1407void __init paging_init(void) 1449void __init paging_init(void)
1408{ 1450{
1409 extern pmd_t swapper_pmd_dir[1024]; 1451 unsigned long end_pfn, pages_avail, shift;
1410 extern unsigned int sparc64_vpte_patchme1[1]; 1452 unsigned long real_end, i;
1411 extern unsigned int sparc64_vpte_patchme2[1]; 1453
1412 unsigned long alias_base = kern_base + PAGE_OFFSET; 1454 /* Find available physical memory... */
1413 unsigned long second_alias_page = 0; 1455 read_obp_memory("available", &pavail[0], &pavail_ents);
1414 unsigned long pt, flags, end_pfn, pages_avail; 1456
1415 unsigned long shift = alias_base - ((unsigned long)KERNBASE); 1457 phys_base = 0xffffffffffffffffUL;
1416 unsigned long real_end; 1458 for (i = 0; i < pavail_ents; i++)
1459 phys_base = min(phys_base, pavail[i].phys_addr);
1460
1461 pfn_base = phys_base >> PAGE_SHIFT;
1462
1463 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1464 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1417 1465
1418 set_bit(0, mmu_context_bmap); 1466 set_bit(0, mmu_context_bmap);
1419 1467
1468 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1469
1420 real_end = (unsigned long)_end; 1470 real_end = (unsigned long)_end;
1421 if ((real_end > ((unsigned long)KERNBASE + 0x400000))) 1471 if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
1422 bigkernel = 1; 1472 bigkernel = 1;
1423#ifdef CONFIG_BLK_DEV_INITRD 1473 if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
1424 if (sparc_ramdisk_image || sparc_ramdisk_image64) 1474 prom_printf("paging_init: Kernel > 8MB, too large.\n");
1425 real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size)); 1475 prom_halt();
1426#endif
1427
1428 /* We assume physical memory starts at some 4mb multiple,
1429 * if this were not true we wouldn't boot up to this point
1430 * anyways.
1431 */
1432 pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
1433 pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
1434 local_irq_save(flags);
1435 if (tlb_type == spitfire) {
1436 __asm__ __volatile__(
1437 " stxa %1, [%0] %3\n"
1438 " stxa %2, [%5] %4\n"
1439 " membar #Sync\n"
1440 " flush %%g6\n"
1441 " nop\n"
1442 " nop\n"
1443 " nop\n"
1444 : /* No outputs */
1445 : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
1446 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
1447 : "memory");
1448 if (real_end >= KERNBASE + 0x340000) {
1449 second_alias_page = alias_base + 0x400000;
1450 __asm__ __volatile__(
1451 " stxa %1, [%0] %3\n"
1452 " stxa %2, [%5] %4\n"
1453 " membar #Sync\n"
1454 " flush %%g6\n"
1455 " nop\n"
1456 " nop\n"
1457 " nop\n"
1458 : /* No outputs */
1459 : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
1460 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
1461 : "memory");
1462 }
1463 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1464 __asm__ __volatile__(
1465 " stxa %1, [%0] %3\n"
1466 " stxa %2, [%5] %4\n"
1467 " membar #Sync\n"
1468 " flush %%g6\n"
1469 " nop\n"
1470 " nop\n"
1471 " nop\n"
1472 : /* No outputs */
1473 : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
1474 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
1475 : "memory");
1476 if (real_end >= KERNBASE + 0x340000) {
1477 second_alias_page = alias_base + 0x400000;
1478 __asm__ __volatile__(
1479 " stxa %1, [%0] %3\n"
1480 " stxa %2, [%5] %4\n"
1481 " membar #Sync\n"
1482 " flush %%g6\n"
1483 " nop\n"
1484 " nop\n"
1485 " nop\n"
1486 : /* No outputs */
1487 : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
1488 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
1489 : "memory");
1490 }
1491 } 1476 }
1492 local_irq_restore(flags); 1477
1493 1478 /* Set kernel pgd to upper alias so physical page computations
1494 /* Now set kernel pgd to upper alias so physical page computations
1495 * work. 1479 * work.
1496 */ 1480 */
1497 init_mm.pgd += ((shift) / (sizeof(pgd_t))); 1481 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1498 1482
1499 memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir)); 1483 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
1500 1484
1501 /* Now can init the kernel/bad page tables. */ 1485 /* Now can init the kernel/bad page tables. */
1502 pud_set(pud_offset(&swapper_pg_dir[0], 0), 1486 pud_set(pud_offset(&swapper_pg_dir[0], 0),
1503 swapper_pmd_dir + (shift / sizeof(pgd_t))); 1487 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
1504 1488
1505 sparc64_vpte_patchme1[0] |= 1489 swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
1506 (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
1507 sparc64_vpte_patchme2[0] |=
1508 (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
1509 flushi((long)&sparc64_vpte_patchme1[0]);
1510 1490
1511 /* Setup bootmem... */
1512 pages_avail = 0;
1513 last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1514
1515 /* Inherit non-locked OBP mappings. */
1516 inherit_prom_mappings(); 1491 inherit_prom_mappings();
1517 1492
1518 /* Ok, we can use our TLB miss and window trap handlers safely. 1493 /* Ok, we can use our TLB miss and window trap handlers safely.
@@ -1527,13 +1502,16 @@ void __init paging_init(void)
1527 1502
1528 inherit_locked_prom_mappings(1); 1503 inherit_locked_prom_mappings(1);
1529 1504
1530 /* We only created DTLB mapping of this stuff. */
1531 spitfire_flush_dtlb_nucleus_page(alias_base);
1532 if (second_alias_page)
1533 spitfire_flush_dtlb_nucleus_page(second_alias_page);
1534
1535 __flush_tlb_all(); 1505 __flush_tlb_all();
1536 1506
1507 /* Setup bootmem... */
1508 pages_avail = 0;
1509 last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1510
1511#ifdef CONFIG_DEBUG_PAGEALLOC
1512 kernel_physical_mapping_init();
1513#endif
1514
1537 { 1515 {
1538 unsigned long zones_size[MAX_NR_ZONES]; 1516 unsigned long zones_size[MAX_NR_ZONES];
1539 unsigned long zholes_size[MAX_NR_ZONES]; 1517 unsigned long zholes_size[MAX_NR_ZONES];
@@ -1554,128 +1532,35 @@ void __init paging_init(void)
1554 device_scan(); 1532 device_scan();
1555} 1533}
1556 1534
1557/* Ok, it seems that the prom can allocate some more memory chunks
1558 * as a side effect of some prom calls we perform during the
1559 * boot sequence. My most likely theory is that it is from the
1560 * prom_set_traptable() call, and OBP is allocating a scratchpad
1561 * for saving client program register state etc.
1562 */
1563static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
1564{
1565 int swapi = 0;
1566 int i, mitr;
1567 unsigned long tmpaddr, tmpsize;
1568 unsigned long lowest;
1569
1570 for (i = 0; thislist[i].theres_more != 0; i++) {
1571 lowest = thislist[i].start_adr;
1572 for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
1573 if (thislist[mitr].start_adr < lowest) {
1574 lowest = thislist[mitr].start_adr;
1575 swapi = mitr;
1576 }
1577 if (lowest == thislist[i].start_adr)
1578 continue;
1579 tmpaddr = thislist[swapi].start_adr;
1580 tmpsize = thislist[swapi].num_bytes;
1581 for (mitr = swapi; mitr > i; mitr--) {
1582 thislist[mitr].start_adr = thislist[mitr-1].start_adr;
1583 thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
1584 }
1585 thislist[i].start_adr = tmpaddr;
1586 thislist[i].num_bytes = tmpsize;
1587 }
1588}
1589
1590void __init rescan_sp_banks(void)
1591{
1592 struct linux_prom64_registers memlist[64];
1593 struct linux_mlist_p1275 avail[64], *mlist;
1594 unsigned long bytes, base_paddr;
1595 int num_regs, node = prom_finddevice("/memory");
1596 int i;
1597
1598 num_regs = prom_getproperty(node, "available",
1599 (char *) memlist, sizeof(memlist));
1600 num_regs = (num_regs / sizeof(struct linux_prom64_registers));
1601 for (i = 0; i < num_regs; i++) {
1602 avail[i].start_adr = memlist[i].phys_addr;
1603 avail[i].num_bytes = memlist[i].reg_size;
1604 avail[i].theres_more = &avail[i + 1];
1605 }
1606 avail[i - 1].theres_more = NULL;
1607 sort_memlist(avail);
1608
1609 mlist = &avail[0];
1610 i = 0;
1611 bytes = mlist->num_bytes;
1612 base_paddr = mlist->start_adr;
1613
1614 sp_banks[0].base_addr = base_paddr;
1615 sp_banks[0].num_bytes = bytes;
1616
1617 while (mlist->theres_more != NULL){
1618 i++;
1619 mlist = mlist->theres_more;
1620 bytes = mlist->num_bytes;
1621 if (i >= SPARC_PHYS_BANKS-1) {
1622 printk ("The machine has more banks than "
1623 "this kernel can support\n"
1624 "Increase the SPARC_PHYS_BANKS "
1625 "setting (currently %d)\n",
1626 SPARC_PHYS_BANKS);
1627 i = SPARC_PHYS_BANKS-1;
1628 break;
1629 }
1630
1631 sp_banks[i].base_addr = mlist->start_adr;
1632 sp_banks[i].num_bytes = mlist->num_bytes;
1633 }
1634
1635 i++;
1636 sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
1637 sp_banks[i].num_bytes = 0;
1638
1639 for (i = 0; sp_banks[i].num_bytes != 0; i++)
1640 sp_banks[i].num_bytes &= PAGE_MASK;
1641}
1642
1643static void __init taint_real_pages(void) 1535static void __init taint_real_pages(void)
1644{ 1536{
1645 struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
1646 int i; 1537 int i;
1647 1538
1648 for (i = 0; i < SPARC_PHYS_BANKS; i++) { 1539 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
1649 saved_sp_banks[i].base_addr =
1650 sp_banks[i].base_addr;
1651 saved_sp_banks[i].num_bytes =
1652 sp_banks[i].num_bytes;
1653 }
1654
1655 rescan_sp_banks();
1656 1540
1657 /* Find changes discovered in the sp_bank rescan and 1541 /* Find changes discovered in the physmem available rescan and
1658 * reserve the lost portions in the bootmem maps. 1542 * reserve the lost portions in the bootmem maps.
1659 */ 1543 */
1660 for (i = 0; saved_sp_banks[i].num_bytes; i++) { 1544 for (i = 0; i < pavail_ents; i++) {
1661 unsigned long old_start, old_end; 1545 unsigned long old_start, old_end;
1662 1546
1663 old_start = saved_sp_banks[i].base_addr; 1547 old_start = pavail[i].phys_addr;
1664 old_end = old_start + 1548 old_end = old_start +
1665 saved_sp_banks[i].num_bytes; 1549 pavail[i].reg_size;
1666 while (old_start < old_end) { 1550 while (old_start < old_end) {
1667 int n; 1551 int n;
1668 1552
1669 for (n = 0; sp_banks[n].num_bytes; n++) { 1553 for (n = 0; n < pavail_rescan_ents; n++) {
1670 unsigned long new_start, new_end; 1554 unsigned long new_start, new_end;
1671 1555
1672 new_start = sp_banks[n].base_addr; 1556 new_start = pavail_rescan[n].phys_addr;
1673 new_end = new_start + sp_banks[n].num_bytes; 1557 new_end = new_start +
1558 pavail_rescan[n].reg_size;
1674 1559
1675 if (new_start <= old_start && 1560 if (new_start <= old_start &&
1676 new_end >= (old_start + PAGE_SIZE)) { 1561 new_end >= (old_start + PAGE_SIZE)) {
1677 set_bit (old_start >> 22, 1562 set_bit(old_start >> 22,
1678 sparc64_valid_addr_bitmap); 1563 sparc64_valid_addr_bitmap);
1679 goto do_next_page; 1564 goto do_next_page;
1680 } 1565 }
1681 } 1566 }
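
A standalone model of the containment test above and the 4 MB-granularity bit it sets (8K page size as on sparc64; the kernel version additionally reserves lost pages in bootmem, which this sketch omits):

#include <stdio.h>

#define PAGE_SIZE 8192UL

struct range { unsigned long addr, size; };

/* Mark the 4 MB chunk of every page that is still covered by some
 * rescanned range; pages no longer covered keep a zero bit. */
static void mark_valid(const struct range *old, int n_old,
		       const struct range *cur, int n_cur,
		       unsigned char *bitmap)
{
	int i, n;

	for (i = 0; i < n_old; i++) {
		unsigned long p = old[i].addr;
		unsigned long end = p + old[i].size;

		for (; p < end; p += PAGE_SIZE) {
			for (n = 0; n < n_cur; n++) {
				if (cur[n].addr <= p &&
				    cur[n].addr + cur[n].size >= p + PAGE_SIZE) {
					unsigned long chunk = p >> 22;

					bitmap[chunk >> 3] |= 1 << (chunk & 7);
					break;
				}
			}
		}
	}
}

int main(void)
{
	struct range old[1] = { { 0UL, 0x800000UL } };
	struct range cur[1] = { { 0UL, 0x400000UL } };	/* lost 2nd 4MB */
	unsigned char bitmap[1] = { 0 };

	mark_valid(old, 1, cur, 1, bitmap);
	printf("bitmap = %#x\n", bitmap[0]);	/* 0x1: only chunk 0 valid */
	return 0;
}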
@@ -1695,8 +1580,7 @@ void __init mem_init(void)
1695 1580
1696 i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); 1581 i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
1697 i += 1; 1582 i += 1;
1698 sparc64_valid_addr_bitmap = (unsigned long *) 1583 sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
1699 __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
1700 if (sparc64_valid_addr_bitmap == NULL) { 1584 if (sparc64_valid_addr_bitmap == NULL) {
1701 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); 1585 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
1702 prom_halt(); 1586 prom_halt();
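
The sizing arithmetic here packs one valid bit per 4 MB chunk: last_valid_pfn >> (22 - PAGE_SHIFT) converts page frames to 4 MB chunks, the extra >> 6 converts chunks to 64-bit words, and i << 3 converts words to bytes for alloc_bootmem(). A quick standalone check of the numbers (PAGE_SHIFT = 13 assumed):

#include <stdio.h>

#define PAGE_SHIFT 13	/* 8K pages, as on sparc64 */

int main(void)
{
	unsigned long last_valid_pfn = 1UL << 19;	/* 4 GB of 8K pages */
	unsigned long words = (last_valid_pfn >> ((22 - PAGE_SHIFT) + 6)) + 1;

	/* 4 GB / 4 MB = 1024 chunks -> 16 64-bit words, +1 slack word. */
	printf("%lu words, %lu bytes\n", words, words << 3);	/* 17, 136 */
	return 0;
}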
@@ -1749,7 +1633,7 @@ void __init mem_init(void)
1749 cheetah_ecache_flush_init(); 1633 cheetah_ecache_flush_init();
1750} 1634}
1751 1635
1752void free_initmem (void) 1636void free_initmem(void)
1753{ 1637{
1754 unsigned long addr, initend; 1638 unsigned long addr, initend;
1755 1639
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index b2ee9b53227f..e4c9151fa116 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -144,42 +144,29 @@ __flush_icache_page: /* %o0 = phys_page */
144 144
145#define DTAG_MASK 0x3 145#define DTAG_MASK 0x3
146 146
147 /* This routine is Spitfire specific so the hardcoded
148 * D-cache size and line-size are OK.
149 */
147 .align 64 150 .align 64
148 .globl __flush_dcache_page 151 .globl __flush_dcache_page
149__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ 152__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
150 sethi %uhi(PAGE_OFFSET), %g1 153 sethi %uhi(PAGE_OFFSET), %g1
151 sllx %g1, 32, %g1 154 sllx %g1, 32, %g1
152 sub %o0, %g1, %o0 155 sub %o0, %g1, %o0 ! physical address
153 clr %o4 156 srlx %o0, 11, %o0 ! make D-cache TAG
154 srlx %o0, 11, %o0 157 sethi %hi(1 << 14), %o2 ! D-cache size
155 sethi %hi(1 << 14), %o2 158 sub %o2, (1 << 5), %o2 ! D-cache line size
1561: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group 1591: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG
157 add %o4, (1 << 5), %o4 ! IEU0 160 andcc %o3, DTAG_MASK, %g0 ! Valid?
158 ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group 161 be,pn %xcc, 2f ! Nope, branch
159 add %o4, (1 << 5), %o4 ! IEU0 162 andn %o3, DTAG_MASK, %o3 ! Clear valid bits
160 ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available 163 cmp %o3, %o0 ! TAG match?
161 add %o4, (1 << 5), %o4 ! IEU0 164 bne,pt %xcc, 2f ! Nope, branch
162 andn %o3, DTAG_MASK, %o3 ! IEU1 165 nop
163 ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group 166 stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG
164 add %o4, (1 << 5), %o4 ! IEU0 167 membar #Sync
165 andn %g1, DTAG_MASK, %g1 ! IEU1 1682: brnz,pt %o2, 1b
166 cmp %o0, %o3 ! IEU1 Group 169 sub %o2, (1 << 5), %o2 ! D-cache line size
167 be,a,pn %xcc, dflush1 ! CTI
168 sub %o4, (4 << 5), %o4 ! IEU0 (Group)
169 cmp %o0, %g1 ! IEU1 Group
170 andn %g2, DTAG_MASK, %g2 ! IEU0
171 be,a,pn %xcc, dflush2 ! CTI
172 sub %o4, (3 << 5), %o4 ! IEU0 (Group)
173 cmp %o0, %g2 ! IEU1 Group
174 andn %g3, DTAG_MASK, %g3 ! IEU0
175 be,a,pn %xcc, dflush3 ! CTI
176 sub %o4, (2 << 5), %o4 ! IEU0 (Group)
177 cmp %o0, %g3 ! IEU1 Group
178 be,a,pn %xcc, dflush4 ! CTI
179 sub %o4, (1 << 5), %o4 ! IEU0
1802: cmp %o4, %o2 ! IEU1 Group
181 bne,pt %xcc, 1b ! CTI
182 nop ! IEU0
183 170
184 /* The I-cache does not snoop local stores so we 171 /* The I-cache does not snoop local stores so we
185 * better flush that too when necessary. 172 * better flush that too when necessary.
@@ -189,48 +176,9 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
189 retl 176 retl
190 nop 177 nop
191 178
192dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG
193 add %o4, (1 << 5), %o4
194dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG
195 add %o4, (1 << 5), %o4
196dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG
197 add %o4, (1 << 5), %o4
198dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
199 add %o4, (1 << 5), %o4
200 membar #Sync
201 ba,pt %xcc, 2b
202 nop
203#endif /* DCACHE_ALIASING_POSSIBLE */ 179#endif /* DCACHE_ALIASING_POSSIBLE */
204 180
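
Read back in C, the rewritten Spitfire loop walks the 16K tag array backwards in 32-byte line steps and invalidates valid lines whose tag matches the page. A rough userspace model with the same geometry (an array stands in for the ASI_DCACHE_TAG space, which only ldxa/stxa can really touch):

#include <stdio.h>

#define DCACHE_SIZE (1 << 14)	/* Spitfire: 16K D-cache */
#define LINE_SIZE   (1 << 5)	/* 32-byte lines */
#define DTAG_MASK   0x3UL

/* 'want' is the physical address >> 11, as in the assembly above. */
static void flush_page_tags(unsigned long *tags, unsigned long want)
{
	long off;

	for (off = DCACHE_SIZE - LINE_SIZE; off >= 0; off -= LINE_SIZE) {
		unsigned long tag = tags[off / LINE_SIZE];

		if ((tag & DTAG_MASK) != 0 &&		/* line valid?   */
		    (tag & ~DTAG_MASK) == want)		/* TAG match?    */
			tags[off / LINE_SIZE] = 0;	/* invalidate it */
	}
}

int main(void)
{
	unsigned long tags[DCACHE_SIZE / LINE_SIZE] = { 0 };

	tags[3] = 0x1234000UL | 0x1;	/* one valid line for our page */
	flush_page_tags(tags, 0x1234000UL);
	printf("tags[3] = %lx\n", tags[3]);	/* 0 */
	return 0;
}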
205 .previous .text 181 .previous
206 .align 32
207__prefill_dtlb:
208 rdpr %pstate, %g7
209 wrpr %g7, PSTATE_IE, %pstate
210 mov TLB_TAG_ACCESS, %g1
211 stxa %o5, [%g1] ASI_DMMU
212 stxa %o2, [%g0] ASI_DTLB_DATA_IN
213 flush %g6
214 retl
215 wrpr %g7, %pstate
216__prefill_itlb:
217 rdpr %pstate, %g7
218 wrpr %g7, PSTATE_IE, %pstate
219 mov TLB_TAG_ACCESS, %g1
220 stxa %o5, [%g1] ASI_IMMU
221 stxa %o2, [%g0] ASI_ITLB_DATA_IN
222 flush %g6
223 retl
224 wrpr %g7, %pstate
225
226 .globl __update_mmu_cache
227__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
228 srlx %o1, PAGE_SHIFT, %o1
229 andcc %o3, FAULT_CODE_DTLB, %g0
230 sllx %o1, PAGE_SHIFT, %o5
231 bne,pt %xcc, __prefill_dtlb
232 or %o5, %o0, %o5
233 ba,a,pt %xcc, __prefill_itlb
234 182
235 /* Cheetah specific versions, patched at boot time. */ 183 /* Cheetah specific versions, patched at boot time. */
236__cheetah_flush_tlb_mm: /* 18 insns */ 184__cheetah_flush_tlb_mm: /* 18 insns */
@@ -283,7 +231,7 @@ __cheetah_flush_tlb_pending: /* 26 insns */
283 wrpr %g7, 0x0, %pstate 231 wrpr %g7, 0x0, %pstate
284 232
285#ifdef DCACHE_ALIASING_POSSIBLE 233#ifdef DCACHE_ALIASING_POSSIBLE
286flush_dcpage_cheetah: /* 11 insns */ 234__cheetah_flush_dcache_page: /* 11 insns */
287 sethi %uhi(PAGE_OFFSET), %g1 235 sethi %uhi(PAGE_OFFSET), %g1
288 sllx %g1, 32, %g1 236 sllx %g1, 32, %g1
289 sub %o0, %g1, %o0 237 sub %o0, %g1, %o0
@@ -329,8 +277,8 @@ cheetah_patch_cachetlbops:
329#ifdef DCACHE_ALIASING_POSSIBLE 277#ifdef DCACHE_ALIASING_POSSIBLE
330 sethi %hi(__flush_dcache_page), %o0 278 sethi %hi(__flush_dcache_page), %o0
331 or %o0, %lo(__flush_dcache_page), %o0 279 or %o0, %lo(__flush_dcache_page), %o0
332 sethi %hi(flush_dcpage_cheetah), %o1 280 sethi %hi(__cheetah_flush_dcache_page), %o1
333 or %o1, %lo(flush_dcpage_cheetah), %o1 281 or %o1, %lo(__cheetah_flush_dcache_page), %o1
334 call cheetah_patch_one 282 call cheetah_patch_one
335 mov 11, %o2 283 mov 11, %o2
336#endif /* DCACHE_ALIASING_POSSIBLE */ 284#endif /* DCACHE_ALIASING_POSSIBLE */
@@ -505,22 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
505 nop 453 nop
506 nop 454 nop
507 455
508 .globl xcall_promstop
509xcall_promstop:
510 rdpr %pstate, %g2
511 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
512 rdpr %pil, %g2
513 wrpr %g0, 15, %pil
514 sethi %hi(109f), %g7
515 b,pt %xcc, etrap_irq
516109: or %g7, %lo(109b), %g7
517 flushw
518 call prom_stopself
519 nop
520 /* We should not return, just spin if we do... */
5211: b,a,pt %xcc, 1b
522 nop
523
524 .data 456 .data
525 457
526errata32_hwbug: 458errata32_hwbug:
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile
index 8f2420d9e9e6..3d33ed27bc27 100644
--- a/arch/sparc64/prom/Makefile
+++ b/arch/sparc64/prom/Makefile
@@ -6,5 +6,5 @@
6EXTRA_AFLAGS := -ansi 6EXTRA_AFLAGS := -ansi
7EXTRA_CFLAGS := -Werror 7EXTRA_CFLAGS := -Werror
8 8
9lib-y := bootstr.o devops.o init.o memory.o misc.o \ 9lib-y := bootstr.o devops.o init.o misc.o \
10 tree.o console.o printf.o p1275.o map.o cif.o 10 tree.o console.o printf.o p1275.o cif.o
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
index 028a53fcb1ec..eae5db8dda56 100644
--- a/arch/sparc64/prom/console.c
+++ b/arch/sparc64/prom/console.c
@@ -67,7 +67,7 @@ prom_putchar(char c)
67} 67}
68 68
69void 69void
70prom_puts(char *s, int len) 70prom_puts(const char *s, int len)
71{ 71{
72 p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| 72 p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
73 P1275_INOUT(3,1), 73 P1275_INOUT(3,1),
diff --git a/arch/sparc64/prom/devops.c b/arch/sparc64/prom/devops.c
index 2c99b21b6981..4641839eb39a 100644
--- a/arch/sparc64/prom/devops.c
+++ b/arch/sparc64/prom/devops.c
@@ -16,7 +16,7 @@
16 * Returns 0 on failure. 16 * Returns 0 on failure.
17 */ 17 */
18int 18int
19prom_devopen(char *dstr) 19prom_devopen(const char *dstr)
20{ 20{
21 return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)| 21 return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
22 P1275_INOUT(1,1), 22 P1275_INOUT(1,1),
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
index 817faae058cd..f3cc2d8578b2 100644
--- a/arch/sparc64/prom/init.c
+++ b/arch/sparc64/prom/init.c
@@ -27,7 +27,6 @@ int prom_chosen_node;
27 * failure. It gets passed the pointer to the PROM vector. 27 * failure. It gets passed the pointer to the PROM vector.
28 */ 28 */
29 29
30extern void prom_meminit(void);
31extern void prom_cif_init(void *, void *); 30extern void prom_cif_init(void *, void *);
32 31
33void __init prom_init(void *cif_handler, void *cif_stack) 32void __init prom_init(void *cif_handler, void *cif_stack)
@@ -46,7 +45,7 @@ void __init prom_init(void *cif_handler, void *cif_stack)
46 if((prom_root_node == 0) || (prom_root_node == -1)) 45 if((prom_root_node == 0) || (prom_root_node == -1))
47 prom_halt(); 46 prom_halt();
48 47
49 prom_chosen_node = prom_finddevice("/chosen"); 48 prom_chosen_node = prom_finddevice(prom_chosen_path);
50 if (!prom_chosen_node || prom_chosen_node == -1) 49 if (!prom_chosen_node || prom_chosen_node == -1)
51 prom_halt(); 50 prom_halt();
52 51
@@ -90,8 +89,6 @@ void __init prom_init(void *cif_handler, void *cif_stack)
90 89
91 printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); 90 printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
92 91
93 prom_meminit();
94
95 /* Initialization successful. */ 92 /* Initialization successful. */
96 return; 93 return;
97 94
diff --git a/arch/sparc64/prom/map.S b/arch/sparc64/prom/map.S
deleted file mode 100644
index 21b3f9c99ea7..000000000000
--- a/arch/sparc64/prom/map.S
+++ /dev/null
@@ -1,72 +0,0 @@
1/* $Id: map.S,v 1.2 1999/11/19 05:53:02 davem Exp $
2 * map.S: Tricky coding required to fixup the kernel OBP maps
3 * properly.
4 *
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */
7
8 .text
9 .align 8192
10 .globl prom_boot_page
11prom_boot_page:
12call_method:
13 .asciz "call-method"
14 .align 8
15map:
16 .asciz "map"
17 .align 8
18
19 /* When we are invoked, our caller has remapped us to
20 * page zero, therefore we must use PC relative addressing
21 * for everything after we begin performing the unmap/map
22 * calls.
23 */
24 .globl prom_remap
25prom_remap: /* %o0 = physpage, %o1 = virtpage, %o2 = mmu_ihandle */
26 rd %pc, %g1
27 srl %o2, 0, %o2 ! kill sign extension
28 sethi %hi(p1275buf), %g2
29 or %g2, %lo(p1275buf), %g2
30 ldx [%g2 + 0x10], %g3 ! prom_cif_stack
31 save %g3, -(192 + 128), %sp
32 ldx [%g2 + 0x08], %l0 ! prom_cif_handler
33 mov %g6, %i3
34 mov %g4, %i4
35 mov %g5, %i5
36 flushw
37
38 sethi %hi(prom_remap - call_method), %g7
39 or %g7, %lo(prom_remap - call_method), %g7
40 sub %g1, %g7, %l2 ! call-method string
41 sethi %hi(prom_remap - map), %g7
42 or %g7, %lo(prom_remap - map), %g7
43 sub %g1, %g7, %l4 ! map string
44
45 /* OK, map the 4MB region we really live at. */
46 stx %l2, [%sp + 2047 + 128 + 0x00] ! call-method
47 mov 7, %l5
48 stx %l5, [%sp + 2047 + 128 + 0x08] ! num_args
49 mov 1, %l5
50 stx %l5, [%sp + 2047 + 128 + 0x10] ! num_rets
51 stx %l4, [%sp + 2047 + 128 + 0x18] ! map
52 stx %i2, [%sp + 2047 + 128 + 0x20] ! mmu_ihandle
53 mov -1, %l5
54 stx %l5, [%sp + 2047 + 128 + 0x28] ! mode == default
55 sethi %hi(4 * 1024 * 1024), %l5
56 stx %l5, [%sp + 2047 + 128 + 0x30] ! size
57 stx %i1, [%sp + 2047 + 128 + 0x38] ! vaddr
58 stx %g0, [%sp + 2047 + 128 + 0x40] ! filler
59 stx %i0, [%sp + 2047 + 128 + 0x48] ! paddr
60 call %l0
61 add %sp, (2047 + 128), %o0 ! argument array
62
63 /* Restore hard-coded globals. */
64 mov %i3, %g6
65 mov %i4, %g4
66 mov %i5, %g5
67
68 /* Wheee.... we are done. */
69 ret
70 restore
71
72 .align 8192
diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c
deleted file mode 100644
index f4a8143e052c..000000000000
--- a/arch/sparc64/prom/memory.c
+++ /dev/null
@@ -1,152 +0,0 @@
1/* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $
2 * memory.c: Prom routine for acquiring various bits of information
3 * about RAM on the machine, both virtual and physical.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11
12#include <asm/openprom.h>
13#include <asm/oplib.h>
14
15/* This routine, for consistency, returns the ram parameters in the
16 * V0 prom memory descriptor format. I choose this format because I
17 * think it was the easiest to work with. I can feel the religious
18 * arguments coming now... ;) Also, I return the linked lists sorted to
19 * prevent paging_init() from getting an upset stomach, as I have
20 * not yet written the pepto-bismol kernel module.
21 */
22
23struct linux_prom64_registers prom_reg_memlist[64];
24struct linux_prom64_registers prom_reg_tmp[64];
25
26struct linux_mlist_p1275 prom_phys_total[64];
27struct linux_mlist_p1275 prom_prom_taken[64];
28struct linux_mlist_p1275 prom_phys_avail[64];
29
30struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total;
31struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken;
32struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail;
33
34struct linux_mem_p1275 prom_memlist;
35
36
37/* Internal Prom library routine to sort a linux_mlist_p1275 memory
38 * list. Used below in initialization.
39 */
40static void __init
41prom_sortmemlist(struct linux_mlist_p1275 *thislist)
42{
43 int swapi = 0;
44 int i, mitr;
45 unsigned long tmpaddr, tmpsize;
46 unsigned long lowest;
47
48 for(i=0; thislist[i].theres_more; i++) {
49 lowest = thislist[i].start_adr;
50 for(mitr = i+1; thislist[mitr-1].theres_more; mitr++)
51 if(thislist[mitr].start_adr < lowest) {
52 lowest = thislist[mitr].start_adr;
53 swapi = mitr;
54 }
55 if(lowest == thislist[i].start_adr) continue;
56 tmpaddr = thislist[swapi].start_adr;
57 tmpsize = thislist[swapi].num_bytes;
58 for(mitr = swapi; mitr > i; mitr--) {
59 thislist[mitr].start_adr = thislist[mitr-1].start_adr;
60 thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
61 }
62 thislist[i].start_adr = tmpaddr;
63 thislist[i].num_bytes = tmpsize;
64 }
65}
66
67/* Initialize the memory lists based upon the prom version. */
68void __init prom_meminit(void)
69{
70 int node = 0;
71 unsigned int iter, num_regs;
72
73 node = prom_finddevice("/memory");
74 num_regs = prom_getproperty(node, "available",
75 (char *) prom_reg_memlist,
76 sizeof(prom_reg_memlist));
77 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
78 for(iter=0; iter<num_regs; iter++) {
79 prom_phys_avail[iter].start_adr =
80 prom_reg_memlist[iter].phys_addr;
81 prom_phys_avail[iter].num_bytes =
82 prom_reg_memlist[iter].reg_size;
83 prom_phys_avail[iter].theres_more =
84 &prom_phys_avail[iter+1];
85 }
86 prom_phys_avail[iter-1].theres_more = NULL;
87
88 num_regs = prom_getproperty(node, "reg",
89 (char *) prom_reg_memlist,
90 sizeof(prom_reg_memlist));
91 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
92 for(iter=0; iter<num_regs; iter++) {
93 prom_phys_total[iter].start_adr =
94 prom_reg_memlist[iter].phys_addr;
95 prom_phys_total[iter].num_bytes =
96 prom_reg_memlist[iter].reg_size;
97 prom_phys_total[iter].theres_more =
98 &prom_phys_total[iter+1];
99 }
100 prom_phys_total[iter-1].theres_more = NULL;
101
102 node = prom_finddevice("/virtual-memory");
103 num_regs = prom_getproperty(node, "available",
104 (char *) prom_reg_memlist,
105 sizeof(prom_reg_memlist));
106 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
107
108 /* Convert available virtual areas to taken virtual
109 * areas. First sort, then convert.
110 */
111 for(iter=0; iter<num_regs; iter++) {
112 prom_prom_taken[iter].start_adr =
113 prom_reg_memlist[iter].phys_addr;
114 prom_prom_taken[iter].num_bytes =
115 prom_reg_memlist[iter].reg_size;
116 prom_prom_taken[iter].theres_more =
117 &prom_prom_taken[iter+1];
118 }
119 prom_prom_taken[iter-1].theres_more = NULL;
120
121 prom_sortmemlist(prom_prom_taken);
122
123 /* Finally, convert. */
124 for(iter=0; iter<num_regs; iter++) {
125 prom_prom_taken[iter].start_adr =
126 prom_prom_taken[iter].start_adr +
127 prom_prom_taken[iter].num_bytes;
128 prom_prom_taken[iter].num_bytes =
129 prom_prom_taken[iter+1].start_adr -
130 prom_prom_taken[iter].start_adr;
131 }
132 prom_prom_taken[iter-1].num_bytes =
133 -1UL - prom_prom_taken[iter-1].start_adr;
134
135 /* Sort the other two lists. */
136 prom_sortmemlist(prom_phys_total);
137 prom_sortmemlist(prom_phys_avail);
138
139 /* Link all the lists into the top-level descriptor. */
140 prom_memlist.p1275_totphys=&prom_ptot_ptr;
141 prom_memlist.p1275_prommap=&prom_ptak_ptr;
142 prom_memlist.p1275_available=&prom_pavl_ptr;
143}
144
145/* This returns a pointer to our libraries internal p1275 format
146 * memory descriptor.
147 */
148struct linux_mem_p1275 *
149prom_meminfo(void)
150{
151 return &prom_memlist;
152}
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 19c44e97e9ee..87f5cfce23bb 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -17,14 +17,14 @@
17#include <asm/system.h> 17#include <asm/system.h>
18 18
19/* Reset and reboot the machine with the command 'bcommand'. */ 19/* Reset and reboot the machine with the command 'bcommand'. */
20void prom_reboot(char *bcommand) 20void prom_reboot(const char *bcommand)
21{ 21{
22 p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) | 22 p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
23 P1275_INOUT(1, 0), bcommand); 23 P1275_INOUT(1, 0), bcommand);
24} 24}
25 25
26/* Forth evaluate the expression contained in 'fstring'. */ 26/* Forth evaluate the expression contained in 'fstring'. */
27void prom_feval(char *fstring) 27void prom_feval(const char *fstring)
28{ 28{
29 if (!fstring || fstring[0] == 0) 29 if (!fstring || fstring[0] == 0)
30 return; 30 return;
@@ -68,19 +68,11 @@ void prom_cmdline(void)
68 local_irq_restore(flags); 68 local_irq_restore(flags);
69} 69}
70 70
71#ifdef CONFIG_SMP
72extern void smp_promstop_others(void);
73#endif
74
75/* Drop into the prom, but completely terminate the program. 71/* Drop into the prom, but completely terminate the program.
76 * No chance of continuing. 72 * No chance of continuing.
77 */ 73 */
78void prom_halt(void) 74void prom_halt(void)
79{ 75{
80#ifdef CONFIG_SMP
81 smp_promstop_others();
82 udelay(8000);
83#endif
84again: 76again:
85 p1275_cmd("exit", P1275_INOUT(0, 0)); 77 p1275_cmd("exit", P1275_INOUT(0, 0));
86 goto again; /* PROM is out to get me -DaveM */ 78 goto again; /* PROM is out to get me -DaveM */
@@ -88,10 +80,6 @@ again:
88 80
89void prom_halt_power_off(void) 81void prom_halt_power_off(void)
90{ 82{
91#ifdef CONFIG_SMP
92 smp_promstop_others();
93 udelay(8000);
94#endif
95 p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0)); 83 p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
96 84
97 /* if nothing else helps, we just halt */ 85 /* if nothing else helps, we just halt */
@@ -148,21 +136,19 @@ void prom_set_trap_table(unsigned long tba)
148 p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); 136 p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
149} 137}
150 138
151int mmu_ihandle_cache = 0;
152
153int prom_get_mmu_ihandle(void) 139int prom_get_mmu_ihandle(void)
154{ 140{
155 int node, ret; 141 int node, ret;
156 142
157 if (mmu_ihandle_cache != 0) 143 if (prom_mmu_ihandle_cache != 0)
158 return mmu_ihandle_cache; 144 return prom_mmu_ihandle_cache;
159 145
160 node = prom_finddevice("/chosen"); 146 node = prom_finddevice(prom_chosen_path);
161 ret = prom_getint(node, "mmu"); 147 ret = prom_getint(node, prom_mmu_name);
162 if (ret == -1 || ret == 0) 148 if (ret == -1 || ret == 0)
163 mmu_ihandle_cache = -1; 149 prom_mmu_ihandle_cache = -1;
164 else 150 else
165 mmu_ihandle_cache = ret; 151 prom_mmu_ihandle_cache = ret;
166 152
167 return ret; 153 return ret;
168} 154}
@@ -190,7 +176,7 @@ long prom_itlb_load(unsigned long index,
190 unsigned long tte_data, 176 unsigned long tte_data,
191 unsigned long vaddr) 177 unsigned long vaddr)
192{ 178{
193 return p1275_cmd("call-method", 179 return p1275_cmd(prom_callmethod_name,
194 (P1275_ARG(0, P1275_ARG_IN_STRING) | 180 (P1275_ARG(0, P1275_ARG_IN_STRING) |
195 P1275_ARG(2, P1275_ARG_IN_64B) | 181 P1275_ARG(2, P1275_ARG_IN_64B) |
196 P1275_ARG(3, P1275_ARG_IN_64B) | 182 P1275_ARG(3, P1275_ARG_IN_64B) |
@@ -207,7 +193,7 @@ long prom_dtlb_load(unsigned long index,
207 unsigned long tte_data, 193 unsigned long tte_data,
208 unsigned long vaddr) 194 unsigned long vaddr)
209{ 195{
210 return p1275_cmd("call-method", 196 return p1275_cmd(prom_callmethod_name,
211 (P1275_ARG(0, P1275_ARG_IN_STRING) | 197 (P1275_ARG(0, P1275_ARG_IN_STRING) |
212 P1275_ARG(2, P1275_ARG_IN_64B) | 198 P1275_ARG(2, P1275_ARG_IN_64B) |
213 P1275_ARG(3, P1275_ARG_IN_64B) | 199 P1275_ARG(3, P1275_ARG_IN_64B) |
@@ -223,13 +209,13 @@ long prom_dtlb_load(unsigned long index,
223int prom_map(int mode, unsigned long size, 209int prom_map(int mode, unsigned long size,
224 unsigned long vaddr, unsigned long paddr) 210 unsigned long vaddr, unsigned long paddr)
225{ 211{
226 int ret = p1275_cmd("call-method", 212 int ret = p1275_cmd(prom_callmethod_name,
227 (P1275_ARG(0, P1275_ARG_IN_STRING) | 213 (P1275_ARG(0, P1275_ARG_IN_STRING) |
228 P1275_ARG(3, P1275_ARG_IN_64B) | 214 P1275_ARG(3, P1275_ARG_IN_64B) |
229 P1275_ARG(4, P1275_ARG_IN_64B) | 215 P1275_ARG(4, P1275_ARG_IN_64B) |
230 P1275_ARG(6, P1275_ARG_IN_64B) | 216 P1275_ARG(6, P1275_ARG_IN_64B) |
231 P1275_INOUT(7, 1)), 217 P1275_INOUT(7, 1)),
232 "map", 218 prom_map_name,
233 prom_get_mmu_ihandle(), 219 prom_get_mmu_ihandle(),
234 mode, 220 mode,
235 size, 221 size,
@@ -244,12 +230,12 @@ int prom_map(int mode, unsigned long size,
244 230
245void prom_unmap(unsigned long size, unsigned long vaddr) 231void prom_unmap(unsigned long size, unsigned long vaddr)
246{ 232{
247 p1275_cmd("call-method", 233 p1275_cmd(prom_callmethod_name,
248 (P1275_ARG(0, P1275_ARG_IN_STRING) | 234 (P1275_ARG(0, P1275_ARG_IN_STRING) |
249 P1275_ARG(2, P1275_ARG_IN_64B) | 235 P1275_ARG(2, P1275_ARG_IN_64B) |
250 P1275_ARG(3, P1275_ARG_IN_64B) | 236 P1275_ARG(3, P1275_ARG_IN_64B) |
251 P1275_INOUT(4, 0)), 237 P1275_INOUT(4, 0)),
252 "unmap", 238 prom_unmap_name,
253 prom_get_mmu_ihandle(), 239 prom_get_mmu_ihandle(),
254 size, 240 size,
255 vaddr); 241 vaddr);
@@ -258,7 +244,7 @@ void prom_unmap(unsigned long size, unsigned long vaddr)
258/* Set aside physical memory which is not touched or modified 244/* Set aside physical memory which is not touched or modified
259 * across soft resets. 245 * across soft resets.
260 */ 246 */
261unsigned long prom_retain(char *name, 247unsigned long prom_retain(const char *name,
262 unsigned long pa_low, unsigned long pa_high, 248 unsigned long pa_low, unsigned long pa_high,
263 long size, long align) 249 long size, long align)
264{ 250{
@@ -290,7 +276,7 @@ int prom_getunumber(int syndrome_code,
290 unsigned long phys_addr, 276 unsigned long phys_addr,
291 char *buf, int buflen) 277 char *buf, int buflen)
292{ 278{
293 return p1275_cmd("call-method", 279 return p1275_cmd(prom_callmethod_name,
294 (P1275_ARG(0, P1275_ARG_IN_STRING) | 280 (P1275_ARG(0, P1275_ARG_IN_STRING) |
295 P1275_ARG(3, P1275_ARG_OUT_BUF) | 281 P1275_ARG(3, P1275_ARG_OUT_BUF) |
296 P1275_ARG(6, P1275_ARG_IN_64B) | 282 P1275_ARG(6, P1275_ARG_IN_64B) |
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
index 59fe38bba39e..a5a7c5712028 100644
--- a/arch/sparc64/prom/p1275.c
+++ b/arch/sparc64/prom/p1275.c
@@ -46,7 +46,7 @@ static inline unsigned long spitfire_get_primary_context(void)
46 */ 46 */
47DEFINE_SPINLOCK(prom_entry_lock); 47DEFINE_SPINLOCK(prom_entry_lock);
48 48
49long p1275_cmd (char *service, long fmt, ...) 49long p1275_cmd(const char *service, long fmt, ...)
50{ 50{
51 char *p, *q; 51 char *p, *q;
52 unsigned long flags; 52 unsigned long flags;
diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c
index a6df82cafa0d..660943ee4c2a 100644
--- a/arch/sparc64/prom/printf.c
+++ b/arch/sparc64/prom/printf.c
@@ -34,7 +34,7 @@ prom_write(const char *buf, unsigned int n)
34} 34}
35 35
36void 36void
37prom_printf(char *fmt, ...) 37prom_printf(const char *fmt, ...)
38{ 38{
39 va_list args; 39 va_list args;
40 int i; 40 int i;
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index ccf73258ebf7..b1ff9e87dcc6 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -69,7 +69,7 @@ prom_getsibling(int node)
  * Return -1 on error.
  */
 __inline__ int
-prom_getproplen(int node, char *prop)
+prom_getproplen(int node, const char *prop)
 {
 	if((!node) || (!prop)) return -1;
 	return p1275_cmd ("getproplen",
@@ -83,20 +83,20 @@ prom_getproplen(int node, char *prop)
  * was successful the length will be returned, else -1 is returned.
  */
 __inline__ int
-prom_getproperty(int node, char *prop, char *buffer, int bufsize)
+prom_getproperty(int node, const char *prop, char *buffer, int bufsize)
 {
 	int plen;
 
 	plen = prom_getproplen(node, prop);
-	if((plen > bufsize) || (plen == 0) || (plen == -1))
+	if ((plen > bufsize) || (plen == 0) || (plen == -1)) {
 		return -1;
-	else {
+	} else {
 		/* Ok, things seem all right. */
-		return p1275_cmd ("getprop",
+		return p1275_cmd(prom_getprop_name,
 				 P1275_ARG(1,P1275_ARG_IN_STRING)|
 				 P1275_ARG(2,P1275_ARG_OUT_BUF)|
 				 P1275_INOUT(4, 1),
 				 node, prop, buffer, P1275_SIZE(plen));
 	}
 }
 
@@ -104,7 +104,7 @@ prom_getproperty(int node, char *prop, char *buffer, int bufsize)
  * on failure.
  */
 __inline__ int
-prom_getint(int node, char *prop)
+prom_getint(int node, const char *prop)
 {
 	int intprop;
 
@@ -119,7 +119,7 @@ prom_getint(int node, char *prop)
  */
 
 int
-prom_getintdefault(int node, char *property, int deflt)
+prom_getintdefault(int node, const char *property, int deflt)
 {
 	int retval;
 
@@ -131,7 +131,7 @@ prom_getintdefault(int node, char *property, int deflt)
 
 /* Acquire a boolean property, 1=TRUE 0=FALSE. */
 int
-prom_getbool(int node, char *prop)
+prom_getbool(int node, const char *prop)
 {
 	int retval;
 
@@ -145,7 +145,7 @@ prom_getbool(int node, char *prop)
  * buffer.
  */
 void
-prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
+prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size)
 {
 	int len;
 
@@ -160,7 +160,7 @@ prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
  * YES = 1   NO = 0
  */
 int
-prom_nodematch(int node, char *name)
+prom_nodematch(int node, const char *name)
 {
 	char namebuf[128];
 	prom_getproperty(node, "name", namebuf, sizeof(namebuf));
@@ -172,7 +172,7 @@ prom_nodematch(int node, char *name)
  * 'nodename'.  Return node if successful, zero if not.
  */
 int
-prom_searchsiblings(int node_start, char *nodename)
+prom_searchsiblings(int node_start, const char *nodename)
 {
 
 	int thisnode, error;
@@ -294,7 +294,7 @@ prom_firstprop(int node, char *buffer)
  * property types for this node.
  */
 __inline__ char *
-prom_nextprop(int node, char *oprop, char *buffer)
+prom_nextprop(int node, const char *oprop, char *buffer)
 {
 	char buf[32];
 
@@ -314,15 +314,17 @@ prom_nextprop(int node, char *oprop, char *buffer)
 }
 
 int
-prom_finddevice(char *name)
+prom_finddevice(const char *name)
 {
-	if(!name) return 0;
-	return p1275_cmd ("finddevice", P1275_ARG(0,P1275_ARG_IN_STRING)|
-			  P1275_INOUT(1, 1),
-			  name);
+	if (!name)
+		return 0;
+	return p1275_cmd(prom_finddev_name,
+			 P1275_ARG(0,P1275_ARG_IN_STRING)|
+			 P1275_INOUT(1, 1),
+			 name);
 }
 
-int prom_node_has_property(int node, char *prop)
+int prom_node_has_property(int node, const char *prop)
 {
 	char buf [32];
 
@@ -339,7 +341,7 @@ int prom_node_has_property(int node, char *prop)
  * of 'size' bytes.  Return the number of bytes the prom accepted.
  */
 int
-prom_setprop(int node, char *pname, char *value, int size)
+prom_setprop(int node, const char *pname, char *value, int size)
 {
 	if(size == 0) return 0;
 	if((pname == 0) || (value == 0)) return 0;
@@ -364,7 +366,7 @@ prom_inst2pkg(int inst)
  * FIXME: Should work for v0 as well
  */
 int
-prom_pathtoinode(char *path)
+prom_pathtoinode(const char *path)
 {
 	int node, inst;
 
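
Taken together, the tree.c changes let consumers pass literal device paths and property names straight to the accessors. A hypothetical consumer of the prototypes above; the error conventions follow the comments in the hunks (-1 from prom_getproplen on error), while treating a non-positive prom_finddevice handle as failure is an assumption:

	/* Hypothetical caller: find a node by path and read its "name"
	 * property into a local buffer.
	 */
	static void example_probe(void)
	{
		char namebuf[128];
		int node = prom_finddevice("/chosen");

		if (node <= 0)	/* failure convention assumed */
			return;
		if (prom_getproplen(node, "name") > 0)
			prom_getstring(node, "name", namebuf, sizeof(namebuf));
	}
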