Diffstat (limited to 'arch/sparc64/kernel/head.S')
-rw-r--r--  arch/sparc64/kernel/head.S  782
1 file changed, 782 insertions, 0 deletions
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
new file mode 100644
index 000000000000..8104a56ca2d8
--- /dev/null
+++ b/arch/sparc64/kernel/head.S
@@ -0,0 +1,782 @@
/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
 * head.S: Initial boot code for the Sparc64 port of Linux.
 *
 * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
 */

#include <linux/config.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/processor.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/head.h>
#include <asm/ttable.h>
#include <asm/mmu.h>

/* This section, from _start to sparc64_boot_end, should fit into
 * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
 * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
 * 0x0000.0000.0040.6000, and empty_bad_page, which is from
 * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
 */
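
/* A picture of that shared region, using the addresses given above:
 *
 *   0x0000.0000.0040.4000  _start / bootup_user_stack
 *   0x0000.0000.0040.6000  empty_bad_page
 *   0x0000.0000.0040.8000  end of the shared window (and, further down,
 *                          the 32K-aligned trap table)
 */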

	.text
	.globl	start, _start, stext, _stext
_start:
start:
_stext:
stext:
bootup_user_stack:
! 0x0000000000404000
	b	sparc64_boot
	 flushw			/* Flush register file. */

/* This stuff has to be in sync with SILO and other potential boot loaders
 * Fields should be kept upward compatible and whenever any change is made,
 * HdrS version should be incremented.
 */
	.global	root_flags, ram_flags, root_dev
	.global	sparc_ramdisk_image, sparc_ramdisk_size
	.global	sparc_ramdisk_image64

	.ascii	"HdrS"
	.word	LINUX_VERSION_CODE

	/* History:
	 *
	 * 0x0300 : Supports being located at other than 0x4000
	 * 0x0202 : Supports kernel params string
	 * 0x0201 : Supports reboot_command
	 */
	.half	0x0301		/* HdrS version */

root_flags:
	.half	1
root_dev:
	.half	0
ram_flags:
	.half	0
sparc_ramdisk_image:
	.word	0
sparc_ramdisk_size:
	.word	0
	.xword	reboot_command
	.xword	bootstr_info
sparc_ramdisk_image64:
	.xword	0
	.word	_end
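
	/* A sketch of the header as assembled above, for boot loader
	 * authors.  Offsets are from _start, and assume the two leading
	 * instructions occupy 8 bytes and that the assembler inserts no
	 * padding between the fields:
	 *
	 *   0x08  .ascii "HdrS"             magic
	 *   0x0c  .word  LINUX_VERSION_CODE
	 *   0x10  .half  HdrS version (0x0301)
	 *   0x12  .half  root_flags
	 *   0x14  .half  root_dev
	 *   0x16  .half  ram_flags
	 *   0x18  .word  sparc_ramdisk_image
	 *   0x1c  .word  sparc_ramdisk_size
	 *   0x20  .xword reboot_command
	 *   0x28  .xword bootstr_info
	 *   0x30  .xword sparc_ramdisk_image64
	 *   0x38  .word  _end
	 */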

	/* We must be careful, 32-bit OpenBOOT will get confused if it
	 * tries to save away a register window to a 64-bit kernel
	 * stack address.  Flush all windows, disable interrupts,
	 * remap if necessary, jump onto kernel trap table, then kernel
	 * stack, or else we die.
	 *
	 * PROM entry point is in %o4.
	 */
sparc64_boot:
	BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
	ba,pt	%xcc, spitfire_boot
	 nop

cheetah_plus_boot:
	/* Preserve OBP chosen DCU and DCR register settings. */
	ba,pt	%xcc, cheetah_generic_boot
	 nop

cheetah_boot:
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
	or	%g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
	sllx	%g7, 32, %g7
	or	%g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
	stxa	%g7, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync

cheetah_generic_boot:
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	wrpr	%g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
	wr	%g0, 0, %fprs

	/* Just like for Spitfire, we probe itlb-2 for a mapping which
	 * matches our current %pc.  We take the physical address in
	 * that mapping and use it to make our own.
	 */

	/* %g5 holds the tlb data */
	sethi	%uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
	sllx	%g5, 32, %g5
	or	%g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
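
	/* What %g5 now holds is a TTE for a locked (_PAGE_L), writable,
	 * privileged, cacheable, global 4MB mapping; only the physical
	 * address bits are still zero, and they are filled in below from
	 * whatever mapping the firmware gave our %pc.
	 */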

	/* Put PADDR tlb data mask into %g3. */
	sethi	%uhi(_PAGE_PADDR), %g3
	or	%g3, %ulo(_PAGE_PADDR), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(_PAGE_PADDR), %g7
	or	%g7, %lo(_PAGE_PADDR), %g7
	or	%g3, %g7, %g3

	set	2 << 16, %l0		/* TLB entry walker. */
	set	0x1fff, %l2		/* Page mask. */
	rd	%pc, %l3
	andn	%l3, %l2, %g2		/* vaddr comparator */

1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g2
	be,pn	%xcc, cheetah_got_tlbentry
	 nop
	and	%l0, (127 << 3), %g1
	cmp	%g1, (127 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0
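
	/* A note on the walker encoding, assuming the Cheetah convention
	 * the constants above imply: %l0 is (tlb# << 16) | (entry# << 3),
	 * so "2 << 16" walks the 128-entry large ITLB, and the loop below
	 * (with %l0 cleared) walks entries 0-15 of the small ITLB.
	 */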

	/* Search the small TLB.  OBP never maps us like that but
	 * newer SILO can.
	 */
	clr	%l0

1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g2
	be,pn	%xcc, cheetah_got_tlbentry
	 nop
	cmp	%l0, (15 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* BUG() if we get here... */
	ta	0x5

cheetah_got_tlbentry:
	ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g0
	ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g1
	membar	#Sync
	and	%g1, %g3, %g1
	set	0x5fff, %l0
	andn	%g1, %l0, %g1
	or	%g5, %g1, %g5
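
	/* The 0x5fff mask is 0x4000 | 0x1fff: stripping both the page
	 * offset and the 0x4000 at which the image sits within its 4MB
	 * region should leave the 4MB-aligned physical base for the
	 * KERNBASE TTE (the Spitfire path below does the same thing).
	 */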

	/* Clear out any KERNBASE area entries. */
	set	2 << 16, %l0
	sethi	%hi(KERNBASE), %g3
	sethi	%hi(KERNBASE<<1), %g7
	mov	TLB_TAG_ACCESS, %l7

	/* First, check ITLB */
1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_IMMU
	membar	#Sync
	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
	membar	#Sync

2:	and	%l0, (127 << 3), %g1
	cmp	%g1, (127 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* Next, check DTLB */
	set	2 << 16, %l0
1:	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_DMMU
	membar	#Sync
	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
	membar	#Sync

2:	and	%l0, (511 << 3), %g1
	cmp	%g1, (511 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* On Cheetah+, have to check second DTLB. */
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
	ba,pt	%xcc, 9f
	 nop

2:	set	3 << 16, %l0
1:	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_DMMU
	membar	#Sync
	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
	membar	#Sync

2:	and	%l0, (511 << 3), %g1
	cmp	%g1, (511 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

9:

	/* Now lock the TTE we created into ITLB-0 and DTLB-0,
	 * entry 15 (and maybe 14 too).
	 */
	sethi	%hi(KERNBASE), %g3
	set	(0 << 16) | (15 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU
	membar	#Sync
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU
	membar	#Sync
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(_end), %g3			/* Check for bigkernel case */
	or	%g3, %lo(_end), %g3
	srl	%g3, 23, %g3			/* Check if _end > 8M */
	brz,pt	%g3, 1f
	 sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
	sethi	%hi(0x400000), %g3
	or	%g3, %lo(0x400000), %g3
	add	%g5, %g3, %g5			/* New tte data */
	andn	%g5, (_PAGE_G), %g5
	sethi	%hi(KERNBASE+0x400000), %g3
	or	%g3, %lo(KERNBASE+0x400000), %g3
	set	(0 << 16) | (14 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU
	membar	#Sync
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU
	membar	#Sync
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
	ba,pt	%xcc, 1f
	 nop
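
	/* Note on the bigkernel test above: shifting _end right by 23
	 * asks whether the image reaches 8MB (1 << 23).  KERNBASE is the
	 * 4MB mark, so a non-zero result means one locked 4MB page is
	 * not enough, and a second, non-global TTE for KERNBASE+0x400000
	 * gets locked into entry 14.
	 */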

1:	set	sun4u_init, %g2
	jmpl	%g2 + %g0, %g0
	 nop

spitfire_boot:
	/* Typically PROM has already enabled both MMUs and both on-chip
	 * caches, but we do it here anyway just to be paranoid.
	 */
	mov	(LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

	/*
	 * Make sure we are in privileged mode, have address masking,
	 * using the ordinary globals and have enabled floating
	 * point.
	 *
	 * Again, typically PROM has left %pil at 13 or similar, and
	 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
	 */
	wrpr	%g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
	wr	%g0, 0, %fprs

spitfire_create_mappings:
	/* %g5 holds the tlb data */
	sethi	%uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
	sllx	%g5, 32, %g5
	or	%g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5

	/* Base of physical memory cannot reliably be assumed to be
	 * at 0x0!  Figure out where it happens to be. -DaveM
	 */

	/* Put PADDR tlb data mask into %g3. */
	sethi	%uhi(_PAGE_PADDR_SF), %g3
	or	%g3, %ulo(_PAGE_PADDR_SF), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(_PAGE_PADDR_SF), %g7
	or	%g7, %lo(_PAGE_PADDR_SF), %g7
	or	%g3, %g7, %g3
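
	/* _PAGE_PADDR_SF here versus _PAGE_PADDR in the Cheetah path:
	 * the two masks differ because the CPU families implement
	 * different physical address widths (41 bits on Spitfire vs.
	 * 43 on Cheetah), so the paddr field of a TTE is wider on
	 * Cheetah.
	 */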

	/* Walk through entire ITLB, looking for entry which maps
	 * our %pc currently, stick PADDR from there into %g5 tlb data.
	 */
	clr	%l0			/* TLB entry walker. */
	set	0x1fff, %l2		/* Page mask. */
	rd	%pc, %l3
	andn	%l3, %l2, %g2		/* vaddr comparator */
1:
	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	nop
	nop
	nop
	andn	%g1, %l2, %g1		/* Get vaddr */
	cmp	%g1, %g2
	be,a,pn	%xcc, spitfire_got_tlbentry
	 ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g1
	cmp	%l0, (63 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* BUG() if we get here... */
	ta	0x5

spitfire_got_tlbentry:
	/* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
	nop
	nop
	nop
	and	%g1, %g3, %g1		/* Mask to just get paddr bits. */
	set	0x5fff, %l3		/* Mask offset to get phys base. */
	andn	%g1, %l3, %g1

	/* NOTE: We hold on to %g1 paddr base as we need it below to lock
	 * NOTE: the PROM cif code into the TLB.
	 */

	or	%g5, %g1, %g5		/* Or it into TAG being built. */

	clr	%l0			/* TLB entry walker. */
	sethi	%hi(KERNBASE), %g3	/* 4M lower limit */
	sethi	%hi(KERNBASE<<1), %g7	/* 8M upper limit */
	mov	TLB_TAG_ACCESS, %l7
1:
	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	nop
	nop
	nop
	andn	%g1, %l2, %g1		/* Get vaddr */
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_IMMU
	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:
	cmp	%l0, (63 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	nop; nop; nop

	clr	%l0			/* TLB entry walker. */
1:
	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
	nop
	nop
	nop
	andn	%g1, %l2, %g1		/* Get vaddr */
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_DMMU
	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:
	cmp	%l0, (63 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	nop; nop; nop

	/* PROM never puts any TLB entries into the MMU with the lock bit
	 * set.  So we gladly use tlb entry 63 for KERNBASE.  And maybe 62 too.
	 */

	sethi	%hi(KERNBASE), %g3
	mov	(63 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU		/* KERNBASE into TLB TAG */
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS	/* TTE into TLB DATA */
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU		/* KERNBASE into TLB TAG */
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS	/* TTE into TLB DATA */
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(_end), %g3			/* Check for bigkernel case */
	or	%g3, %lo(_end), %g3
	srl	%g3, 23, %g3			/* Check if _end > 8M */
	brz,pt	%g3, 2f
	 sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
	sethi	%hi(0x400000), %g3
	or	%g3, %lo(0x400000), %g3
	add	%g5, %g3, %g5			/* New tte data */
	andn	%g5, (_PAGE_G), %g5
	sethi	%hi(KERNBASE+0x400000), %g3
	or	%g3, %lo(KERNBASE+0x400000), %g3
	mov	(62 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
2:	ba,pt	%xcc, 1f
	 nop
1:
	set	sun4u_init, %g2
	jmpl	%g2 + %g0, %g0
	 nop

sun4u_init:
	/* Set ctx 0 */
	mov	PRIMARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	mov	SECONDARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	/* We are now safely (we hope) in Nucleus context (0), rewrite
	 * the KERNBASE TTEs so they no longer have the global bit set.
	 * Don't forget to setup TAG_ACCESS first 8-)
	 */
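
	/* Why clear _PAGE_G: a global TTE matches no matter what the
	 * context register says, which is what made the lock-down
	 * entries safe up to this point.  With ctx 0 now established,
	 * the kernel mapping can match as an ordinary nucleus-context
	 * entry, so the global bit is dropped.
	 */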
	mov	TLB_TAG_ACCESS, %g2
	stxa	%g3, [%g2] ASI_IMMU
	stxa	%g3, [%g2] ASI_DMMU
	membar	#Sync

	BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)

	ba,pt	%xcc, spitfire_tlb_fixup
	 nop

cheetah_tlb_fixup:
	set	(0 << 16) | (15 << 3), %g7
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g0
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync

	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g0
	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	/* Kill instruction prefetch queues. */
	flush	%g3
	membar	#Sync

	mov	2, %g2		/* Set TLB type to cheetah+. */
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)

	mov	1, %g2		/* Set TLB type to cheetah. */

1:	sethi	%hi(tlb_type), %g1
	stw	%g2, [%g1 + %lo(tlb_type)]

	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
	ba,pt	%xcc, 2f
	 nop

1:	/* Patch context register writes to support nucleus page
	 * size correctly.
	 */
	call	cheetah_plus_patch_etrap
	 nop
	call	cheetah_plus_patch_rtrap
	 nop
	call	cheetah_plus_patch_fpdis
	 nop
	call	cheetah_plus_patch_winfixup
	 nop


2:	/* Patch copy/page operations to cheetah optimized versions. */
	call	cheetah_patch_copyops
	 nop
	call	cheetah_patch_cachetlbops
	 nop

	ba,pt	%xcc, tlb_fixup_done
	 nop

spitfire_tlb_fixup:
	mov	(63 << 3), %g7
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync

	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	/* Kill instruction prefetch queues. */
	flush	%g3
	membar	#Sync

	/* Set TLB type to spitfire. */
	mov	0, %g2
	sethi	%hi(tlb_type), %g1
	stw	%g2, [%g1 + %lo(tlb_type)]

tlb_fixup_done:
	sethi	%hi(init_thread_union), %g6
	or	%g6, %lo(init_thread_union), %g6
	ldx	[%g6 + TI_TASK], %g4
	mov	%sp, %l6
	mov	%o4, %l7

#if 0 /* We don't do it like this anymore, but for historical hack value
       * I leave this snippet here to show how crazy we can be sometimes. 8-)
       */

	/* Setup "Linux Current Register", thanks Sun 8-) */
	wr	%g0, 0x1, %pcr

	/* Blackbird errata workaround.  See commentary in
	 * smp.c:smp_percpu_timer_interrupt() for more
	 * information.
	 */
	ba,pt	%xcc, 99f
	 nop
	.align	64
99:	wr	%g6, %g0, %pic
	rd	%pic, %g0
#endif

	wr	%g0, ASI_P, %asi
	mov	1, %g1
	sllx	%g1, THREAD_SHIFT, %g1
	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
	add	%g6, %g1, %sp
	mov	0, %fp
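
	/* Initial stack: %sp = init_thread_union + (1 << THREAD_SHIFT)
	 *                      - STACKFRAME_SZ - STACK_BIAS
	 * i.e. one frame below the top of the thread union.  STACK_BIAS
	 * is the sparc64 ABI convention of keeping %sp 2047 bytes below
	 * the real frame address.
	 */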

	/* Set per-cpu pointer initially to zero, this makes
	 * the boot-cpu use the in-kernel-image per-cpu areas
	 * before setup_per_cpu_area() is invoked.
	 */
	clr	%g5

	wrpr	%g0, 0, %wstate
	wrpr	%g0, 0x0, %tl

	/* Clear the bss */
	sethi	%hi(__bss_start), %o0
	or	%o0, %lo(__bss_start), %o0
	sethi	%hi(_end), %o1
	or	%o1, %lo(_end), %o1
	call	__bzero
	 sub	%o1, %o0, %o1
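
	/* Note the delay slot above: the length argument for __bzero,
	 * _end - __bss_start, is computed after the call instruction
	 * but before control actually transfers.
	 */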

	mov	%l6, %o1		! OpenPROM stack
	call	prom_init
	 mov	%l7, %o0		! OpenPROM cif handler

	/* Off we go.... */
	call	start_kernel
	 nop
	/* Not reached... */

/* IMPORTANT NOTE: Whenever making changes here, check
 * trampoline.S as well. -jj */
	.globl	setup_tba
setup_tba:	/* i0 = is_starfire */
	save	%sp, -160, %sp

	rdpr	%tba, %g7
	sethi	%hi(prom_tba), %o1
	or	%o1, %lo(prom_tba), %o1
	stx	%g7, [%o1]

	/* Setup "Linux" globals 8-) */
	rdpr	%pstate, %o1
	mov	%g6, %o2
	wrpr	%o1, (PSTATE_AG|PSTATE_IE), %pstate
	sethi	%hi(sparc64_ttable_tl0), %g1
	wrpr	%g1, %tba
	mov	%o2, %g6
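
	/* The wrpr above toggles PSTATE_AG (and flips PSTATE_IE off,
	 * assuming OBP left it on), swapping in the alternate global
	 * register set where %g6 is a different physical register.
	 * That is why the thread_info pointer was staged through %o2:
	 * so the alternate-globals %g6 gets the same value.
	 */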

	/* Set up MMU globals */
	wrpr	%o1, (PSTATE_MG|PSTATE_IE), %pstate

	/* Set fixed globals used by dTLB miss handler. */
#define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)

	mov	TSB_REG, %g1
	stxa	%g0, [%g1] ASI_DMMU
	membar	#Sync
	stxa	%g0, [%g1] ASI_IMMU
	membar	#Sync
	mov	TLB_SFSR, %g1
	sethi	%uhi(KERN_HIGHBITS), %g2
	or	%g2, %ulo(KERN_HIGHBITS), %g2
	sllx	%g2, 32, %g2
	or	%g2, KERN_LOWBITS, %g2
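
	/* The XOR in KERN_HIGHBITS looks odd at first sight.  The idea,
	 * as apparently consumed by the dTLB miss handler these globals
	 * are set up for (see dtlb_base.S): 0xfffff80000000000 is the
	 * base of the linear kernel region, so xor'ing a vaddr from
	 * that region against %g2 cancels the high vaddr bits and drops
	 * in _PAGE_VALID|_PAGE_SZ4MB, yielding a TTE in one instruction.
	 */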

	BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
	ba,pt	%xcc, spitfire_vpte_base
	 nop

cheetah_vpte_base:
	sethi	%uhi(VPTE_BASE_CHEETAH), %g3
	or	%g3, %ulo(VPTE_BASE_CHEETAH), %g3
	ba,pt	%xcc, 2f
	 sllx	%g3, 32, %g3

spitfire_vpte_base:
	sethi	%uhi(VPTE_BASE_SPITFIRE), %g3
	or	%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
	sllx	%g3, 32, %g3

2:
	clr	%g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS

	/* Kill PROM timer */
	sethi	%hi(0x80000000), %o2
	sllx	%o2, 32, %o2
	wr	%o2, 0, %tick_cmpr

	BRANCH_IF_ANY_CHEETAH(o2,o3,1f)

	ba,pt	%xcc, 2f
	 nop

	/* Disable STICK_INT interrupts. */
1:
	sethi	%hi(0x80000000), %o2
	sllx	%o2, 32, %o2
	wr	%o2, %asr25
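
	/* In both cases the value written is 1 << 63: bit 63 of
	 * %tick_cmpr (and of the STICK compare register, %asr25, on
	 * Cheetah) is the interrupt-disable bit, so this masks the
	 * firmware's timer rather than programming a new compare value.
	 */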

	/* Ok, we're done setting up all the state our trap mechanism needs,
	 * now get back into normal globals and let the PROM know what is up.
	 */
2:
	wrpr	%g0, %g0, %wstate
	wrpr	%o1, PSTATE_IE, %pstate

	call	init_irqwork_curcpu
	 nop

	call	prom_set_trap_table
	 sethi	%hi(sparc64_ttable_tl0), %o0

	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
	ba,pt	%xcc, 2f
	 nop

1:	/* Start using proper page size encodings in ctx register. */
	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %g3
	mov	PRIMARY_CONTEXT, %g1
	sllx	%g3, 32, %g3
	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
	or	%g3, %g2, %g3
	stxa	%g3, [%g1] ASI_DMMU
	membar	#Sync

2:
	rdpr	%pstate, %o1
	or	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0, %pstate

	ret
	 restore

/*
 * The following skips make sure the trap table in ttable.S is aligned
 * on a 32K boundary, as required by the v9 specs for the TBA register.
 */
sparc64_boot_end:
	.skip	0x2000 + _start - sparc64_boot_end
bootup_user_stack_end:
	.skip	0x2000
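
/* Checking the arithmetic: _start is at 0x404000, so the first .skip
 * pads sparc64_boot_end out to _start + 0x2000 = 0x406000 (the
 * bootup_user_stack_end / empty_bad_page boundary), and the second 8K
 * .skip ends at 0x408000, the 32K-aligned address the trap table
 * below is annotated with.
 */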

#ifdef CONFIG_SBUS
/* This is just a hack to fool the "make depend" config.h discovery
 * strategy: the .S files below need config.h, but make depend does
 * not find it for them, so we include config.h here in head.S.
 */
#endif

! 0x0000000000408000

#include "ttable.S"
#include "systbls.S"

	.align	1024
	.globl	swapper_pg_dir
swapper_pg_dir:
	.word	0

#include "etrap.S"
#include "rtrap.S"
#include "winfixup.S"
#include "entry.S"

	/* This is just anal retentiveness on my part... */
	.align	16384

	.data
	.align	8
	.globl	prom_tba, tlb_type
prom_tba:	.xword	0
tlb_type:	.word	0	/* Must NOT end up in BSS */
	.section	".fixup",#alloc,#execinstr
	.globl	__ret_efault
__ret_efault:
	ret
	 restore %g0, -EFAULT, %o0