path: root/arch/sparc64/mm
author		Jeff Garzik <jgarzik@pobox.com>	2005-10-04 10:16:31 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-10-04 10:16:31 -0400
commit		2ee73cc2d507df7b28050fba5d08bd33dd34848c (patch)
tree		af5f33b265318e0f4b61f788691fe4f780ec402c /arch/sparc64/mm
parent		c1d9728ecc5b560465df3c0c0d3b3825c2710b40 (diff)
parent		ed39f731ab2e77e58122232f6e27333331d7793d (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--	arch/sparc64/mm/Makefile	|    2
-rw-r--r--	arch/sparc64/mm/extable.c	|   80
-rw-r--r--	arch/sparc64/mm/fault.c		|   69
-rw-r--r--	arch/sparc64/mm/init.c		|  905
-rw-r--r--	arch/sparc64/mm/ultra.S		|   96
5 files changed, 457 insertions(+), 695 deletions(-)
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
index cda87333a77b..9d0960e69f48 100644
--- a/arch/sparc64/mm/Makefile
+++ b/arch/sparc64/mm/Makefile
@@ -5,6 +5,6 @@
 EXTRA_AFLAGS := -ansi
 EXTRA_CFLAGS := -Werror
 
-obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o
+obj-y := ultra.o tlb.o fault.o init.o generic.o
 
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
deleted file mode 100644
index ec334297ff4f..000000000000
--- a/arch/sparc64/mm/extable.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * linux/arch/sparc64/mm/extable.c
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <asm/uaccess.h>
-
-extern const struct exception_table_entry __start___ex_table[];
-extern const struct exception_table_entry __stop___ex_table[];
-
-void sort_extable(struct exception_table_entry *start,
-		  struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *start,
-	       const struct exception_table_entry *last,
-	       unsigned long value)
-{
-	const struct exception_table_entry *walk;
-
-	/* Single insn entries are encoded as:
-	 * word 1: insn address
-	 * word 2: fixup code address
-	 *
-	 * Range entries are encoded as:
-	 * word 1: first insn address
-	 * word 2: 0
-	 * word 3: last insn address + 4 bytes
-	 * word 4: fixup code address
-	 *
-	 * See asm/uaccess.h for more details.
-	 */
-
-	/* 1. Try to find an exact match. */
-	for (walk = start; walk <= last; walk++) {
-		if (walk->fixup == 0) {
-			/* A range entry, skip both parts. */
-			walk++;
-			continue;
-		}
-
-		if (walk->insn == value)
-			return walk;
-	}
-
-	/* 2. Try to find a range match. */
-	for (walk = start; walk <= (last - 1); walk++) {
-		if (walk->fixup)
-			continue;
-
-		if (walk[0].insn <= value && walk[1].insn > value)
-			return walk;
-
-		walk++;
-	}
-
-	return NULL;
-}
-
-/* Special extable search, which handles ranges.  Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
-	const struct exception_table_entry *entry;
-
-	entry = search_exception_tables(addr);
-	if (!entry)
-		return 0;
-
-	/* Inside range?  Fix g2 and return correct fixup */
-	if (!entry->fixup) {
-		*g2 = (addr - entry->insn) / 4;
-		return (entry + 1)->fixup;
-	}
-
-	return entry->fixup;
-}
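
The encoding that this deleted file handled is worth spelling out: a range entry occupied two consecutive exception_table_entry slots, with word 2 zeroed as the marker, and a hit inside the range reported the instruction index back through %g2. A minimal C sketch of that decode step (decode_fixup and the stripped-down extable_ent struct are illustrative stand-ins, not the kernel's real asm/uaccess.h definitions):

	struct extable_ent { unsigned long insn, fixup; };

	/* Mirror of what search_extables_range() did with a hit:
	 * fixup == 0 marks a range entry, whose second slot carries
	 * the real fixup address; *g2 gets the insn index in range.
	 */
	static unsigned long decode_fixup(const struct extable_ent *e,
					  unsigned long addr, unsigned long *g2)
	{
		if (!e->fixup) {			/* range entry */
			*g2 = (addr - e->insn) / 4;	/* 4-byte insns */
			return e[1].fixup;		/* word 4: fixup */
		}
		return e->fixup;			/* single-insn entry */
	}

Dropping the range format is what lets the generic kernel extable sort and search take over, as the fault.c change below shows.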
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index db1e3310e907..31fbc67719a1 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -32,8 +32,6 @@
 
 #define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
 
-extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
-
 /*
  * To debug kernel to catch accesses to certain virtual/physical addresses.
  * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
@@ -71,53 +69,6 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
71 : "memory"); 69 : "memory");
72} 70}
73 71
74/* Nice, simple, prom library does all the sweating for us. ;) */
75unsigned long __init prom_probe_memory (void)
76{
77 register struct linux_mlist_p1275 *mlist;
78 register unsigned long bytes, base_paddr, tally;
79 register int i;
80
81 i = 0;
82 mlist = *prom_meminfo()->p1275_available;
83 bytes = tally = mlist->num_bytes;
84 base_paddr = mlist->start_adr;
85
86 sp_banks[0].base_addr = base_paddr;
87 sp_banks[0].num_bytes = bytes;
88
89 while (mlist->theres_more != (void *) 0) {
90 i++;
91 mlist = mlist->theres_more;
92 bytes = mlist->num_bytes;
93 tally += bytes;
94 if (i >= SPARC_PHYS_BANKS-1) {
95 printk ("The machine has more banks than "
96 "this kernel can support\n"
97 "Increase the SPARC_PHYS_BANKS "
98 "setting (currently %d)\n",
99 SPARC_PHYS_BANKS);
100 i = SPARC_PHYS_BANKS-1;
101 break;
102 }
103
104 sp_banks[i].base_addr = mlist->start_adr;
105 sp_banks[i].num_bytes = mlist->num_bytes;
106 }
107
108 i++;
109 sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
110 sp_banks[i].num_bytes = 0;
111
112 /* Now mask all bank sizes on a page boundary, it is all we can
113 * use anyways.
114 */
115 for (i = 0; sp_banks[i].num_bytes != 0; i++)
116 sp_banks[i].num_bytes &= PAGE_MASK;
117
118 return tally;
119}
120
121static void __kprobes unhandled_fault(unsigned long address, 72static void __kprobes unhandled_fault(unsigned long address,
122 struct task_struct *tsk, 73 struct task_struct *tsk,
123 struct pt_regs *regs) 74 struct pt_regs *regs)
@@ -242,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
 static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
 			    unsigned int insn, unsigned long address)
 {
-	unsigned long g2;
 	unsigned char asi = ASI_P;
 
 	if ((!insn) && (regs->tstate & TSTATE_PRIV))
@@ -273,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
 		}
 	}
 
-	g2 = regs->u_regs[UREG_G2];
-
 	/* Is this in ex_table? */
 	if (regs->tstate & TSTATE_PRIV) {
-		unsigned long fixup;
+		const struct exception_table_entry *entry;
 
 		if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
 			if (insn & 0x2000)
@@ -288,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
 
 			/* Look in asi.h: All _S asis have LS bit set */
 			if ((asi & 0x1) &&
-			    (fixup = search_extables_range(regs->tpc, &g2))) {
-				regs->tpc = fixup;
+			    (entry = search_exception_tables(regs->tpc))) {
+				regs->tpc = entry->fixup;
 				regs->tnpc = regs->tpc + 4;
-				regs->u_regs[UREG_G2] = g2;
 				return;
 			}
 		} else {
@@ -461,7 +408,7 @@ good_area:
 	}
 
 	up_read(&mm->mmap_sem);
-	goto fault_done;
+	return;
 
 	/*
 	 * Something tried to access memory that isn't in our memory map..
@@ -473,8 +420,7 @@ bad_area:
 
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
-
-	goto fault_done;
+	return;
 
 /*
  * We ran out of memory, or some other thing happened to us that made
@@ -505,9 +451,4 @@ do_sigbus:
 	/* Kernel mode? Handle exceptions or die */
 	if (regs->tstate & TSTATE_PRIV)
 		goto handle_kernel_fault;
-
-fault_done:
-	/* These values are no longer needed, clear them. */
-	set_thread_fault_code(0);
-	current_thread_info()->fault_address = 0;
 }
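
With the range entries gone, the privileged-fault fixup path reduces to the generic helper. A condensed sketch of the new control flow (not a literal quote of do_kernel_fault(), which also filters on the ASI as shown above):

	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (entry) {
		/* Retarget the trapping insn to its fixup code. */
		regs->tpc = entry->fixup;
		regs->tnpc = regs->tpc + 4;
		return;
	}
	/* ...otherwise fall through and die. */

Note what no longer happens: there is no %g2 fixup to propagate, and the fault code/address clearing that used to live at fault_done is gone as well.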
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index fdb1ebb308c9..5db50524f20d 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -20,6 +20,8 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/kprobes.h>
+#include <linux/cache.h>
+#include <linux/sort.h>
 
 #include <asm/head.h>
 #include <asm/system.h>
@@ -40,24 +42,80 @@
 
 extern void device_scan(void);
 
-struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+#define MAX_BANKS	32
 
-unsigned long *sparc64_valid_addr_bitmap;
+static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
+static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+static int pavail_ents __initdata;
+static int pavail_rescan_ents __initdata;
+
+static int cmp_p64(const void *a, const void *b)
+{
+	const struct linux_prom64_registers *x = a, *y = b;
+
+	if (x->phys_addr > y->phys_addr)
+		return 1;
+	if (x->phys_addr < y->phys_addr)
+		return -1;
+	return 0;
+}
+
+static void __init read_obp_memory(const char *property,
+				   struct linux_prom64_registers *regs,
+				   int *num_ents)
+{
+	int node = prom_finddevice("/memory");
+	int prop_size = prom_getproplen(node, property);
+	int ents, ret, i;
+
+	ents = prop_size / sizeof(struct linux_prom64_registers);
+	if (ents > MAX_BANKS) {
+		prom_printf("The machine has more %s property entries than "
+			    "this kernel can support (%d).\n",
+			    property, MAX_BANKS);
+		prom_halt();
+	}
+
+	ret = prom_getproperty(node, property, (char *) regs, prop_size);
+	if (ret == -1) {
+		prom_printf("Couldn't get %s property from /memory.\n", property);
+		prom_halt();
+	}
+
+	*num_ents = ents;
+
+	/* Sanitize what we got from the firmware, by page aligning
+	 * everything.
+	 */
+	for (i = 0; i < ents; i++) {
+		unsigned long base, size;
+
+		base = regs[i].phys_addr;
+		size = regs[i].reg_size;
+
+		size &= PAGE_MASK;
+		if (base & ~PAGE_MASK) {
+			unsigned long new_base = PAGE_ALIGN(base);
+
+			size -= new_base - base;
+			if ((long) size < 0L)
+				size = 0UL;
+			base = new_base;
+		}
+		regs[i].phys_addr = base;
+		regs[i].reg_size = size;
+	}
+	sort(regs, ents, sizeof(struct linux_prom64_registers),
+	     cmp_p64, NULL);
+}
+
+unsigned long *sparc64_valid_addr_bitmap __read_mostly;
 
 /* Ugly, but necessary... -DaveM */
-unsigned long phys_base;
-unsigned long kern_base;
-unsigned long kern_size;
-unsigned long pfn_base;
-
-/* This is even uglier. We have a problem where the kernel may not be
- * located at phys_base. However, initial __alloc_bootmem() calls need to
- * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
- * those page mappings wont work. Things are ok after inherit_prom_mappings
- * is called though. Dave says he'll clean this up some other time.
- * -- BenC
- */
-static unsigned long bootmap_base;
+unsigned long phys_base __read_mostly;
+unsigned long kern_base __read_mostly;
+unsigned long kern_size __read_mostly;
+unsigned long pfn_base __read_mostly;
 
 /* get_new_mmu_context() uses "cache + 1". */
 DEFINE_SPINLOCK(ctx_alloc_lock);
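
read_obp_memory() above both page-aligns what the firmware reports and orders it by base address with the kernel's sort(). The same sanitizing step is easy to trace in user space; this is a standalone sketch (struct bank and the example values are made up, and qsort() stands in for the kernel's sort()):

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE	8192UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	struct bank { unsigned long phys_addr, reg_size; };

	static int cmp_bank(const void *a, const void *b)
	{
		const struct bank *x = a, *y = b;

		return (x->phys_addr > y->phys_addr) -
		       (x->phys_addr < y->phys_addr);
	}

	int main(void)
	{
		/* Unaligned, out-of-order banks, as firmware might report. */
		struct bank banks[2] = {
			{ 0x40001000UL, 0x10000UL },
			{ 0x00002000UL, 0x05000UL },
		};
		int i;

		for (i = 0; i < 2; i++) {
			unsigned long base = banks[i].phys_addr;
			unsigned long size = banks[i].reg_size & PAGE_MASK;

			if (base & ~PAGE_MASK) {
				unsigned long nb = PAGE_ALIGN(base);

				size -= nb - base;	/* shrink by the slide */
				if ((long) size < 0L)
					size = 0UL;
				base = nb;
			}
			banks[i].phys_addr = base;
			banks[i].reg_size = size;
		}
		qsort(banks, 2, sizeof(banks[0]), cmp_bank);

		for (i = 0; i < 2; i++)
			printf("bank %d: base %#lx size %#lx\n",
			       i, banks[i].phys_addr, banks[i].reg_size);
		return 0;
	}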
@@ -73,7 +131,7 @@ extern unsigned long sparc_ramdisk_image64;
 extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
 
-struct page *mem_map_zero;
+struct page *mem_map_zero __read_mostly;
 
 int bigkernel = 0;
 
@@ -179,8 +237,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
179 : "g1", "g7"); 237 : "g1", "g7");
180} 238}
181 239
182extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
183
184void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) 240void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
185{ 241{
186 struct page *page; 242 struct page *page;
@@ -207,10 +263,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
 		put_cpu();
 	}
-
-	if (get_thread_fault_code())
-		__update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
-				   address, pte, get_thread_fault_code());
 }
 
 void flush_dcache_page(struct page *page)
@@ -309,6 +361,7 @@ struct linux_prom_translation {
 	unsigned long size;
 	unsigned long data;
 };
+static struct linux_prom_translation prom_trans[512] __initdata;
 
 extern unsigned long prom_boot_page;
 extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
@@ -318,14 +371,63 @@ extern void register_prom_callbacks(void);
 /* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
-void __init early_pgtable_allocfail(char *type)
+/* Exported for kernel TLB miss handling in ktlb.S */
+unsigned long prom_pmd_phys __read_mostly;
+unsigned int swapper_pgd_zero __read_mostly;
+
+/* Allocate power-of-2 aligned chunks from the end of the
+ * kernel image.  Return physical address.
+ */
+static inline unsigned long early_alloc_phys(unsigned long size)
 {
-	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
-	prom_halt();
+	unsigned long base;
+
+	BUILD_BUG_ON(size & (size - 1));
+
+	kern_size = (kern_size + (size - 1)) & ~(size - 1);
+	base = kern_base + kern_size;
+	kern_size += size;
+
+	return base;
+}
+
+static inline unsigned long load_phys32(unsigned long pa)
+{
+	unsigned long val;
+
+	__asm__ __volatile__("lduwa	[%1] %2, %0"
+			     : "=&r" (val)
+			     : "r" (pa), "i" (ASI_PHYS_USE_EC));
+
+	return val;
+}
+
+static inline unsigned long load_phys64(unsigned long pa)
+{
+	unsigned long val;
+
+	__asm__ __volatile__("ldxa	[%1] %2, %0"
+			     : "=&r" (val)
+			     : "r" (pa), "i" (ASI_PHYS_USE_EC));
+
+	return val;
+}
+
+static inline void store_phys32(unsigned long pa, unsigned long val)
+{
+	__asm__ __volatile__("stwa	%0, [%1] %2"
+			     : /* no outputs */
+			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
+}
+
+static inline void store_phys64(unsigned long pa, unsigned long val)
+{
+	__asm__ __volatile__("stxa	%0, [%1] %2"
+			     : /* no outputs */
+			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
 }
 
 #define BASE_PAGE_SIZE 8192
-static pmd_t *prompmd;
 
 /*
  * Translate PROM's mapping we capture at boot time into physical address.
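
These physically-addressed helpers are what let the captured OBP page table live outside the kernel's virtual mappings: a 4-byte pmd slot stores its pte page's physical address shifted right by 11, and lookups index with (va >> 23) & 0x7ff and (va >> 13) & 0x3ff, as prom_virt_to_phys() does just below. A worked trace of that arithmetic (the concrete addresses are invented for the example; pmds are 4 bytes, ptes 8):

	/* promva = 0xf0042000, prom_pmd_phys = 0x07f00000 (example values) */
	pmd_slot = 0x07f00000UL + ((0xf0042000UL >> 23) & 0x7ff) * 4;
						/* index 0x1e0 -> slot at +0x780 */
	pmd_word = load_phys32(pmd_slot);	/* holds pte page phys >> 11 */
	pte_page = pmd_word << 11;		/* back to a physical address */
	pte_slot = pte_page + ((0xf0042000UL >> 13) & 0x3ff) * 8;
						/* index 0x21: the pte of this 8K page */
	pte_word = load_phys64(pte_slot);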
@@ -333,278 +435,172 @@ static pmd_t *prompmd;
 */
 unsigned long prom_virt_to_phys(unsigned long promva, int *error)
 {
-	pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
-	pte_t *ptep;
+	unsigned long pmd_phys = (prom_pmd_phys +
+				  ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
+	unsigned long pte_phys;
+	pmd_t pmd_ent;
+	pte_t pte_ent;
 	unsigned long base;
 
-	if (pmd_none(*pmdp)) {
+	pmd_val(pmd_ent) = load_phys32(pmd_phys);
+	if (pmd_none(pmd_ent)) {
 		if (error)
 			*error = 1;
-		return(0);
+		return 0;
 	}
-	ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
-	if (!pte_present(*ptep)) {
+
+	pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
+	pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
+	pte_val(pte_ent) = load_phys64(pte_phys);
+	if (!pte_present(pte_ent)) {
 		if (error)
 			*error = 1;
-		return(0);
+		return 0;
 	}
 	if (error) {
 		*error = 0;
-		return(pte_val(*ptep));
+		return pte_val(pte_ent);
 	}
-	base = pte_val(*ptep) & _PAGE_PADDR;
-	return(base + (promva & (BASE_PAGE_SIZE - 1)));
+	base = pte_val(pte_ent) & _PAGE_PADDR;
+	return (base + (promva & (BASE_PAGE_SIZE - 1)));
 }
 
-static void inherit_prom_mappings(void)
+/* The obp translations are saved based on 8k pagesize, since obp can
+ * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
+ * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
+ * scheme (also, see rant in inherit_locked_prom_mappings()).
+ */
+static void __init build_obp_range(unsigned long start, unsigned long end, unsigned long data)
 {
-	struct linux_prom_translation *trans;
-	unsigned long phys_page, tte_vaddr, tte_data;
-	void (*remap_func)(unsigned long, unsigned long, int);
-	pmd_t *pmdp;
-	pte_t *ptep;
-	int node, n, i, tsz;
-	extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
+	unsigned long vaddr;
 
-	node = prom_finddevice("/virtual-memory");
-	n = prom_getproplen(node, "translations");
-	if (n == 0 || n == -1) {
-		prom_printf("Couldn't get translation property\n");
-		prom_halt();
-	}
-	n += 5 * sizeof(struct linux_prom_translation);
-	for (tsz = 1; tsz < n; tsz <<= 1)
-		/* empty */;
-	trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
-	if (trans == NULL) {
-		prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
-		prom_halt();
-	}
-	memset(trans, 0, tsz);
+	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
+		unsigned long val, pte_phys, pmd_phys;
+		pmd_t pmd_ent;
+		int i;
 
-	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
-		prom_printf("Couldn't get translation property\n");
-		prom_halt();
-	}
-	n = n / sizeof(*trans);
+		pmd_phys = (prom_pmd_phys +
+			    (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
+		pmd_val(pmd_ent) = load_phys32(pmd_phys);
+		if (pmd_none(pmd_ent)) {
+			pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
 
-	/*
-	 * The obp translations are saved based on 8k pagesize, since obp can
-	 * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
-	 * ie obp range, are handled in entry.S and do not use the vpte scheme
-	 * (see rant in inherit_locked_prom_mappings()).
-	 */
-#define OBP_PMD_SIZE 2048
-	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
-	if (prompmd == NULL)
-		early_pgtable_allocfail("pmd");
-	memset(prompmd, 0, OBP_PMD_SIZE);
-	for (i = 0; i < n; i++) {
-		unsigned long vaddr;
-
-		if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
-			for (vaddr = trans[i].virt;
-			     ((vaddr < trans[i].virt + trans[i].size) &&
-			      (vaddr < HI_OBP_ADDRESS));
-			     vaddr += BASE_PAGE_SIZE) {
-				unsigned long val;
-
-				pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
-				if (pmd_none(*pmdp)) {
-					ptep = __alloc_bootmem(BASE_PAGE_SIZE,
-							       BASE_PAGE_SIZE,
-							       bootmap_base);
-					if (ptep == NULL)
-						early_pgtable_allocfail("pte");
-					memset(ptep, 0, BASE_PAGE_SIZE);
-					pmd_set(pmdp, ptep);
-				}
-				ptep = (pte_t *)__pmd_page(*pmdp) +
-						((vaddr >> 13) & 0x3ff);
+			for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
+				store_phys64(pte_phys+i*sizeof(pte_t),0);
 
-				val = trans[i].data;
+			pmd_val(pmd_ent) = pte_phys >> 11UL;
+			store_phys32(pmd_phys, pmd_val(pmd_ent));
+		}
 
-				/* Clear diag TTE bits. */
-				if (tlb_type == spitfire)
-					val &= ~0x0003fe0000000000UL;
+		pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
+		pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
 
-				set_pte_at(&init_mm, vaddr,
-					   ptep, __pte(val | _PAGE_MODIFIED));
-				trans[i].data += BASE_PAGE_SIZE;
-			}
-		}
-	}
-	phys_page = __pa(prompmd);
-	obp_iaddr_patch[0] |= (phys_page >> 10);
-	obp_iaddr_patch[1] |= (phys_page & 0x3ff);
-	flushi((long)&obp_iaddr_patch[0]);
-	obp_daddr_patch[0] |= (phys_page >> 10);
-	obp_daddr_patch[1] |= (phys_page & 0x3ff);
-	flushi((long)&obp_daddr_patch[0]);
+		val = data;
 
-	/* Now fixup OBP's idea about where we really are mapped. */
-	prom_printf("Remapping the kernel... ");
+		/* Clear diag TTE bits. */
+		if (tlb_type == spitfire)
+			val &= ~0x0003fe0000000000UL;
 
-	/* Spitfire Errata #32 workaround */
-	/* NOTE: Using plain zero for the context value is
-	 * correct here, we are not using the Linux trap
-	 * tables yet so we should not use the special
-	 * UltraSPARC-III+ page size encodings yet.
-	 */
-	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-			     "flush	%%g6"
-			     : /* No outputs */
-			     : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-	switch (tlb_type) {
-	default:
-	case spitfire:
-		phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
-		break;
-
-	case cheetah:
-	case cheetah_plus:
-		phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
-		break;
-	};
-
-	phys_page &= _PAGE_PADDR;
-	phys_page += ((unsigned long)&prom_boot_page -
-		      (unsigned long)KERNBASE);
+		store_phys64(pte_phys, val | _PAGE_MODIFIED);
 
-	if (tlb_type == spitfire) {
-		/* Lock this into i/d tlb entry 59 */
-		__asm__ __volatile__(
-			"stxa	%%g0, [%2] %3\n\t"
-			"stxa	%0, [%1] %4\n\t"
-			"membar	#Sync\n\t"
-			"flush	%%g6\n\t"
-			"stxa	%%g0, [%2] %5\n\t"
-			"stxa	%0, [%1] %6\n\t"
-			"membar	#Sync\n\t"
-			"flush	%%g6"
-			: : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
-				 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
-			"r" (59 << 3), "r" (TLB_TAG_ACCESS),
-			"i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
-			"i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
-			: "memory");
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		/* Lock this into i/d tlb-0 entry 11 */
-		__asm__ __volatile__(
-			"stxa	%%g0, [%2] %3\n\t"
-			"stxa	%0, [%1] %4\n\t"
-			"membar	#Sync\n\t"
-			"flush	%%g6\n\t"
-			"stxa	%%g0, [%2] %5\n\t"
-			"stxa	%0, [%1] %6\n\t"
-			"membar	#Sync\n\t"
-			"flush	%%g6"
-			: : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
-				 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
-			"r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
-			"i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
-			"i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
-			: "memory");
-	} else {
-		/* Implement me :-) */
-		BUG();
+		data += BASE_PAGE_SIZE;
 	}
+}
 
-	tte_vaddr = (unsigned long) KERNBASE;
+static inline int in_obp_range(unsigned long vaddr)
+{
+	return (vaddr >= LOW_OBP_ADDRESS &&
+		vaddr < HI_OBP_ADDRESS);
+}
 
-	/* Spitfire Errata #32 workaround */
-	/* NOTE: Using plain zero for the context value is
-	 * correct here, we are not using the Linux trap
-	 * tables yet so we should not use the special
-	 * UltraSPARC-III+ page size encodings yet.
-	 */
-	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-			     "flush	%%g6"
-			     : /* No outputs */
-			     : "r" (0),
-			     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-	if (tlb_type == spitfire)
-		tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
-	else
-		tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+#define OBP_PMD_SIZE	2048
+static void __init build_obp_pgtable(int prom_trans_ents)
+{
+	unsigned long i;
 
-	kern_locked_tte_data = tte_data;
+	prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
+	for (i = 0; i < OBP_PMD_SIZE; i += 4)
+		store_phys32(prom_pmd_phys + i, 0);
 
-	remap_func = (void *) ((unsigned long) &prom_remap -
-			       (unsigned long) &prom_boot_page);
+	for (i = 0; i < prom_trans_ents; i++) {
+		unsigned long start, end;
 
+		if (!in_obp_range(prom_trans[i].virt))
+			continue;
 
-	/* Spitfire Errata #32 workaround */
-	/* NOTE: Using plain zero for the context value is
-	 * correct here, we are not using the Linux trap
-	 * tables yet so we should not use the special
-	 * UltraSPARC-III+ page size encodings yet.
-	 */
-	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-			     "flush	%%g6"
-			     : /* No outputs */
-			     : "r" (0),
-			     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-	remap_func((tlb_type == spitfire ?
-		    (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
-		    (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
-		   (unsigned long) KERNBASE,
-		   prom_get_mmu_ihandle());
-
-	if (bigkernel)
-		remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
-			(unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
-
-	/* Flush out that temporary mapping. */
-	spitfire_flush_dtlb_nucleus_page(0x0);
-	spitfire_flush_itlb_nucleus_page(0x0);
-
-	/* Now lock us back into the TLBs via OBP. */
-	prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
-	prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
-	if (bigkernel) {
-		prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
-			tte_vaddr + 0x400000);
-		prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
-			tte_vaddr + 0x400000);
+		start = prom_trans[i].virt;
+		end = start + prom_trans[i].size;
+		if (end > HI_OBP_ADDRESS)
+			end = HI_OBP_ADDRESS;
+
+		build_obp_range(start, end, prom_trans[i].data);
 	}
+}
+
+/* Read OBP translations property into 'prom_trans[]'.
+ * Return the number of entries.
+ */
+static int __init read_obp_translations(void)
+{
+	int n, node;
 
-	/* Re-read translations property. */
-	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
-		prom_printf("Couldn't get translation property\n");
+	node = prom_finddevice("/virtual-memory");
+	n = prom_getproplen(node, "translations");
+	if (unlikely(n == 0 || n == -1)) {
+		prom_printf("prom_mappings: Couldn't get size.\n");
+		prom_halt();
+	}
+	if (unlikely(n > sizeof(prom_trans))) {
+		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
 		prom_halt();
 	}
-	n = n / sizeof(*trans);
 
-	for (i = 0; i < n; i++) {
-		unsigned long vaddr = trans[i].virt;
-		unsigned long size = trans[i].size;
+	if ((n = prom_getproperty(node, "translations",
+				  (char *)&prom_trans[0],
+				  sizeof(prom_trans))) == -1) {
+		prom_printf("prom_mappings: Couldn't get property.\n");
+		prom_halt();
+	}
+	n = n / sizeof(struct linux_prom_translation);
+	return n;
+}
 
-		if (vaddr < 0xf0000000UL) {
-			unsigned long avoid_start = (unsigned long) KERNBASE;
-			unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
+static void __init remap_kernel(void)
+{
+	unsigned long phys_page, tte_vaddr, tte_data;
+	int tlb_ent = sparc64_highest_locked_tlbent();
 
-			if (bigkernel)
-				avoid_end += (4 * 1024 * 1024);
-			if (vaddr < avoid_start) {
-				unsigned long top = vaddr + size;
+	tte_vaddr = (unsigned long) KERNBASE;
+	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
+				 _PAGE_CP | _PAGE_CV | _PAGE_P |
+				 _PAGE_L | _PAGE_W));
 
-				if (top > avoid_start)
-					top = avoid_start;
-				prom_unmap(top - vaddr, vaddr);
-			}
-			if ((vaddr + size) > avoid_end) {
-				unsigned long bottom = vaddr;
+	kern_locked_tte_data = tte_data;
 
-				if (bottom < avoid_end)
-					bottom = avoid_end;
-				prom_unmap((vaddr + size) - bottom, bottom);
-			}
-		}
+	/* Now lock us into the TLBs via OBP. */
+	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
+	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
+	if (bigkernel) {
+		prom_dtlb_load(tlb_ent - 1,
+			       tte_data + 0x400000,
+			       tte_vaddr + 0x400000);
+		prom_itlb_load(tlb_ent - 1,
+			       tte_data + 0x400000,
+			       tte_vaddr + 0x400000);
 	}
+}
+
+static void __init inherit_prom_mappings(void)
+{
+	int n;
+
+	n = read_obp_translations();
+	build_obp_pgtable(n);
+
+	/* Now fixup OBP's idea about where we really are mapped. */
+	prom_printf("Remapping the kernel... ");
+	remap_kernel();
 
 	prom_printf("done.\n");
 
@@ -1276,14 +1272,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	int i;
 
 #ifdef CONFIG_DEBUG_BOOTMEM
-	prom_printf("bootmem_init: Scan sp_banks, ");
+	prom_printf("bootmem_init: Scan pavail, ");
 #endif
 
 	bytes_avail = 0UL;
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-		end_of_phys_memory = sp_banks[i].base_addr +
-			sp_banks[i].num_bytes;
-		bytes_avail += sp_banks[i].num_bytes;
+	for (i = 0; i < pavail_ents; i++) {
+		end_of_phys_memory = pavail[i].phys_addr +
+			pavail[i].reg_size;
+		bytes_avail += pavail[i].reg_size;
 		if (cmdline_memory_size) {
 			if (bytes_avail > cmdline_memory_size) {
 				unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1291,12 +1287,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 				bytes_avail -= slack;
 				end_of_phys_memory -= slack;
 
-				sp_banks[i].num_bytes -= slack;
-				if (sp_banks[i].num_bytes == 0) {
-					sp_banks[i].base_addr = 0xdeadbeef;
+				pavail[i].reg_size -= slack;
+				if ((long)pavail[i].reg_size <= 0L) {
+					pavail[i].phys_addr = 0xdeadbeefUL;
+					pavail[i].reg_size = 0UL;
+					pavail_ents = i;
 				} else {
-					sp_banks[i+1].num_bytes = 0;
-					sp_banks[i+1].base_addr = 0xdeadbeef;
+					pavail[i+1].reg_size = 0UL;
+					pavail[i+1].phys_addr = 0xdeadbeefUL;
+					pavail_ents = i + 1;
 				}
 				break;
 			}
@@ -1347,17 +1346,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 #endif
 	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
 
-	bootmap_base = bootmap_pfn << PAGE_SHIFT;
-
 	/* Now register the available physical memory with the
 	 * allocator.
 	 */
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+	for (i = 0; i < pavail_ents; i++) {
 #ifdef CONFIG_DEBUG_BOOTMEM
-		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
-			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
+			    i, pavail[i].phys_addr, pavail[i].reg_size);
 #endif
-		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
 	}
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1398,120 +1395,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	return end_pfn;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
+{
+	unsigned long vstart = PAGE_OFFSET + pstart;
+	unsigned long vend = PAGE_OFFSET + pend;
+	unsigned long alloc_bytes = 0UL;
+
+	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
+		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
+			    vstart, vend);
+		prom_halt();
+	}
+
+	while (vstart < vend) {
+		unsigned long this_end, paddr = __pa(vstart);
+		pgd_t *pgd = pgd_offset_k(vstart);
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pud = pud_offset(pgd, vstart);
+		if (pud_none(*pud)) {
+			pmd_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pud_populate(&init_mm, pud, new);
+		}
+
+		pmd = pmd_offset(pud, vstart);
+		if (!pmd_present(*pmd)) {
+			pte_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pmd_populate_kernel(&init_mm, pmd, new);
+		}
+
+		pte = pte_offset_kernel(pmd, vstart);
+		this_end = (vstart + PMD_SIZE) & PMD_MASK;
+		if (this_end > vend)
+			this_end = vend;
+
+		while (vstart < this_end) {
+			pte_val(*pte) = (paddr | pgprot_val(prot));
+
+			vstart += PAGE_SIZE;
+			paddr += PAGE_SIZE;
+			pte++;
+		}
+	}
+
+	return alloc_bytes;
+}
+
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
+extern unsigned int kvmap_linear_patch[1];
+
+static void __init kernel_physical_mapping_init(void)
+{
+	unsigned long i, mem_alloced = 0UL;
+
+	read_obp_memory("reg", &pall[0], &pall_ents);
+
+	for (i = 0; i < pall_ents; i++) {
+		unsigned long phys_start, phys_end;
+
+		phys_start = pall[i].phys_addr;
+		phys_end = phys_start + pall[i].reg_size;
+		mem_alloced += kernel_map_range(phys_start, phys_end,
+						PAGE_KERNEL);
+	}
+
+	printk("Allocated %ld bytes for kernel page tables.\n",
+	       mem_alloced);
+
+	kvmap_linear_patch[0] = 0x01000000; /* nop */
+	flushi(&kvmap_linear_patch[0]);
+
+	__flush_tlb_all();
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
+	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+	kernel_map_range(phys_start, phys_end,
+			 (enable ? PAGE_KERNEL : __pgprot(0)));
+
+	/* we should perform an IPI and flush all tlbs,
+	 * but that can deadlock->flush only current cpu.
+	 */
+	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
+				 PAGE_OFFSET + phys_end);
+}
+#endif
+
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < pavail_ents; i++) {
+		if (pavail[i].reg_size >= size)
+			return pavail[i].phys_addr;
+	}
+
+	return ~0UL;
+}
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
 
 static unsigned long last_valid_pfn;
+pgd_t swapper_pg_dir[2048];
 
 void __init paging_init(void)
 {
-	extern pmd_t swapper_pmd_dir[1024];
-	extern unsigned int sparc64_vpte_patchme1[1];
-	extern unsigned int sparc64_vpte_patchme2[1];
-	unsigned long alias_base = kern_base + PAGE_OFFSET;
-	unsigned long second_alias_page = 0;
-	unsigned long pt, flags, end_pfn, pages_avail;
-	unsigned long shift = alias_base - ((unsigned long)KERNBASE);
-	unsigned long real_end;
+	unsigned long end_pfn, pages_avail, shift;
+	unsigned long real_end, i;
+
+	/* Find available physical memory... */
+	read_obp_memory("available", &pavail[0], &pavail_ents);
+
+	phys_base = 0xffffffffffffffffUL;
+	for (i = 0; i < pavail_ents; i++)
+		phys_base = min(phys_base, pavail[i].phys_addr);
+
+	pfn_base = phys_base >> PAGE_SHIFT;
+
+	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
 	set_bit(0, mmu_context_bmap);
 
+	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
+
 	real_end = (unsigned long)_end;
 	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
 		bigkernel = 1;
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (sparc_ramdisk_image || sparc_ramdisk_image64)
-		real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
-#endif
-
-	/* We assume physical memory starts at some 4mb multiple,
-	 * if this were not true we wouldn't boot up to this point
-	 * anyways.
-	 */
-	pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
-	pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
-	local_irq_save(flags);
-	if (tlb_type == spitfire) {
-		__asm__ __volatile__(
-	"	stxa	%1, [%0] %3\n"
-	"	stxa	%2, [%5] %4\n"
-	"	membar	#Sync\n"
-	"	flush	%%g6\n"
-	"	nop\n"
-	"	nop\n"
-	"	nop\n"
-	: /* No outputs */
-	: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
-	  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
-	: "memory");
-		if (real_end >= KERNBASE + 0x340000) {
-			second_alias_page = alias_base + 0x400000;
-			__asm__ __volatile__(
-		"	stxa	%1, [%0] %3\n"
-		"	stxa	%2, [%5] %4\n"
-		"	membar	#Sync\n"
-		"	flush	%%g6\n"
-		"	nop\n"
-		"	nop\n"
-		"	nop\n"
-		: /* No outputs */
-		: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
-		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
-		: "memory");
-		}
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		__asm__ __volatile__(
-	"	stxa	%1, [%0] %3\n"
-	"	stxa	%2, [%5] %4\n"
-	"	membar	#Sync\n"
-	"	flush	%%g6\n"
-	"	nop\n"
-	"	nop\n"
-	"	nop\n"
-	: /* No outputs */
-	: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
-	  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
-	: "memory");
-		if (real_end >= KERNBASE + 0x340000) {
-			second_alias_page = alias_base + 0x400000;
-			__asm__ __volatile__(
-		"	stxa	%1, [%0] %3\n"
-		"	stxa	%2, [%5] %4\n"
-		"	membar	#Sync\n"
-		"	flush	%%g6\n"
-		"	nop\n"
-		"	nop\n"
-		"	nop\n"
-		: /* No outputs */
-		: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
-		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
-		: "memory");
-		}
+	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
+		prom_printf("paging_init: Kernel > 8MB, too large.\n");
+		prom_halt();
 	}
-	local_irq_restore(flags);
 
-	/* Now set kernel pgd to upper alias so physical page computations
+	/* Set kernel pgd to upper alias so physical page computations
 	 * work.
 	 */
 	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
 
-	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
+	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
 
 	/* Now can init the kernel/bad page tables. */
 	pud_set(pud_offset(&swapper_pg_dir[0], 0),
-		swapper_pmd_dir + (shift / sizeof(pgd_t)));
+		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
 
-	sparc64_vpte_patchme1[0] |=
-		(((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
-	sparc64_vpte_patchme2[0] |=
-		(((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
-	flushi((long)&sparc64_vpte_patchme1[0]);
+	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
 
-	/* Setup bootmem... */
-	pages_avail = 0;
-	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
-
 	/* Inherit non-locked OBP mappings. */
 	inherit_prom_mappings();
 
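
One detail in the new paging_init() worth a worked example: kern_base is derived from OBP's boot mapping by rounding down to a 4MB boundary, since the kernel is covered by 4MB locked TTEs, and (x >> 22UL) << 22UL simply clears the low 22 bits. A trace with an invented value:

	unsigned long low  = 0x40512345UL;		/* example phys addr */
	unsigned long base = (low >> 22UL) << 22UL;	/* = 0x40400000 */
	/* low - base = 0x112345 < 4MB, so base is the 4MB-aligned floor */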
@@ -1527,13 +1571,16 @@ void __init paging_init(void)
 
 	inherit_locked_prom_mappings(1);
 
-	/* We only created DTLB mapping of this stuff. */
-	spitfire_flush_dtlb_nucleus_page(alias_base);
-	if (second_alias_page)
-		spitfire_flush_dtlb_nucleus_page(second_alias_page);
-
 	__flush_tlb_all();
 
+	/* Setup bootmem... */
+	pages_avail = 0;
+	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	kernel_physical_mapping_init();
+#endif
+
 	{
 		unsigned long zones_size[MAX_NR_ZONES];
 		unsigned long zholes_size[MAX_NR_ZONES];
@@ -1554,128 +1601,35 @@ void __init paging_init(void)
 	device_scan();
 }
 
-/* Ok, it seems that the prom can allocate some more memory chunks
- * as a side effect of some prom calls we perform during the
- * boot sequence.  My most likely theory is that it is from the
- * prom_set_traptable() call, and OBP is allocating a scratchpad
- * for saving client program register state etc.
- */
-static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
-{
-	int swapi = 0;
-	int i, mitr;
-	unsigned long tmpaddr, tmpsize;
-	unsigned long lowest;
-
-	for (i = 0; thislist[i].theres_more != 0; i++) {
-		lowest = thislist[i].start_adr;
-		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
-			if (thislist[mitr].start_adr < lowest) {
-				lowest = thislist[mitr].start_adr;
-				swapi = mitr;
-			}
-		if (lowest == thislist[i].start_adr)
-			continue;
-		tmpaddr = thislist[swapi].start_adr;
-		tmpsize = thislist[swapi].num_bytes;
-		for (mitr = swapi; mitr > i; mitr--) {
-			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
-			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
-		}
-		thislist[i].start_adr = tmpaddr;
-		thislist[i].num_bytes = tmpsize;
-	}
-}
-
-void __init rescan_sp_banks(void)
-{
-	struct linux_prom64_registers memlist[64];
-	struct linux_mlist_p1275 avail[64], *mlist;
-	unsigned long bytes, base_paddr;
-	int num_regs, node = prom_finddevice("/memory");
-	int i;
-
-	num_regs = prom_getproperty(node, "available",
-				    (char *) memlist, sizeof(memlist));
-	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
-	for (i = 0; i < num_regs; i++) {
-		avail[i].start_adr = memlist[i].phys_addr;
-		avail[i].num_bytes = memlist[i].reg_size;
-		avail[i].theres_more = &avail[i + 1];
-	}
-	avail[i - 1].theres_more = NULL;
-	sort_memlist(avail);
-
-	mlist = &avail[0];
-	i = 0;
-	bytes = mlist->num_bytes;
-	base_paddr = mlist->start_adr;
-
-	sp_banks[0].base_addr = base_paddr;
-	sp_banks[0].num_bytes = bytes;
-
-	while (mlist->theres_more != NULL){
-		i++;
-		mlist = mlist->theres_more;
-		bytes = mlist->num_bytes;
-		if (i >= SPARC_PHYS_BANKS-1) {
-			printk ("The machine has more banks than "
-				"this kernel can support\n"
-				"Increase the SPARC_PHYS_BANKS "
-				"setting (currently %d)\n",
-				SPARC_PHYS_BANKS);
-			i = SPARC_PHYS_BANKS-1;
-			break;
-		}
-
-		sp_banks[i].base_addr = mlist->start_adr;
-		sp_banks[i].num_bytes = mlist->num_bytes;
-	}
-
-	i++;
-	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
-	sp_banks[i].num_bytes = 0;
-
-	for (i = 0; sp_banks[i].num_bytes != 0; i++)
-		sp_banks[i].num_bytes &= PAGE_MASK;
-}
-
 static void __init taint_real_pages(void)
 {
-	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
 	int i;
 
-	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
-		saved_sp_banks[i].base_addr =
-			sp_banks[i].base_addr;
-		saved_sp_banks[i].num_bytes =
-			sp_banks[i].num_bytes;
-	}
-
-	rescan_sp_banks();
+	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
 
-	/* Find changes discovered in the sp_bank rescan and
+	/* Find changes discovered in the physmem available rescan and
 	 * reserve the lost portions in the bootmem maps.
 	 */
-	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+	for (i = 0; i < pavail_ents; i++) {
 		unsigned long old_start, old_end;
 
-		old_start = saved_sp_banks[i].base_addr;
+		old_start = pavail[i].phys_addr;
 		old_end = old_start +
-			saved_sp_banks[i].num_bytes;
+			pavail[i].reg_size;
 		while (old_start < old_end) {
 			int n;
 
-			for (n = 0; sp_banks[n].num_bytes; n++) {
+			for (n = 0; n < pavail_rescan_ents; n++) {
 				unsigned long new_start, new_end;
 
-				new_start = sp_banks[n].base_addr;
-				new_end = new_start + sp_banks[n].num_bytes;
+				new_start = pavail_rescan[n].phys_addr;
+				new_end = new_start +
+					pavail_rescan[n].reg_size;
 
 				if (new_start <= old_start &&
 				    new_end >= (old_start + PAGE_SIZE)) {
-					set_bit (old_start >> 22,
+					set_bit(old_start >> 22,
 						sparc64_valid_addr_bitmap);
 					goto do_next_page;
 				}
 			}
@@ -1695,8 +1649,7 @@ void __init mem_init(void)
 
 	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
 	i += 1;
-	sparc64_valid_addr_bitmap = (unsigned long *)
-		__alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
+	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
 	if (sparc64_valid_addr_bitmap == NULL) {
 		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
 		prom_halt();
@@ -1749,7 +1702,7 @@ void __init mem_init(void)
 		cheetah_ecache_flush_init();
 }
 
-void free_initmem (void)
+void free_initmem(void)
 {
 	unsigned long addr, initend;
 
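
The CONFIG_DEBUG_PAGEALLOC machinery added above has two halves: at boot, kernel_physical_mapping_init() builds real page tables for the linear area and nops out the kvmap_linear_patch fast path (0x01000000 is the SPARC nop encoding, sethi %hi(0), %g0); at runtime, kernel_map_pages() is flipped by the page allocator. A sketch of the runtime half (the call sites live in the generic page allocator, not in this patch):

	/* On free: tear down the linear mapping so a use-after-free
	 * access faults immediately instead of silently succeeding.
	 */
	kernel_map_pages(page, 1, 0);	/* prot = __pgprot(0)  */

	/* On allocation: restore a normal kernel mapping. */
	kernel_map_pages(page, 1, 1);	/* prot = PAGE_KERNEL  */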
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index b2ee9b53227f..058b8126c1a7 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -144,42 +144,29 @@ __flush_icache_page: /* %o0 = phys_page */
 
 #define DTAG_MASK 0x3
 
+	/* This routine is Spitfire specific so the hardcoded
+	 * D-cache size and line-size are OK.
+	 */
 	.align		64
 	.globl		__flush_dcache_page
 __flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
 	sethi		%uhi(PAGE_OFFSET), %g1
 	sllx		%g1, 32, %g1
-	sub		%o0, %g1, %o0
-	clr		%o4
-	srlx		%o0, 11, %o0
-	sethi		%hi(1 << 14), %o2
-1:	ldxa		[%o4] ASI_DCACHE_TAG, %o3	! LSU	Group
-	add		%o4, (1 << 5), %o4		! IEU0
-	ldxa		[%o4] ASI_DCACHE_TAG, %g1	! LSU	Group
-	add		%o4, (1 << 5), %o4		! IEU0
-	ldxa		[%o4] ASI_DCACHE_TAG, %g2	! LSU	Group	o3 available
-	add		%o4, (1 << 5), %o4		! IEU0
-	andn		%o3, DTAG_MASK, %o3		! IEU1
-	ldxa		[%o4] ASI_DCACHE_TAG, %g3	! LSU	Group
-	add		%o4, (1 << 5), %o4		! IEU0
-	andn		%g1, DTAG_MASK, %g1		! IEU1
-	cmp		%o0, %o3			! IEU1	Group
-	be,a,pn		%xcc, dflush1			! CTI
-	 sub		%o4, (4 << 5), %o4		! IEU0	(Group)
-	cmp		%o0, %g1			! IEU1	Group
-	andn		%g2, DTAG_MASK, %g2		! IEU0
-	be,a,pn		%xcc, dflush2			! CTI
-	 sub		%o4, (3 << 5), %o4		! IEU0	(Group)
-	cmp		%o0, %g2			! IEU1	Group
-	andn		%g3, DTAG_MASK, %g3		! IEU0
-	be,a,pn		%xcc, dflush3			! CTI
-	 sub		%o4, (2 << 5), %o4		! IEU0	(Group)
-	cmp		%o0, %g3			! IEU1	Group
-	be,a,pn		%xcc, dflush4			! CTI
-	 sub		%o4, (1 << 5), %o4		! IEU0
-2:	cmp		%o4, %o2			! IEU1	Group
-	bne,pt		%xcc, 1b			! CTI
-	 nop						! IEU0
+	sub		%o0, %g1, %o0			! physical address
+	srlx		%o0, 11, %o0			! make D-cache TAG
+	sethi		%hi(1 << 14), %o2		! D-cache size
+	sub		%o2, (1 << 5), %o2		! D-cache line size
+1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
+	andcc		%o3, DTAG_MASK, %g0		! Valid?
+	be,pn		%xcc, 2f			! Nope, branch
+	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
+	cmp		%o3, %o0			! TAG match?
+	bne,pt		%xcc, 2f			! Nope, branch
+	 nop
+	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
+	membar		#Sync
+2:	brnz,pt		%o2, 1b
+	 sub		%o2, (1 << 5), %o2		! D-cache line size
 
 	/* The I-cache does not snoop local stores so we
 	 * better flush that too when necessary.
@@ -189,48 +176,9 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
 	retl
 	 nop
 
-dflush1:stxa		%g0, [%o4] ASI_DCACHE_TAG
-	add		%o4, (1 << 5), %o4
-dflush2:stxa		%g0, [%o4] ASI_DCACHE_TAG
-	add		%o4, (1 << 5), %o4
-dflush3:stxa		%g0, [%o4] ASI_DCACHE_TAG
-	add		%o4, (1 << 5), %o4
-dflush4:stxa		%g0, [%o4] ASI_DCACHE_TAG
-	add		%o4, (1 << 5), %o4
-	membar		#Sync
-	ba,pt		%xcc, 2b
-	 nop
 #endif /* DCACHE_ALIASING_POSSIBLE */
 
-	.previous .text
-	.align		32
-__prefill_dtlb:
-	rdpr		%pstate, %g7
-	wrpr		%g7, PSTATE_IE, %pstate
-	mov		TLB_TAG_ACCESS, %g1
-	stxa		%o5, [%g1] ASI_DMMU
-	stxa		%o2, [%g0] ASI_DTLB_DATA_IN
-	flush		%g6
-	retl
-	 wrpr		%g7, %pstate
-__prefill_itlb:
-	rdpr		%pstate, %g7
-	wrpr		%g7, PSTATE_IE, %pstate
-	mov		TLB_TAG_ACCESS, %g1
-	stxa		%o5, [%g1] ASI_IMMU
-	stxa		%o2, [%g0] ASI_ITLB_DATA_IN
-	flush		%g6
-	retl
-	 wrpr		%g7, %pstate
-
-	.globl		__update_mmu_cache
-__update_mmu_cache:	/* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
-	srlx		%o1, PAGE_SHIFT, %o1
-	andcc		%o3, FAULT_CODE_DTLB, %g0
-	sllx		%o1, PAGE_SHIFT, %o5
-	bne,pt		%xcc, __prefill_dtlb
-	 or		%o5, %o0, %o5
-	ba,a,pt		%xcc, __prefill_itlb
+	.previous
 
 	/* Cheetah specific versions, patched at boot time. */
 __cheetah_flush_tlb_mm: /* 18 insns */
@@ -283,7 +231,7 @@ __cheetah_flush_tlb_pending: /* 26 insns */
 	wrpr		%g7, 0x0, %pstate
 
 #ifdef DCACHE_ALIASING_POSSIBLE
-flush_dcpage_cheetah: /* 11 insns */
+__cheetah_flush_dcache_page: /* 11 insns */
 	sethi		%uhi(PAGE_OFFSET), %g1
 	sllx		%g1, 32, %g1
 	sub		%o0, %g1, %o0
@@ -329,8 +277,8 @@ cheetah_patch_cachetlbops:
 #ifdef DCACHE_ALIASING_POSSIBLE
 	sethi		%hi(__flush_dcache_page), %o0
 	or		%o0, %lo(__flush_dcache_page), %o0
-	sethi		%hi(flush_dcpage_cheetah), %o1
-	or		%o1, %lo(flush_dcpage_cheetah), %o1
+	sethi		%hi(__cheetah_flush_dcache_page), %o1
+	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
 	call		cheetah_patch_one
 	 mov		11, %o2
 #endif /* DCACHE_ALIASING_POSSIBLE */
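
The rewritten Spitfire __flush_dcache_page above is easier to audit in C. It walks the 16K direct-mapped D-cache one 32-byte line at a time from the top, and only invalidates a line whose tag is valid and matches the page's physical tag. An equivalent sketch (load_dtag()/clear_dtag() are hypothetical stand-ins for the ldxa/stxa ASI_DCACHE_TAG accesses; the I-cache half of the routine is unchanged and omitted):

	#define DCACHE_SIZE	(1 << 14)	/* 16K, Spitfire-specific */
	#define DLINE_SIZE	(1 << 5)	/* 32-byte lines          */
	#define DTAG_MASK	0x3UL		/* low bits: valid state  */

	static void flush_dcache_page_sketch(unsigned long paddr)
	{
		unsigned long tag = paddr >> 11;	/* D-cache TAG form */
		long off;

		for (off = DCACHE_SIZE - DLINE_SIZE; off >= 0; off -= DLINE_SIZE) {
			unsigned long dtag = load_dtag(off);	/* ldxa */

			if (!(dtag & DTAG_MASK))	/* line invalid */
				continue;
			if ((dtag & ~DTAG_MASK) != tag)	/* other page  */
				continue;
			clear_dtag(off);	/* stxa %g0 + membar #Sync */
		}
	}

The old unrolled version probed four lines per iteration and branched out to the dflush1-dflush4 stubs to invalidate; the new loop trades that hand scheduling for a straight-line compare-and-invalidate per line.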