aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2005-09-25 19:46:57 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2005-09-25 19:46:57 -0400
commit56425306517ef28a9b480161cdb96d182172bc1d (patch)
tree204cfbef0e5d86954f87b6b40d79d57f8157e5ea
parent52f26deb7c67d5f34910660200b925c1a2b8df8c (diff)
[SPARC64]: Add CONFIG_DEBUG_PAGEALLOC support.
The trick is that we do the kernel linear mapping TLB miss starting with an instruction sequence like this:

	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5

succeeded by an instruction sequence which performs a full page table walk starting at swapper_pg_dir. We first take over the trap table from the firmware. Then, using this constant PTE generation for the linear mapping area above, we build the kernel page tables for the linear mapping. After this is setup, we patch that branch above into a "nop", which will cause TLB misses to fall through to the full page table walk. With this, the page unmapping for CONFIG_DEBUG_PAGEALLOC is trivial. Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/sparc64/Kconfig.debug8
-rw-r--r--arch/sparc64/kernel/head.S6
-rw-r--r--arch/sparc64/kernel/ktlb.S33
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S3
-rw-r--r--arch/sparc64/mm/init.c109
-rw-r--r--include/asm-sparc64/cacheflush.h5
-rw-r--r--include/asm-sparc64/pgtable.h7
7 files changed, 156 insertions, 15 deletions
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index af0e9411b83e..fa06ea04837b 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,6 +33,14 @@ config DEBUG_BOOTMEM
33 depends on DEBUG_KERNEL 33 depends on DEBUG_KERNEL
34 bool "Debug BOOTMEM initialization" 34 bool "Debug BOOTMEM initialization"
35 35
36config DEBUG_PAGEALLOC
37 bool "Page alloc debugging"
38 depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
39 help
40 Unmap pages from the kernel linear mapping after free_pages().
41 This results in a large slowdown, but helps to find certain types
42 of memory corruptions.
43
36config MCOUNT 44config MCOUNT
37 bool 45 bool
38 depends on STACK_DEBUG 46 depends on STACK_DEBUG
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 56af714f5f1b..ecc748fb9ad7 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -525,12 +525,6 @@ bootup_user_stack_end:
525 525
526#include "ttable.S" 526#include "ttable.S"
527#include "systbls.S" 527#include "systbls.S"
528
529 .align 1024
530 .globl swapper_pg_dir
531swapper_pg_dir:
532 .word 0
533
534#include "ktlb.S" 528#include "ktlb.S"
535#include "etrap.S" 529#include "etrap.S"
536#include "rtrap.S" 530#include "rtrap.S"
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index a591bc0ebc7b..7796b37f478c 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -132,9 +132,40 @@ kvmap_do_obp:
132 */ 132 */
133 .align 32 133 .align 32
134kvmap: 134kvmap:
135 brlz,pt %g4, kvmap_load 135 brgez,pn %g4, kvmap_nonlinear
136 nop
137
138#ifdef CONFIG_DEBUG_PAGEALLOC
139 .globl kvmap_linear_patch
140kvmap_linear_patch:
141#endif
142 ba,pt %xcc, kvmap_load
136 xor %g2, %g4, %g5 143 xor %g2, %g4, %g5
137 144
145#ifdef CONFIG_DEBUG_PAGEALLOC
146 sethi %hi(swapper_pg_dir), %g5
147 or %g5, %lo(swapper_pg_dir), %g5
148 sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
149 srlx %g6, 64 - PAGE_SHIFT, %g6
150 andn %g6, 0x3, %g6
151 lduw [%g5 + %g6], %g5
152 brz,pn %g5, longpath
153 sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
154 srlx %g6, 64 - PAGE_SHIFT, %g6
155 sllx %g5, 11, %g5
156 andn %g6, 0x3, %g6
157 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
158 brz,pn %g5, longpath
159 sllx %g4, 64 - PMD_SHIFT, %g6
160 srlx %g6, 64 - PAGE_SHIFT, %g6
161 sllx %g5, 11, %g5
162 andn %g6, 0x7, %g6
163 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
164 brz,pn %g5, longpath
165 nop
166 ba,a,pt %xcc, kvmap_load
167#endif
168
138kvmap_nonlinear: 169kvmap_nonlinear:
139 sethi %hi(MODULES_VADDR), %g5 170 sethi %hi(MODULES_VADDR), %g5
140 cmp %g4, %g5 171 cmp %g4, %g5
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index f47d0be39378..2af0cf0a8640 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -9,8 +9,7 @@ ENTRY(_start)
9jiffies = jiffies_64; 9jiffies = jiffies_64;
10SECTIONS 10SECTIONS
11{ 11{
12 swapper_pmd_dir = 0x0000000000402000; 12 swapper_low_pmd_dir = 0x0000000000402000;
13 empty_pg_dir = 0x0000000000403000;
14 . = 0x4000; 13 . = 0x4000;
15 .text 0x0000000000404000 : 14 .text 0x0000000000404000 :
16 { 15 {
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index cf747372f0c9..8d72f8a1268e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1332,15 +1332,114 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1332 return end_pfn; 1332 return end_pfn;
1333} 1333}
1334 1334
1335#ifdef CONFIG_DEBUG_PAGEALLOC
1336static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
1337{
1338 unsigned long vstart = PAGE_OFFSET + pstart;
1339 unsigned long vend = PAGE_OFFSET + pend;
1340 unsigned long alloc_bytes = 0UL;
1341
1342 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1343 prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
1344 vstart, vend);
1345 prom_halt();
1346 }
1347
1348 while (vstart < vend) {
1349 unsigned long this_end, paddr = __pa(vstart);
1350 pgd_t *pgd = pgd_offset_k(vstart);
1351 pud_t *pud;
1352 pmd_t *pmd;
1353 pte_t *pte;
1354
1355 pud = pud_offset(pgd, vstart);
1356 if (pud_none(*pud)) {
1357 pmd_t *new;
1358
1359 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1360 alloc_bytes += PAGE_SIZE;
1361 pud_populate(&init_mm, pud, new);
1362 }
1363
1364 pmd = pmd_offset(pud, vstart);
1365 if (!pmd_present(*pmd)) {
1366 pte_t *new;
1367
1368 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1369 alloc_bytes += PAGE_SIZE;
1370 pmd_populate_kernel(&init_mm, pmd, new);
1371 }
1372
1373 pte = pte_offset_kernel(pmd, vstart);
1374 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1375 if (this_end > vend)
1376 this_end = vend;
1377
1378 while (vstart < this_end) {
1379 pte_val(*pte) = (paddr | pgprot_val(prot));
1380
1381 vstart += PAGE_SIZE;
1382 paddr += PAGE_SIZE;
1383 pte++;
1384 }
1385 }
1386
1387 return alloc_bytes;
1388}
1389
1390extern struct linux_mlist_p1275 *prom_ptot_ptr;
1391extern unsigned int kvmap_linear_patch[1];
1392
1393static void __init kernel_physical_mapping_init(void)
1394{
1395 struct linux_mlist_p1275 *p = prom_ptot_ptr;
1396 unsigned long mem_alloced = 0UL;
1397
1398 while (p) {
1399 unsigned long phys_start, phys_end;
1400
1401 phys_start = p->start_adr;
1402 phys_end = phys_start + p->num_bytes;
1403 mem_alloced += kernel_map_range(phys_start, phys_end,
1404 PAGE_KERNEL);
1405
1406 p = p->theres_more;
1407 }
1408
1409 printk("Allocated %ld bytes for kernel page tables.\n",
1410 mem_alloced);
1411
1412 kvmap_linear_patch[0] = 0x01000000; /* nop */
1413 flushi(&kvmap_linear_patch[0]);
1414
1415 __flush_tlb_all();
1416}
1417
1418void kernel_map_pages(struct page *page, int numpages, int enable)
1419{
1420 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1421 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1422
1423 kernel_map_range(phys_start, phys_end,
1424 (enable ? PAGE_KERNEL : __pgprot(0)));
1425
1426 /* we should perform an IPI and flush all tlbs,
1427 * but that can deadlock->flush only current cpu.
1428 */
1429 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1430 PAGE_OFFSET + phys_end);
1431}
1432#endif
1433
1335/* paging_init() sets up the page tables */ 1434/* paging_init() sets up the page tables */
1336 1435
1337extern void cheetah_ecache_flush_init(void); 1436extern void cheetah_ecache_flush_init(void);
1338 1437
1339static unsigned long last_valid_pfn; 1438static unsigned long last_valid_pfn;
1439pgd_t swapper_pg_dir[2048];
1340 1440
1341void __init paging_init(void) 1441void __init paging_init(void)
1342{ 1442{
1343 extern pmd_t swapper_pmd_dir[1024];
1344 unsigned long end_pfn, pages_avail, shift; 1443 unsigned long end_pfn, pages_avail, shift;
1345 unsigned long real_end; 1444 unsigned long real_end;
1346 1445
@@ -1361,11 +1460,11 @@ void __init paging_init(void)
1361 */ 1460 */
1362 init_mm.pgd += ((shift) / (sizeof(pgd_t))); 1461 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1363 1462
1364 memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir)); 1463 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
1365 1464
1366 /* Now can init the kernel/bad page tables. */ 1465 /* Now can init the kernel/bad page tables. */
1367 pud_set(pud_offset(&swapper_pg_dir[0], 0), 1466 pud_set(pud_offset(&swapper_pg_dir[0], 0),
1368 swapper_pmd_dir + (shift / sizeof(pgd_t))); 1467 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
1369 1468
1370 swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); 1469 swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
1371 1470
@@ -1390,6 +1489,10 @@ void __init paging_init(void)
1390 pages_avail = 0; 1489 pages_avail = 0;
1391 last_valid_pfn = end_pfn = bootmem_init(&pages_avail); 1490 last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1392 1491
1492#ifdef CONFIG_DEBUG_PAGEALLOC
1493 kernel_physical_mapping_init();
1494#endif
1495
1393 { 1496 {
1394 unsigned long zones_size[MAX_NR_ZONES]; 1497 unsigned long zones_size[MAX_NR_ZONES];
1395 unsigned long zholes_size[MAX_NR_ZONES]; 1498 unsigned long zholes_size[MAX_NR_ZONES];
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index ededd2659eab..b3f61659ba81 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -66,6 +66,11 @@ extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
66#define flush_cache_vmap(start, end) do { } while (0) 66#define flush_cache_vmap(start, end) do { } while (0)
67#define flush_cache_vunmap(start, end) do { } while (0) 67#define flush_cache_vunmap(start, end) do { } while (0)
68 68
69#ifdef CONFIG_DEBUG_PAGEALLOC
70/* internal debugging function */
71void kernel_map_pages(struct page *page, int numpages, int enable);
72#endif
73
69#endif /* !__ASSEMBLY__ */ 74#endif /* !__ASSEMBLY__ */
70 75
71#endif /* _SPARC64_CACHEFLUSH_H */ 76#endif /* _SPARC64_CACHEFLUSH_H */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index a297f6144f0f..43cbb089cde2 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -60,13 +60,13 @@
60 * table can map 60 * table can map
61 */ 61 */
62#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) 62#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
63#define PMD_SIZE (1UL << PMD_SHIFT) 63#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
64#define PMD_MASK (~(PMD_SIZE-1)) 64#define PMD_MASK (~(PMD_SIZE-1))
65#define PMD_BITS (PAGE_SHIFT - 2) 65#define PMD_BITS (PAGE_SHIFT - 2)
66 66
67/* PGDIR_SHIFT determines what a third-level page table entry can map */ 67/* PGDIR_SHIFT determines what a third-level page table entry can map */
68#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS) 68#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
69#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 69#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
70#define PGDIR_MASK (~(PGDIR_SIZE-1)) 70#define PGDIR_MASK (~(PGDIR_SIZE-1))
71#define PGDIR_BITS (PAGE_SHIFT - 2) 71#define PGDIR_BITS (PAGE_SHIFT - 2)
72 72
@@ -336,7 +336,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
336#define pte_clear(mm,addr,ptep) \ 336#define pte_clear(mm,addr,ptep) \
337 set_pte_at((mm), (addr), (ptep), __pte(0UL)) 337 set_pte_at((mm), (addr), (ptep), __pte(0UL))
338 338
339extern pgd_t swapper_pg_dir[1]; 339extern pgd_t swapper_pg_dir[2048];
340extern pmd_t swapper_low_pmd_dir[2048];
340 341
341/* These do nothing with the way I have things setup. */ 342/* These do nothing with the way I have things setup. */
342#define mmu_lockarea(vaddr, len) (vaddr) 343#define mmu_lockarea(vaddr, len) (vaddr)