-rw-r--r--  arch/parisc/include/asm/cacheflush.h |   5
-rw-r--r--  arch/parisc/include/asm/pgtable.h    |   9
-rw-r--r--  arch/parisc/kernel/cache.c           |  13
-rw-r--r--  arch/parisc/kernel/entry.S           |   3
-rw-r--r--  arch/parisc/kernel/head.S            |   5
-rw-r--r--  arch/parisc/kernel/module.c          |  10
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S     |   1
-rw-r--r--  arch/parisc/mm/init.c                | 260
8 files changed, 182 insertions(+), 124 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index d18328b3f938..da601dd34c05 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -3,6 +3,7 @@
 
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <asm/tlbflush.h>
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
@@ -112,8 +113,10 @@ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 static inline void
 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
-        if (PageAnon(page))
+        if (PageAnon(page)) {
+                flush_tlb_page(vma, vmaddr);
                 flush_dcache_page_asm(page_to_phys(page), vmaddr);
+        }
 }
 
 #ifdef CONFIG_DEBUG_RODATA
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 5d7b8ce9fdf3..22dadeb58695 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -177,7 +177,10 @@ struct vm_area_struct;
 
 #define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_KERNEL    (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNEL_EXEC       (_PAGE_KERNEL_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX        (_PAGE_KERNEL_EXEC | _PAGE_WRITE)
+#define _PAGE_KERNEL    (_PAGE_KERNEL_RO | _PAGE_WRITE)
 
 /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
  * are page-aligned, we don't care about the PAGE_OFFSET bits, except
@@ -208,7 +211,9 @@ struct vm_area_struct;
 #define PAGE_COPY       PAGE_EXECREAD
 #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
 #define PAGE_KERNEL     __pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_RO  __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+#define PAGE_KERNEL_EXEC        __pgprot(_PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO  __pgprot(_PAGE_KERNEL_RO)
 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
 #define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
 
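Reviewer note (not part of the patch): the pgtable.h hunk is the pivot of the series, because plain PAGE_KERNEL no longer carries _PAGE_EXEC. The snippet below is only a compile-time restatement of the new hierarchy under the definitions added above; the helper name is invented for illustration.

#include <linux/kernel.h>
#include <asm/pgtable.h>

/* Illustrative only: restate the new kernel protection hierarchy.
 * Every assertion follows directly from the #defines in the hunk above. */
static inline void check_kernel_prot_hierarchy(void)
{
        BUILD_BUG_ON(_PAGE_KERNEL_EXEC != (_PAGE_KERNEL_RO | _PAGE_EXEC));
        BUILD_BUG_ON(_PAGE_KERNEL_RWX  != (_PAGE_KERNEL_EXEC | _PAGE_WRITE));
        BUILD_BUG_ON(_PAGE_KERNEL      != (_PAGE_KERNEL_RO | _PAGE_WRITE));
        BUILD_BUG_ON(_PAGE_KERNEL & _PAGE_EXEC);  /* plain kernel pages are now non-executable */
}

Callers that genuinely need executable kernel mappings now have to ask for PAGE_KERNEL_EXEC or PAGE_KERNEL_RWX explicitly, which is exactly what the head.S, module.c and mm/init.c hunks below do.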
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 3f11331c2775..83335f3da5fc 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -304,10 +304,20 @@ void flush_dcache_page(struct page *page)
                 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                 addr = mpnt->vm_start + offset;
 
+                /* The TLB is the engine of coherence on parisc: The
+                 * CPU is entitled to speculate any page with a TLB
+                 * mapping, so here we kill the mapping then flush the
+                 * page along a special flush only alias mapping.
+                 * This guarantees that the page is no-longer in the
+                 * cache for any process and nor may it be
+                 * speculatively read in (until the user or kernel
+                 * specifically accesses it, of course) */
+
+                flush_tlb_page(mpnt, addr);
                 if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
                         __flush_cache_page(mpnt, addr, page_to_phys(page));
                         if (old_addr)
-                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
                         old_addr = addr;
                 }
         }
@@ -499,6 +509,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
         BUG_ON(!vma->vm_mm->context);
 
+        flush_tlb_page(vma, vmaddr);
         __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
 
 }
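Reviewer note (not part of the patch): the new block comment states the model, namely that the CPU may speculatively pull a page into the cache as long as a TLB entry for it exists, so both hunks purge the translation before flushing. A minimal sketch of that ordering, using the same calls the hunks use (the wrapper itself is invented for illustration):

/* Illustrative sketch of the purge-then-flush ordering added above.
 * Killing the TLB entry first means the CPU cannot speculatively
 * re-fill the line between the flush and the next explicit access. */
static inline void purge_then_flush(struct vm_area_struct *vma,
                                    unsigned long vmaddr, struct page *page)
{
        flush_tlb_page(vma, vmaddr);                         /* 1: drop the mapping */
        __flush_cache_page(vma, vmaddr, page_to_phys(page)); /* 2: flush via the flush-only alias */
}

Note that in flush_dcache_page() the TLB purge is now done for every mapping walked, while the dcache flush itself is still skipped for SHMLBA-congruent aliases.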
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ead8d2a1034c..6f0594439143 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -692,6 +692,9 @@ ENTRY(fault_vector_11)
 END(fault_vector_11)
 
 #endif
+        /* Fault vector is separately protected and *must* be on its own page */
+        .align          PAGE_SIZE
+ENTRY(end_fault_vector)
 
         .import handle_interruption,code
         .import do_cpu_irq_mask,code
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 145c5e4caaa0..37aabd772fbb 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -106,8 +106,9 @@ $bss_loop:
 #endif
 
 
-        /* Now initialize the PTEs themselves */
-        ldo             0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+        /* Now initialize the PTEs themselves.  We use RWX for
+         * everything ... it will get remapped correctly later */
+        ldo             0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
         ldi             (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
         load32          PA(pg0),%r1
 
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 6e81bb596e5b..cedbbb8b18d9 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -61,8 +61,10 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 
+#include <asm/pgtable.h>
 #include <asm/unwind.h>
 
 #if 0
@@ -214,7 +216,13 @@ void *module_alloc(unsigned long size)
 {
         if (size == 0)
                 return NULL;
-        return vmalloc(size);
+        /* using RWX means less protection for modules, but it's
+         * easier than trying to map the text, data, init_text and
+         * init_data correctly */
+        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+                                    GFP_KERNEL | __GFP_HIGHMEM,
+                                    PAGE_KERNEL_RWX, -1,
+                                    __builtin_return_address(0));
 }
 
 #ifndef CONFIG_64BIT
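Reviewer note (not part of the patch): the functional delta here is smaller than the hunk makes it look. As far as I recall the generic vmalloc of this kernel generation, the old "return vmalloc(size);" already allocated from the same VMALLOC_START..VMALLOC_END window with GFP_KERNEL | __GFP_HIGHMEM, so the change effectively swaps PAGE_KERNEL for PAGE_KERNEL_RWX, which is required now that PAGE_KERNEL has lost _PAGE_EXEC. A hedged sketch of the old path for comparison (the helper name is invented):

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <asm/pgtable.h>

/* Roughly what "return vmalloc(size);" expanded to before this patch
 * (hedged: based on the generic vmalloc of this kernel era, not on
 * anything in the patch itself). */
static void *old_module_alloc_equivalent(unsigned long size)
{
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                                    GFP_KERNEL | __GFP_HIGHMEM,
                                    PAGE_KERNEL, -1,
                                    __builtin_return_address(0));
}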
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 8f1e4efd143e..bf6a43a322ec 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -134,6 +134,7 @@ SECTIONS
         . = ALIGN(16384);
         __init_begin = .;
         INIT_TEXT_SECTION(16384)
+        . = ALIGN(PAGE_SIZE);
         INIT_DATA_SECTION(16)
         /* we have to discard exit text and such at runtime, not link time */
         .exit.text :
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b7ed8d7a9b33..7e6b4656f3d7 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -369,24 +369,158 @@ static void __init setup_bootmem(void)
         request_resource(&sysram_resources[0], &pdcdata_resource);
 }
 
+static void __init map_pages(unsigned long start_vaddr,
+                             unsigned long start_paddr, unsigned long size,
+                             pgprot_t pgprot, int force)
+{
+        pgd_t *pg_dir;
+        pmd_t *pmd;
+        pte_t *pg_table;
+        unsigned long end_paddr;
+        unsigned long start_pmd;
+        unsigned long start_pte;
+        unsigned long tmp1;
+        unsigned long tmp2;
+        unsigned long address;
+        unsigned long vaddr;
+        unsigned long ro_start;
+        unsigned long ro_end;
+        unsigned long fv_addr;
+        unsigned long gw_addr;
+        extern const unsigned long fault_vector_20;
+        extern void * const linux_gateway_page;
+
+        ro_start = __pa((unsigned long)_text);
+        ro_end   = __pa((unsigned long)&data_start);
+        fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
+        gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+
+        end_paddr = start_paddr + size;
+
+        pg_dir = pgd_offset_k(start_vaddr);
+
+#if PTRS_PER_PMD == 1
+        start_pmd = 0;
+#else
+        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+
+        address = start_paddr;
+        vaddr = start_vaddr;
+        while (address < end_paddr) {
+#if PTRS_PER_PMD == 1
+                pmd = (pmd_t *)__pa(pg_dir);
+#else
+                pmd = (pmd_t *)pgd_address(*pg_dir);
+
+                /*
+                 * pmd is physical at this point
+                 */
+
+                if (!pmd) {
+                        pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
+                        pmd = (pmd_t *) __pa(pmd);
+                }
+
+                pgd_populate(NULL, pg_dir, __va(pmd));
+#endif
+                pg_dir++;
+
+                /* now change pmd to kernel virtual addresses */
+
+                pmd = (pmd_t *)__va(pmd) + start_pmd;
+                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
+
+                        /*
+                         * pg_table is physical at this point
+                         */
+
+                        pg_table = (pte_t *)pmd_address(*pmd);
+                        if (!pg_table) {
+                                pg_table = (pte_t *)
+                                        alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+                                pg_table = (pte_t *) __pa(pg_table);
+                        }
+
+                        pmd_populate_kernel(NULL, pmd, __va(pg_table));
+
+                        /* now change pg_table to kernel virtual addresses */
+
+                        pg_table = (pte_t *) __va(pg_table) + start_pte;
+                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
+                                pte_t pte;
+
+                                /*
+                                 * Map the fault vector writable so we can
+                                 * write the HPMC checksum.
+                                 */
+                                if (force)
+                                        pte = __mk_pte(address, pgprot);
+                                else if (core_kernel_text(vaddr) &&
+                                         address != fv_addr)
+                                        pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+                                else
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+                                if (address >= ro_start && address < ro_end
+                                                        && address != fv_addr
+                                                        && address != gw_addr)
+                                        pte = __mk_pte(address, PAGE_KERNEL_RO);
+                                else
+#endif
+                                        pte = __mk_pte(address, pgprot);
+
+                                if (address >= end_paddr) {
+                                        if (force)
+                                                break;
+                                        else
+                                                pte_val(pte) = 0;
+                                }
+
+                                set_pte(pg_table, pte);
+
+                                address += PAGE_SIZE;
+                                vaddr += PAGE_SIZE;
+                        }
+                        start_pte = 0;
+
+                        if (address >= end_paddr)
+                                break;
+                }
+                start_pmd = 0;
+        }
+}
+
 void free_initmem(void)
 {
         unsigned long addr;
         unsigned long init_begin = (unsigned long)__init_begin;
         unsigned long init_end = (unsigned long)__init_end;
 
-#ifdef CONFIG_DEBUG_KERNEL
+        /* The init text pages are marked R-X.  We have to
+         * flush the icache and mark them RW-
+         *
+         * This is tricky, because map_pages is in the init section.
+         * Do a dummy remap of the data section first (the data
+         * section is already PAGE_KERNEL) to pull in the TLB entries
+         * for map_kernel */
+        map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+                  PAGE_KERNEL_RWX, 1);
+        /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
+         * map_pages */
+        map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+                  PAGE_KERNEL, 1);
+
+        /* force the kernel to see the new TLB entries */
+        __flush_tlb_range(0, init_begin, init_end);
         /* Attempt to catch anyone trying to execute code here
          * by filling the page with BRK insns.
          */
         memset((void *)init_begin, 0x00, init_end - init_begin);
+        /* finally dump all the instructions which were cached, since the
+         * pages are no-longer executable */
         flush_icache_range(init_begin, init_end);
-#endif
 
-        /* align __init_begin and __init_end to page size,
-           ignoring linker script where we might have tried to save RAM */
-        init_begin = PAGE_ALIGN(init_begin);
-        init_end = PAGE_ALIGN(init_end);
         for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
                 ClearPageReserved(virt_to_page(addr));
                 init_page_count(virt_to_page(addr));
@@ -616,114 +750,6 @@ void show_mem(unsigned int filter)
 #endif
 }
 
-
-static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
-{
-        pgd_t *pg_dir;
-        pmd_t *pmd;
-        pte_t *pg_table;
-        unsigned long end_paddr;
-        unsigned long start_pmd;
-        unsigned long start_pte;
-        unsigned long tmp1;
-        unsigned long tmp2;
-        unsigned long address;
-        unsigned long ro_start;
-        unsigned long ro_end;
-        unsigned long fv_addr;
-        unsigned long gw_addr;
-        extern const unsigned long fault_vector_20;
-        extern void * const linux_gateway_page;
-
-        ro_start = __pa((unsigned long)_text);
-        ro_end   = __pa((unsigned long)&data_start);
-        fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-        gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
-
-        end_paddr = start_paddr + size;
-
-        pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
-        start_pmd = 0;
-#else
-        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
-        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-
-        address = start_paddr;
-        while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
-                pmd = (pmd_t *)__pa(pg_dir);
-#else
-                pmd = (pmd_t *)pgd_address(*pg_dir);
-
-                /*
-                 * pmd is physical at this point
-                 */
-
-                if (!pmd) {
-                        pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
-                        pmd = (pmd_t *) __pa(pmd);
-                }
-
-                pgd_populate(NULL, pg_dir, __va(pmd));
-#endif
-                pg_dir++;
-
-                /* now change pmd to kernel virtual addresses */
-
-                pmd = (pmd_t *)__va(pmd) + start_pmd;
-                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {
-
-                        /*
-                         * pg_table is physical at this point
-                         */
-
-                        pg_table = (pte_t *)pmd_address(*pmd);
-                        if (!pg_table) {
-                                pg_table = (pte_t *)
-                                        alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
-                                pg_table = (pte_t *) __pa(pg_table);
-                        }
-
-                        pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
-                        /* now change pg_table to kernel virtual addresses */
-
-                        pg_table = (pte_t *) __va(pg_table) + start_pte;
-                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
-                                pte_t pte;
-
-                                /*
-                                 * Map the fault vector writable so we can
-                                 * write the HPMC checksum.
-                                 */
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-                                if (address >= ro_start && address < ro_end
-                                                        && address != fv_addr
-                                                        && address != gw_addr)
-                                        pte = __mk_pte(address, PAGE_KERNEL_RO);
-                                else
-#endif
-                                        pte = __mk_pte(address, pgprot);
-
-                                if (address >= end_paddr)
-                                        pte_val(pte) = 0;
-
-                                set_pte(pg_table, pte);
-
-                                address += PAGE_SIZE;
-                        }
-                        start_pte = 0;
-
-                        if (address >= end_paddr)
-                                break;
-                }
-                start_pmd = 0;
-        }
-}
-
 /*
  * pagetable_init() sets up the page tables
  *
@@ -748,14 +774,14 @@ static void __init pagetable_init(void)
                 size = pmem_ranges[range].pages << PAGE_SHIFT;
 
                 map_pages((unsigned long)__va(start_paddr), start_paddr,
-                          size, PAGE_KERNEL);
+                          size, PAGE_KERNEL, 0);
         }
 
 #ifdef CONFIG_BLK_DEV_INITRD
         if (initrd_end && initrd_end > mem_limit) {
                 printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
                 map_pages(initrd_start, __pa(initrd_start),
-                          initrd_end - initrd_start, PAGE_KERNEL);
+                          initrd_end - initrd_start, PAGE_KERNEL, 0);
         }
 #endif
 
@@ -780,7 +806,7 @@ static void __init gateway_init(void)
          */
 
         map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
-                  PAGE_SIZE, PAGE_GATEWAY);
+                  PAGE_SIZE, PAGE_GATEWAY, 1);
 }
 
 #ifdef CONFIG_HPUX
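Reviewer note (not part of the patch): the protection decision inside the new map_pages() is spread across two nested loops and an #ifdef, so here is a flattened restatement for review purposes. The helper name and standalone form are invented; the branch order and the fault-vector/gateway-page exclusions mirror the hunk above (the fault vector stays writable so the HPMC checksum can be updated).

/* Illustrative restatement of the per-page protection choice made by
 * the new map_pages(); not part of the patch itself. */
static pgprot_t prot_for_page(unsigned long address, unsigned long vaddr,
                              pgprot_t pgprot, int force,
                              unsigned long ro_start, unsigned long ro_end,
                              unsigned long fv_addr, unsigned long gw_addr)
{
        if (force)                      /* caller dictates, e.g. PAGE_GATEWAY or the
                                         * free_initmem() RWX -> RW double remap */
                return pgprot;
        if (core_kernel_text(vaddr) && address != fv_addr)
                return PAGE_KERNEL_EXEC;        /* kernel text: R-X */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
        if (address >= ro_start && address < ro_end
            && address != fv_addr && address != gw_addr)
                return PAGE_KERNEL_RO;          /* _text..data_start: read-only */
#endif
        return pgprot;                          /* default, normally PAGE_KERNEL (RW-) */
}

Everything not covered by one of the special cases keeps the protection passed in by the caller, which for pagetable_init() and the initrd mapping is now the non-executable PAGE_KERNEL.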
