 arch/parisc/Kconfig              |  31
 arch/parisc/kernel/asm-offsets.c |   3
 arch/parisc/kernel/entry.S       |  36
 arch/parisc/kernel/head.S        |  15
 arch/parisc/kernel/init_task.c   |  10
 arch/parisc/kernel/pacache.S     |  25
 arch/parisc/kernel/syscall.S     |  10
 arch/parisc/kernel/vmlinux.lds.S |  54
 arch/parisc/mm/init.c            |  28
 include/asm-parisc/page.h        |  25
 include/asm-parisc/pgtable.h     |  63
 11 files changed, 198 insertions(+), 102 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 19f911c5dd58..910fb3afc0b5 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -138,6 +138,37 @@ config 64BIT
           enable this option otherwise. The 64bit kernel is significantly bigger
           and slower than the 32bit one.
 
+choice
+        prompt "Kernel page size"
+        default PARISC_PAGE_SIZE_4KB  if !64BIT
+        default PARISC_PAGE_SIZE_4KB  if 64BIT
+#       default PARISC_PAGE_SIZE_16KB if 64BIT
+
+config PARISC_PAGE_SIZE_4KB
+        bool "4KB"
+        help
+          This lets you select the page size of the kernel.  For best
+          performance, a page size of 16KB is recommended.  For best
+          compatibility with 32bit applications, a page size of 4KB should be
+          selected (the vast majority of 32bit binaries work perfectly fine
+          with a larger page size).
+
+          4KB    For best 32bit compatibility
+          16KB   For best performance
+          64KB   For best performance, might give more overhead.
+
+          If you don't know what to do, choose 4KB.
+
+config PARISC_PAGE_SIZE_16KB
+        bool "16KB (EXPERIMENTAL)"
+        depends on PA8X00 && EXPERIMENTAL
+
+config PARISC_PAGE_SIZE_64KB
+        bool "64KB (EXPERIMENTAL)"
+        depends on PA8X00 && EXPERIMENTAL
+
+endchoice
+
 config SMP
         bool "Symmetric multi-processing support"
         ---help---
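The help text above describes the basic trade-off: each TLB entry and each PTE covers one base page, so a larger page size cuts page-table and TLB overhead at the cost of coarser mapping granularity. Plain arithmetic, not part of the patch:

/* Mapping 1 MiB of memory:
 *   4 kB pages:  1 MiB / 4 kB  = 256 pages (256 PTEs / TLB entries)
 *   16 kB pages: 1 MiB / 16 kB =  64 pages
 *   64 kB pages: 1 MiB / 64 kB =  16 pages
 * The flip side is that the smallest possible mapping grows to 16 kB
 * or 64 kB as well, which is the compatibility concern mentioned above. */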
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index e23c4e1e3a25..c11a5bc7c067 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -288,8 +288,11 @@ int main(void)
         DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
         DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
         DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
+        DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
         DEFINE(ASM_PT_INITIAL, PT_INITIAL);
         DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
+        DEFINE(ASM_PAGE_SIZE_DIV64, PAGE_SIZE/64);
+        DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128);
         BLANK();
         DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
         DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
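ASM_PAGE_SIZE_DIV64 and ASM_PAGE_SIZE_DIV128 are the trip counts for the page copy/clear loops in pacache.S, which move 64 bytes per pass on the 32-bit path and 128 bytes on the 64-bit path; the old code hard-wired 64 and 32, which is only correct for 4 kB pages. Worked values (plain arithmetic):

/* Loop trip counts exported for pacache.S:
 *   PAGE_SIZE  4096:  4096/128 =  32,  4096/64 =  64   (the old constants)
 *   PAGE_SIZE 16384: 16384/128 = 128, 16384/64 = 256
 *   PAGE_SIZE 65536: 65536/128 = 512, 65536/64 = 1024 */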
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 7c95d7663c29..d9e53cf0372b 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -502,18 +502,20 @@
          * all ILP32 processes and all the kernel for machines with
          * under 4GB of memory) */
         .macro          L3_ptep pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
         extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
         copy            %r0,\pte
-        extrd,u,*=      \va,31,32,%r0
+        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
         ldw,s           \index(\pgd),\pgd
-        extrd,u,*=      \va,31,32,%r0
+        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
         bb,>=,n         \pgd,_PxD_PRESENT_BIT,\fault
-        extrd,u,*=      \va,31,32,%r0
+        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
         shld            \pgd,PxD_VALUE_SHIFT,\index
-        extrd,u,*=      \va,31,32,%r0
+        extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
         copy            \index,\pgd
-        extrd,u,*<>     \va,31,32,%r0
+        extrd,u,*<>     \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
         ldo             ASM_PGD_PMD_OFFSET(\pgd),\pgd
+#endif
         L2_ptep         \pgd,\pte,\index,\va,\fault
         .endm
 
@@ -563,10 +565,18 @@
         extrd,u,*=      \pte,_PAGE_GATEWAY_BIT+32,1,%r0
         depd            %r0,11,2,\prot  /* If Gateway, Set PL2 to 0 */
 
-        /* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */
+        /* Enforce uncacheable pages.
+         * This should ONLY be use for MMIO on PA 2.0 machines.
+         * Memory/DMA is cache coherent on all PA2.0 machines we support
+         * (that means T-class is NOT supported) and the memory controllers
+         * on most of those machines only handles cache transactions.
+         */
+        extrd,u,*=      \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+        depi            1,12,1,\prot
 
-        depd            %r0,63,PAGE_SHIFT,\pte
-        extrd,s         \pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+        extrd,u         \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
         .endm
 
         /* Identical macro to make_insert_tlb above, except it
@@ -584,9 +594,8 @@
 
         /* Get rid of prot bits and convert to page addr for iitlba */
 
-        depi            0,31,PAGE_SHIFT,\pte
+        depi            _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
         extru           \pte,24,25,\pte
-
         .endm
 
         /* This is for ILP32 PA2.0 only.  The TLB insertion needs
@@ -1201,10 +1210,9 @@ intr_save:
          */
 
         /* adjust isr/ior. */
-
-        extrd,u         %r16,63,7,%r1   /* get high bits from isr for ior */
-        depd            %r1,31,7,%r17   /* deposit them into ior */
-        depdi           0,63,7,%r16     /* clear them from isr */
+        extrd,u         %r16,63,SPACEID_SHIFT,%r1   /* get high bits from isr for ior */
+        depd            %r1,31,SPACEID_SHIFT,%r17   /* deposit them into ior */
+        depdi           0,63,SPACEID_SHIFT,%r16     /* clear them from isr */
 #endif
         STREG %r16, PT_ISR(%r29)
         STREG %r17, PT_IOR(%r29)
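The intr_save hunk replaces the hard-coded field width 7 with SPACEID_SHIFT, which pgtable.h (further down in this patch) derives from the page-table geometry, so the number of space-id bits folded into isr/ior tracks the configured page size. For the existing 64-bit/4 kB layout the expression should still evaluate to 7, which is why the constant could previously be written directly. For reference, the definitions involved:

#define MAX_ADDRBITS    (PGDIR_SHIFT + BITS_PER_PGD)
#define SPACEID_SHIFT   (MAX_ADDRBITS - 32)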
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 0b47afc20690..3e79e62f7b0b 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -76,16 +76,16 @@ $bss_loop:
         mtctl           %r4,%cr24       /* Initialize kernel root pointer */
         mtctl           %r4,%cr25       /* Initialize user root pointer */
 
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
         /* Set pmd in pgd */
         load32          PA(pmd0),%r5
         shrd            %r5,PxD_VALUE_SHIFT,%r3
         ldo             (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
         stw             %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
         ldo             ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
 #else
         /* 2-level page table, so pmd == pgd */
         ldo             ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
 #endif
 
         /* Fill in pmd with enough pte directories */
@@ -99,7 +99,7 @@ $bss_loop:
         stw             %r3,0(%r4)
         ldo             (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
         addib,>         -1,%r1,1b
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
         ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
 #else
         ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -107,13 +107,14 @@ $bss_loop:
 
 
         /* Now initialize the PTEs themselves */
-        ldo             _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+        ldo             0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+        ldi             (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
         load32          PA(pg0),%r1
 
 $pgt_fill_loop:
         STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
-        ldo             ASM_PAGE_SIZE(%r3),%r3
-        bb,>=           %r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
+        ldo             (1<<PFN_PTE_SHIFT)(%r3),%r3     /* add one PFN */
+        addib,>         -1,%r11,$pgt_fill_loop
         nop
 
         /* Load the return address...er...crash 'n burn */
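The reworked $pgt_fill_loop no longer advances %r3 by a whole page size and tests an address bit; it loads an explicit PFN count into %r11 and bumps the PTE value by one page frame per store. In C the loop above is roughly this (a sketch only; pg0[] is the PTE array declared in init_task.c):

/* Rough C equivalent of $pgt_fill_loop: identity-map the initially
 * mapped kernel memory, one page frame at a time. */
unsigned long pteval = _PAGE_KERNEL;    /* page frame 0, kernel protections */
unsigned long count  = 1UL << (KERNEL_INITIAL_ORDER - PAGE_SHIFT);
unsigned long i;

for (i = 0; i < count; i++) {
        pte_val(pg0[i]) = pteval;       /* the STREGM in the assembly */
        pteval += 1UL << PFN_PTE_SHIFT; /* "add one PFN" */
}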
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index 7e898fd64415..8384bf9cecd2 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -53,17 +53,17 @@ union thread_union init_thread_union
         __attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
                 { INIT_THREAD_INFO(init_task) };
 
-#ifdef __LP64__
+#if PT_NLEVELS == 3
 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
  * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
  * guarantee that global objects will be laid out in memory in the same order
  * as the order of declaration, so put these in different sections and use
  * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, };
-
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
 #endif
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, };
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte"))) = { {0}, };
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
 
 /*
  * Initial task structure.
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 7a4f07e8d3c3..f600556414d1 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -65,7 +65,7 @@ flush_tlb_all_local:
          */
 
         /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
-        rsm     PSW_SM_I, %r19          /* save I-bit state */
+        rsm             PSW_SM_I, %r19          /* save I-bit state */
         load32          PA(1f), %r1
         nop
         nop
@@ -84,8 +84,7 @@ flush_tlb_all_local:
         rfi
         nop
 
-1:      ldil            L%PA(cache_info), %r1
-        ldo             R%PA(cache_info)(%r1), %r1
+1:      load32          PA(cache_info), %r1
 
         /* Flush Instruction Tlb */
 
@@ -212,8 +211,7 @@ flush_instruction_cache_local:
         .entry
 
         mtsp            %r0, %sr1
-        ldil            L%cache_info, %r1
-        ldo             R%cache_info(%r1), %r1
+        load32          cache_info, %r1
 
         /* Flush Instruction Cache */
 
@@ -254,8 +252,7 @@ flush_data_cache_local:
         .entry
 
         mtsp            %r0, %sr1
-        ldil            L%cache_info, %r1
-        ldo             R%cache_info(%r1), %r1
+        load32          cache_info, %r1
 
         /* Flush Data Cache */
 
@@ -303,7 +300,8 @@ copy_user_page_asm:
          */
 
         ldd             0(%r25), %r19
-        ldi             32, %r1                 /* PAGE_SIZE/128 == 32 */
+        ldi             ASM_PAGE_SIZE_DIV128, %r1
+
         ldw             64(%r25), %r0           /* prefetch 1 cacheline ahead */
         ldw             128(%r25), %r0          /* prefetch 2 */
 
@@ -368,7 +366,7 @@ copy_user_page_asm:
          * use ldd/std on a 32 bit kernel.
          */
         ldw             0(%r25), %r19
-        ldi             64, %r1         /* PAGE_SIZE/64 == 64 */
+        ldi             ASM_PAGE_SIZE_DIV64, %r1
 
 1:
         ldw             4(%r25), %r20
@@ -461,6 +459,7 @@ copy_user_page_asm:
         sub             %r25, %r1, %r23         /* move physical addr into non shadowed reg */
 
         ldil            L%(TMPALIAS_MAP_START), %r28
+        /* FIXME for different page sizes != 4k */
 #ifdef CONFIG_64BIT
         extrd,u         %r26,56,32, %r26        /* convert phys addr to tlb insert format */
         extrd,u         %r23,56,32, %r23        /* convert phys addr to tlb insert format */
@@ -551,6 +550,7 @@ __clear_user_page_asm:
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
         depdi           0, 31,32, %r28          /* clear any sign extension */
+        /* FIXME: page size dependend */
 #endif
         extrd,u         %r26, 56,32, %r26       /* convert phys addr to tlb insert format */
         depd            %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
@@ -566,10 +566,10 @@ __clear_user_page_asm:
         pdtlb           0(%r28)
 
 #ifdef CONFIG_64BIT
-        ldi             32, %r1                 /* PAGE_SIZE/128 == 32 */
+        ldi             ASM_PAGE_SIZE_DIV128, %r1
 
         /* PREFETCH (Write) has not (yet) been proven to help here */
         /* #define      PREFETCHW_OP    ldd     256(%0), %r0 */
 
 1:      std             %r0, 0(%r28)
         std             %r0, 8(%r28)
@@ -591,8 +591,7 @@ __clear_user_page_asm:
         ldo             128(%r28), %r28
 
 #else   /* ! CONFIG_64BIT */
-
-        ldi             64, %r1         /* PAGE_SIZE/64 == 64 */
+        ldi             ASM_PAGE_SIZE_DIV64, %r1
 
 1:
         stw             %r0, 0(%r28)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index af88afef41bd..479d9a017cd1 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -55,7 +55,7 @@
          * pointers.
          */
 
-        .align 4096
+        .align ASM_PAGE_SIZE
 linux_gateway_page:
 
         /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
@@ -632,7 +632,7 @@ cas_action:
 end_compare_and_swap:
 
         /* Make sure nothing else is placed on this page */
-        .align 4096
+        .align ASM_PAGE_SIZE
         .export end_linux_gateway_page
 end_linux_gateway_page:
 
@@ -652,7 +652,7 @@ end_linux_gateway_page:
 
         .section .rodata,"a"
 
-        .align 4096
+        .align ASM_PAGE_SIZE
         /* Light-weight-syscall table */
         /* Start of lws table. */
         .export lws_table
@@ -662,14 +662,14 @@ lws_table:
         LWS_ENTRY(compare_and_swap64)   /* 1 - ELF64 Atomic compare and swap */
         /* End of lws table */
 
-        .align 4096
+        .align ASM_PAGE_SIZE
         .export sys_call_table
 .Lsys_call_table:
 sys_call_table:
 #include "syscall_table.S"
 
 #ifdef CONFIG_64BIT
-        .align 4096
+        .align ASM_PAGE_SIZE
         .export sys_call_table64
 .Lsys_call_table64:
 sys_call_table64:
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 6d6436a6b624..94dcc03a28ed 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
  *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
  *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
  *    Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
+ *    Copyright (C) 2006 Helge Deller <deller@gmx.de>
  *
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -27,6 +28,7 @@
 /* needed for the processor specific cache alignment size */
 #include <asm/cache.h>
 #include <asm/page.h>
+#include <asm/asm-offsets.h>
 
 /* ld script to make hppa Linux kernel */
 #ifndef CONFIG_64BIT
@@ -68,7 +70,7 @@ SECTIONS
   RODATA
 
   /* writeable */
-  . = ALIGN(4096);              /* Make sure this is page aligned so
+  . = ALIGN(ASM_PAGE_SIZE);     /* Make sure this is page aligned so
                                    that we can properly leave these
                                    as writable */
   data_start = .;
@@ -81,23 +83,17 @@ SECTIONS
   __start___unwind = .;         /* unwind info */
   .PARISC.unwind : { *(.PARISC.unwind) }
   __stop___unwind = .;
 
+  /* rarely changed data like cpu maps */
+  . = ALIGN(16);
+  .data.read_mostly : { *(.data.read_mostly) }
+
+  . = ALIGN(L1_CACHE_BYTES);
   .data : {                     /* Data */
         *(.data)
-        *(.data.vm0.pmd)
-        *(.data.vm0.pgd)
-        *(.data.vm0.pte)
         CONSTRUCTORS
         }
 
-  . = ALIGN(4096);
-  /* nosave data is really only used for software suspend...it's here
-   * just in case we ever implement it */
-  __nosave_begin = .;
-  .data_nosave : { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
-
   . = ALIGN(L1_CACHE_BYTES);
   .data.cacheline_aligned : { *(.data.cacheline_aligned) }
 
@@ -105,12 +101,29 @@ SECTIONS
   . = ALIGN(16);
   .data.lock_aligned : { *(.data.lock_aligned) }
 
-  /* rarely changed data like cpu maps */
-  . = ALIGN(16);
-  .data.read_mostly : { *(.data.read_mostly) }
+  . = ALIGN(ASM_PAGE_SIZE);
+  /* nosave data is really only used for software suspend...it's here
+   * just in case we ever implement it */
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(ASM_PAGE_SIZE);
+  __nosave_end = .;
 
   _edata = .;                   /* End of data section */
 
+  __bss_start = .;              /* BSS */
+  /* page table entries need to be PAGE_SIZE aligned */
+  . = ALIGN(ASM_PAGE_SIZE);
+  .data.vmpages : {
+        *(.data.vm0.pmd)
+        *(.data.vm0.pgd)
+        *(.data.vm0.pte)
+        }
+  .bss : { *(.bss) *(COMMON) }
+  __bss_stop = .;
+
+
+  /* assembler code expects init_task to be 16k aligned */
   . = ALIGN(16384);             /* init_task */
   .data.init_task : { *(.data.init_task) }
 
@@ -126,6 +139,7 @@ SECTIONS
   .dlt : { *(.dlt) }
 #endif
 
+  /* reserve space for interrupt stack by aligning __init* to 16k */
   . = ALIGN(16384);
   __init_begin = .;
   .init.text : {
@@ -166,7 +180,7 @@ SECTIONS
      from .altinstructions and .eh_frame */
   .exit.text : { *(.exit.text) }
   .exit.data : { *(.exit.data) }
-  . = ALIGN(4096);
+  . = ALIGN(ASM_PAGE_SIZE);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
   __initramfs_end = .;
@@ -174,14 +188,10 @@ SECTIONS
   __per_cpu_start = .;
   .data.percpu  : { *(.data.percpu) }
   __per_cpu_end = .;
-  . = ALIGN(4096);
+  . = ALIGN(ASM_PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
 
-  __bss_start = .;              /* BSS */
-  .bss : { *(.bss) *(COMMON) }
-  __bss_stop = .;
-
   _end = . ;
 
   /* Sections to be discarded */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3796be67cd53..631712562656 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -6,6 +6,7 @@
  *  changed by Philipp Rumpf
  *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  *  Copyright 2004 Randolph Chung (tausq@debian.org)
+ *  Copyright 2006 Helge Deller (deller@gmx.de)
  *
  */
 
@@ -371,8 +372,8 @@ static void __init setup_bootmem(void)
 
 void free_initmem(void)
 {
-        unsigned long addr;
+        unsigned long addr, init_begin, init_end;
 
         printk(KERN_INFO "Freeing unused kernel memory: ");
 
 #ifdef CONFIG_DEBUG_KERNEL
@@ -395,8 +396,11 @@ void free_initmem(void)
         local_irq_enable();
 #endif
 
-        addr = (unsigned long)(&__init_begin);
-        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+        /* align __init_begin and __init_end to page size,
+           ignoring linker script where we might have tried to save RAM */
+        init_begin = PAGE_ALIGN((unsigned long)(&__init_begin));
+        init_end   = PAGE_ALIGN((unsigned long)(&__init_end));
+        for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
                 ClearPageReserved(virt_to_page(addr));
                 init_page_count(virt_to_page(addr));
                 free_page(addr);
@@ -407,7 +411,7 @@ void free_initmem(void)
         /* set up a new led state on systems shipped LED State panel */
         pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
 
-        printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
+        printk("%luk freed\n", (init_end - init_begin) >> 10);
 }
 
 
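free_initmem() now rounds both boundaries with PAGE_ALIGN so that only whole kernel pages are handed back, even where the linker script aligned __init_begin/__init_end more tightly than the configured page size. PAGE_ALIGN is the usual round-up-to-page macro; with 16 kB pages, for instance:

#define PAGE_ALIGN(addr)        (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

/* PAGE_SIZE = 0x4000: PAGE_ALIGN(0x10012340) == 0x10014000,
 * PAGE_ALIGN(0x10014000) == 0x10014000 (already aligned). */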
@@ -639,11 +643,13 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
                                  * Map the fault vector writable so we can
                                  * write the HPMC checksum.
                                  */
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
                                 if (address >= ro_start && address < ro_end
                                                         && address != fv_addr
                                                         && address != gw_addr)
                                     pte = __mk_pte(address, PAGE_KERNEL_RO);
                                 else
+#endif
                                     pte = __mk_pte(address, pgprot);
 
                                 if (address >= end_paddr)
@@ -874,8 +880,7 @@ unsigned long alloc_sid(void)
                         flush_tlb_all();        /* flush_tlb_all() calls recycle_sids() */
                         spin_lock(&sid_lock);
                 }
-                if (free_space_ids == 0)
-                        BUG();
+                BUG_ON(free_space_ids == 0);
         }
 
         free_space_ids--;
@@ -899,8 +904,7 @@ void free_sid(unsigned long spaceid)
 
         spin_lock(&sid_lock);
 
-        if (*dirty_space_offset & (1L << index))
-                BUG(); /* attempt to free space id twice */
+        BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
 
         *dirty_space_offset |= (1L << index);
         dirty_space_ids++;
@@ -975,7 +979,7 @@ static void recycle_sids(void)
 
 static unsigned long recycle_ndirty;
 static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
-static unsigned int recycle_inuse = 0;
+static unsigned int recycle_inuse;
 
 void flush_tlb_all(void)
 {
@@ -984,9 +988,7 @@ void flush_tlb_all(void)
         do_recycle = 0;
         spin_lock(&sid_lock);
         if (dirty_space_ids > RECYCLE_THRESHOLD) {
-                if (recycle_inuse) {
-                        BUG();  /* FIXME: Use a semaphore/wait queue here */
-                }
+                BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
                 get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
                 recycle_inuse++;
                 do_recycle++;
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index 45e02aa5bf4b..c0dd461fb8f1 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -1,13 +1,30 @@
 #ifndef _PARISC_PAGE_H
 #define _PARISC_PAGE_H
 
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT      12
-#define PAGE_SIZE       (1UL << PAGE_SHIFT)
-#define PAGE_MASK       (~(PAGE_SIZE-1))
+#if !defined(__KERNEL__)
+/* this is for userspace applications (4k page size) */
+# define PAGE_SHIFT     12      /* 4k */
+# define PAGE_SIZE      (1UL << PAGE_SHIFT)
+# define PAGE_MASK      (~(PAGE_SIZE-1))
+#endif
+
 
 #ifdef __KERNEL__
 #include <linux/config.h>
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define PAGE_SHIFT     12      /* 4k */
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define PAGE_SHIFT     14      /* 16k */
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define PAGE_SHIFT     16      /* 64k */
+#else
+# error "unknown default kernel page size"
+#endif
+#define PAGE_SIZE       (1UL << PAGE_SHIFT)
+#define PAGE_MASK       (~(PAGE_SIZE-1))
+
+
 #ifndef __ASSEMBLY__
 
 #include <asm/types.h>
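For reference, the constants each Kconfig choice selects work out as follows (plain arithmetic from the definitions above):

/* CONFIG_PARISC_PAGE_SIZE_4KB:  PAGE_SHIFT 12 -> PAGE_SIZE 0x1000  (4096)
 * CONFIG_PARISC_PAGE_SIZE_16KB: PAGE_SHIFT 14 -> PAGE_SIZE 0x4000  (16384)
 * CONFIG_PARISC_PAGE_SIZE_64KB: PAGE_SHIFT 16 -> PAGE_SIZE 0x10000 (65536)
 * PAGE_MASK on a 64-bit kernel: 0xfffffffffffff000, ...c000, ...0000
 * PAGE_MASK on a 32-bit kernel: 0xfffff000, 0xffffc000, 0xffff0000 */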
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 4e34c6b44059..aec089eb8b85 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -59,16 +59,15 @@
 #define ISTACK_SIZE     32768   /* Interrupt Stack Size */
 #define ISTACK_ORDER    3
 
-/* This is the size of the initially mapped kernel memory (i.e. currently
- * 0 to 1<<23 == 8MB */
+/* This is the size of the initially mapped kernel memory */
 #ifdef CONFIG_64BIT
-#define KERNEL_INITIAL_ORDER    24
+#define KERNEL_INITIAL_ORDER    24      /* 0 to 1<<24 = 16MB */
 #else
-#define KERNEL_INITIAL_ORDER    23
+#define KERNEL_INITIAL_ORDER    23      /* 0 to 1<<23 = 8MB */
 #endif
 #define KERNEL_INITIAL_SIZE     (1 << KERNEL_INITIAL_ORDER)
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
 #define PT_NLEVELS      3
 #define PGD_ORDER       1 /* Number of pages per pgd */
 #define PMD_ORDER       1 /* Number of pages per pmd */
@@ -111,11 +110,15 @@
 #define MAX_ADDRBITS    (PGDIR_SHIFT + BITS_PER_PGD)
 #define MAX_ADDRESS     (1UL << MAX_ADDRBITS)
 
 #define SPACEID_SHIFT   (MAX_ADDRBITS - 32)
 
 /* This calculates the number of initial pages we need for the initial
  * page tables */
-#define PT_INITIAL      (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
+# define PT_INITIAL     (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#else
+# define PT_INITIAL     (1)  /* all initial PTEs fit into one page */
+#endif
 
 /*
  * pgd entries used up by user/kernel:
@@ -160,6 +163,10 @@ extern void *vmalloc_start;
  * to zero */
 #define PTE_SHIFT       xlate_pabit(_PAGE_USER_BIT)
 
+/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
+#define PFN_PTE_SHIFT   12
+
+
 /* this is how many bits may be used by the file functions */
 #define PTE_FILE_MAX_BITS       (BITS_PER_LONG - PTE_SHIFT)
 
@@ -188,7 +195,8 @@ extern void *vmalloc_start;
 /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
  * are page-aligned, we don't care about the PAGE_OFFSET bits, except
  * for a few meta-information bits, so we shift the address to be
- * able to effectively address 40-bits of physical address space. */
+ * able to effectively address 40/42/44-bits of physical address space
+ * depending on 4k/16k/64k PAGE_SIZE */
 #define _PxD_PRESENT_BIT   31
 #define _PxD_ATTACHED_BIT  30
 #define _PxD_VALID_BIT     29
@@ -198,7 +206,7 @@ extern void *vmalloc_start;
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
-#define PxD_VALUE_SHIFT   (8)
+#define PxD_VALUE_SHIFT   (8)   /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
 
 #ifndef __ASSEMBLY__
 
@@ -246,6 +254,7 @@ extern void *vmalloc_start;
 #define __S110  PAGE_RWX
 #define __S111  PAGE_RWX
 
+
 extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
 
 /* initial page tables for 0-8MB for kernel */
@@ -272,7 +281,7 @@ extern unsigned long *empty_zero_page;
 #define pgd_flag(x)     (pgd_val(x) & PxD_FLAG_MASK)
 #define pgd_address(x)  ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 /* The first entry of the permanent pmd is not there if it contains
  * the gateway marker */
 #define pmd_none(x)     (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
@@ -282,7 +291,7 @@ extern unsigned long *empty_zero_page;
 #define pmd_bad(x)      (!(pmd_flag(x) & PxD_FLAG_VALID))
 #define pmd_present(x)  (pmd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pmd_clear(pmd_t *pmd) {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
         if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                 /* This is the entry pointing to the permanent pmd
                  * attached to the pgd; cannot clear it */
@@ -303,7 +312,7 @@ static inline void pmd_clear(pmd_t *pmd) {
 #define pgd_bad(x)      (!(pgd_flag(x) & PxD_FLAG_VALID))
 #define pgd_present(x)  (pgd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pgd_clear(pgd_t *pgd) {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
         if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
                 /* This is the permanent pmd attached to the pgd; cannot
                  * free it */
@@ -351,7 +360,7 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
 ({                                                                      \
         pte_t __pte;                                                    \
                                                                         \
-        pte_val(__pte) = ((addr)+pgprot_val(pgprot));                   \
+        pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));  \
                                                                         \
         __pte;                                                          \
 })
@@ -361,20 +370,16 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 {
         pte_t pte;
-        pte_val(pte) = (pfn << PAGE_SHIFT) | pgprot_val(pgprot);
+        pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
         return pte;
 }
 
-/* This takes a physical page address that is used by the remapping functions */
-#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
-
 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
 
 /* Permanent address of a page.  On parisc we don't have highmem. */
 
-#define pte_pfn(x)              (pte_val(x) >> PAGE_SHIFT)
+#define pte_pfn(x)              (pte_val(x) >> PFN_PTE_SHIFT)
 
 #define pte_page(pte)           (pfn_to_page(pte_pfn(pte)))
 
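With PFN_PTE_SHIFT fixed at 12, the low 12 bits of a pte are always left to the protection/software bits and the page frame number always starts at bit 12, so a pte no longer holds the physical address directly once PAGE_SHIFT grows beyond 12; mk_pte, pfn_pte and pte_pfn therefore shift by PFN_PTE_SHIFT rather than PAGE_SHIFT. A small sketch of the round trip these macros perform (hypothetical values for a 16 kB-page kernel):

unsigned long prot  = 0;                /* protection bits, all below bit 12 */
unsigned long paddr = 0x40008000UL;     /* physical address to map */
unsigned long pfn   = paddr >> PAGE_SHIFT;              /* 0x10002 with PAGE_SHIFT 14 */
unsigned long pte   = (pfn << PFN_PTE_SHIFT) | prot;    /* as in pfn_pte()/mk_pte() */

unsigned long pfn2   = pte >> PFN_PTE_SHIFT;            /* pte_pfn(): 0x10002 again */
unsigned long paddr2 = pfn2 << PAGE_SHIFT;              /* back to 0x40008000 */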
@@ -499,6 +504,26 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 #endif /* !__ASSEMBLY__ */
 
+
+/* TLB page size encoding - see table 3-1 in parisc20.pdf */
+#define _PAGE_SIZE_ENCODING_4K          0
+#define _PAGE_SIZE_ENCODING_16K         1
+#define _PAGE_SIZE_ENCODING_64K         2
+#define _PAGE_SIZE_ENCODING_256K        3
+#define _PAGE_SIZE_ENCODING_1M          4
+#define _PAGE_SIZE_ENCODING_4M          5
+#define _PAGE_SIZE_ENCODING_16M         6
+#define _PAGE_SIZE_ENCODING_64M         7
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
+#endif
+
+
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
                 remap_pfn_range(vma, vaddr, pfn, size, prot)
 