Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/asm-offsets.c |  8
-rw-r--r--  arch/parisc/kernel/entry.S       | 56
-rw-r--r--  arch/parisc/kernel/head.S        |  4
-rw-r--r--  arch/parisc/kernel/setup.c       | 14
-rw-r--r--  arch/parisc/kernel/syscall.S     |  4
-rw-r--r--  arch/parisc/kernel/traps.c       | 35
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S |  9
7 files changed, 80 insertions(+), 50 deletions(-)
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 59001cea13f9..d2f62570a7b1 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -290,6 +290,14 @@ int main(void)
 	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
 	BLANK();
+	/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
+	 * and kernel data on physical huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+	DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#else
+	DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
+#endif
+	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
 	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c5ef4081b01d..623496c11756 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -502,21 +502,38 @@
 	STREG		\pte,0(\ptp)
 	.endm
 
+	/* We have (depending on the page size):
+	 * - 38 to 52-bit Physical Page Number
+	 * - 12 to 26-bit page offset
+	 */
 	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
 	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
 	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
+	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
 
 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-	.macro		convert_for_tlb_insert20 pte
+	.macro		convert_for_tlb_insert20 pte,tmp
+#ifdef CONFIG_HUGETLB_PAGE
+	copy		\pte,\tmp
+	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+
+	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
+				(63-58)+PAGE_ADD_SHIFT,\pte
+	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
+	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+#else /* Huge pages disabled */
 	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
 				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
 	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
 				(63-58)+PAGE_ADD_SHIFT,\pte
+#endif
 	.endm
 
 	/* Convert the pte and prot to tlb insertion values.  How
 	 * this happens is quite subtle, read below */
-	.macro		make_insert_tlb	spc,pte,prot
+	.macro		make_insert_tlb	spc,pte,prot,tmp
 	space_to_prot	\spc \prot	/* create prot id from space */
 	/* The following is the real subtlety.  This is depositing
 	 * T <-> _PAGE_REFTRAP
@@ -553,7 +570,7 @@
 	depdi		1,12,1,\prot
 
 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-	convert_for_tlb_insert20 \pte
+	convert_for_tlb_insert20 \pte \tmp
 	.endm
 
 	/* Identical macro to make_insert_tlb above, except it
@@ -646,17 +663,12 @@
 
 
 	/*
-	 * Align fault_vector_20 on 4K boundary so that both
-	 * fault_vector_11 and fault_vector_20 are on the
-	 * same page. This is only necessary as long as we
-	 * write protect the kernel text, which we may stop
-	 * doing once we use large page translations to cover
-	 * the static part of the kernel address space.
+	 * Fault_vectors are architecturally required to be aligned on a 2K
+	 * boundary
 	 */
 
 	.text
-
-	.align 4096
+	.align 2048
 
 ENTRY(fault_vector_20)
 	/* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt          pte,prot
 
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt          pte,prot
 
@@ -1514,7 +1526,7 @@ itlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
@@ -1610,7 +1622,7 @@ dbit_trap_20:
 	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index e7d64527aff9..75aa0db9f69e 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -69,7 +69,7 @@ $bss_loop:
 	stw,ma          %arg2,4(%r1)
 	stw,ma          %arg3,4(%r1)
 
-	/* Initialize startup VM. Just map first 8/16 MB of memory */
+	/* Initialize startup VM. Just map first 16/32 MB of memory */
 	load32		PA(swapper_pg_dir),%r4
 	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 	mtctl		%r4,%cr25	/* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
 	/* Now initialize the PTEs themselves.  We use RWX for
 	 * everything ... it will get remapped correctly later */
 	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
-	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
 	load32		PA(pg0),%r1
 
 $pgt_fill_loop:
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 72a3c658ad7b..f7ea626e29c9 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "The 32-bit Kernel has started...\n");
 #endif
 
-	printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
+	printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
+		(int)(PAGE_SIZE / 1024));
+#ifdef CONFIG_HUGETLB_PAGE
+	printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
+		1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
+#else
+	printk(KERN_CONT "disabled");
+#endif
+	printk(KERN_CONT ".\n");
+
 
 	pdc_console_init();
 
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
 void start_parisc(void)
 {
 	extern void start_kernel(void);
+	extern void early_trap_init(void);
 
 	int ret, cpunum;
 	struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
 		panic("must have an fpu to boot linux");
 	}
 
+	early_trap_init(); /* initialize checksum of fault_vector */
+
 	start_kernel();
 	// not reached
 }
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 0b8d26d3ba43..3fbd7252a4b2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -369,7 +369,7 @@ tracesys_exit:
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
 	ldo	TASK_REGS(%r1),%r26
-	bl	do_syscall_trace_exit,%r2
+	BL	do_syscall_trace_exit,%r2
 	STREG   %r28,TASK_PT_GR28(%r1)          /* save return value now */
 	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
 	LDREG	TI_TASK(%r1), %r1
@@ -390,7 +390,7 @@ tracesys_sigexit:
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
-	bl	do_syscall_trace_exit,%r2
+	BL	do_syscall_trace_exit,%r2
 	ldo	TASK_REGS(%r1),%r26
 
 	ldil	L%syscall_exit_rfi,%r1
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b99b39f1da02..553b09855cfd 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 }
 
 
-int __init check_ivt(void *iva)
+void __init initialize_ivt(const void *iva)
 {
 	extern u32 os_hpmc_size;
 	extern const u32 os_hpmc[];
@@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
 	u32 *hpmcp;
 	u32 length;
 
-	if (strcmp((char *)iva, "cows can fly"))
-		return -1;
+	if (strcmp((const char *)iva, "cows can fly"))
+		panic("IVT invalid");
 
 	ivap = (u32 *)iva;
 
@@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
 	    check += ivap[i];
 
 	ivap[5] = -check;
-
-	return 0;
 }
 
-#ifndef CONFIG_64BIT
-extern const void fault_vector_11;
-#endif
-extern const void fault_vector_20;
 
-void __init trap_init(void)
+/* early_trap_init() is called before we set up kernel mappings and
+ * write-protect the kernel */
+void __init early_trap_init(void)
 {
-	void *iva;
+	extern const void fault_vector_20;
 
-	if (boot_cpu_data.cpu_type >= pcxu)
-		iva = (void *) &fault_vector_20;
-	else
-#ifdef CONFIG_64BIT
-		panic("Can't boot 64-bit OS on PA1.1 processor!");
-#else
-		iva = (void *) &fault_vector_11;
+#ifndef CONFIG_64BIT
+	extern const void fault_vector_11;
+	initialize_ivt(&fault_vector_11);
 #endif
 
-	if (check_ivt(iva))
-		panic("IVT invalid");
+	initialize_ivt(&fault_vector_20);
+}
+
+void __init trap_init(void)
+{
 }
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 0dacc5ca555a..308f29081d46 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -60,7 +60,7 @@ SECTIONS
 		EXIT_DATA
 	}
 	PERCPU_SECTION(8)
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(HUGEPAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
 
@@ -116,7 +116,7 @@ SECTIONS
 	 * that we can properly leave these
 	 * as writable
 	 */
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(HUGEPAGE_SIZE);
 	data_start = .;
 
 	EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
 	_edata = .;
 
 	/* BSS */
-	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
+	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
+
+	/* bootmap is allocated in setup_bootmem() directly behind bss. */
 
+	. = ALIGN(HUGEPAGE_SIZE);
 	_end = . ;
 
 	STABS_DEBUG