-rw-r--r--  arch/s390/kernel/entry.S   |  9 ++++++---
-rw-r--r--  arch/s390/kernel/entry64.S |  1 +
-rw-r--r--  arch/s390/mm/init.c        | 35 +++++++++++++++++++++--------------
3 files changed, 28 insertions(+), 17 deletions(-)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1a434a7004ee..d8948c342caf 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -228,8 +228,9 @@ sysc_do_svc:
 sysc_nr_ok:
 	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
 sysc_do_restart:
+	l	%r8,BASED(.Lsysc_table)
 	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-	l	%r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+	l	%r8,0(%r7,%r8)		# get system call addr.
 	bnz	BASED(sysc_tracesys)
 	basr	%r14,%r8		# call sys_xxxx
 	st	%r2,SP_R2(%r15)		# store return value (change R2 on stack)
@@ -330,9 +331,10 @@ sysc_tracesys:
 	basr	%r14,%r1
 	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_tracenogo)
+	l	%r8,BASED(.Lsysc_table)
 	l	%r7,SP_R2(%r15)		# strace might have changed the
 	sll	%r7,2			#  system call
-	l	%r8,sys_call_table-system_call(%r7,%r13)
+	l	%r8,0(%r7,%r8)
 sysc_tracego:
 	lm	%r3,%r6,SP_R3(%r15)
 	l	%r2,SP_ORIG_R2(%r15)
@@ -1009,6 +1011,7 @@ cleanup_io_leave_insn:
 .Ltrace:	.long	syscall_trace
 .Lvfork:	.long	sys_vfork
 .Lschedtail:	.long	schedule_tail
+.Lsysc_table:	.long	sys_call_table
 
 .Lcritical_start:
 	.long	__critical_start + 0x80000000
@@ -1017,8 +1020,8 @@ cleanup_io_leave_insn:
 .Lcleanup_critical:
 	.long	cleanup_critical
 
+	.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
-
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index edad60771673..1ca499fa54b4 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -991,6 +991,7 @@ cleanup_io_leave_insn:
 .Lcritical_end:
 	.quad	__critical_end
 
+	.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esame
 sys_call_table:
 #include "syscalls.S"
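
Both entry files gain a .section .rodata directive ahead of the SYSCALL() expansion, so the generated sys_call_table is emitted into the read-only data section; the 31-bit entry.S additionally fetches the table's address through the new .Lsysc_table literal-pool entry instead of addressing it relative to system_call. As a rough C analogue (illustrative only, with hypothetical names rather than the kernel's declarations), a const-qualified handler table is typically placed in .rodata and indexed the same way:

/*
 * Illustrative sketch only; all names are hypothetical, not the
 * kernel's.  A const handler table is typically emitted into .rodata
 * by the toolchain, mirroring what the .section directive above does
 * for sys_call_table.
 */
typedef long (*handler_t)(void);

static long demo_nop(void)
{
	return 0;
}

static const handler_t demo_call_table[] = {	/* ends up in .rodata */
	demo_nop,
	demo_nop,
};

/* mirrors "l %r8,0(%r7,%r8); basr %r14,%r8", with %r7 holding nr * 4 */
long demo_dispatch(unsigned int nr)
{
	return demo_call_table[nr]();
}
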
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81dce185f836..eb6ebfef134a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -33,6 +34,7 @@
 #include <asm/lowcore.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -89,17 +91,6 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
 extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
@@ -116,6 +107,10 @@ void __init paging_init(void)
 	unsigned long pfn = 0;
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
+	unsigned long ro_start_pfn, ro_end_pfn;
+
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
 	/* unmap whole virtual address space */
 
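
The two assignments just added bracket the kernel's read-only section at page granularity: __start_rodata is rounded down and __end_rodata rounded up, so every page containing any rodata falls inside [ro_start_pfn, ro_end_pfn). A minimal userspace sketch of that rounding, with the PFN_DOWN()/PFN_UP() definitions taken from <linux/pfn.h> and hypothetical section addresses (4 KB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	/* hypothetical section boundaries, not real kernel addresses */
	unsigned long start_rodata = 0x00234560UL;
	unsigned long end_rodata   = 0x002a1234UL;

	printf("ro_start_pfn = %#lx\n", PFN_DOWN(start_rodata));	/* 0x234 */
	printf("ro_end_pfn   = %#lx\n", PFN_UP(end_rodata));		/* 0x2a2 */
	return 0;
}
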
@@ -143,7 +138,10 @@ void __init paging_init(void)
 		pg_dir++;
 
 		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			pte = pfn_pte(pfn, PAGE_KERNEL);
+			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+			else
+				pte = pfn_pte(pfn, PAGE_KERNEL);
 			if (pfn >= max_low_pfn)
 				pte_clear(&init_mm, 0, &pte);
 			set_pte(pg_table, pte);
@@ -175,6 +173,7 @@ void __init paging_init(void)
 }
 
 #else /* CONFIG_64BIT */
+
 void __init paging_init(void)
 {
 	pgd_t * pg_dir;
@@ -186,13 +185,15 @@ void __init paging_init(void)
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
 		_KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
-
 	unsigned long zones_size[MAX_NR_ZONES];
 	unsigned long dma_pfn, high_pfn;
+	unsigned long ro_start_pfn, ro_end_pfn;
 
 	memset(zones_size, 0, sizeof(zones_size));
 	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
 	high_pfn = max_low_pfn;
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
 	if (dma_pfn > high_pfn)
 		zones_size[ZONE_DMA] = high_pfn;
@@ -231,7 +232,10 @@ void __init paging_init(void)
 			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
 
 			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-				pte = pfn_pte(pfn, PAGE_KERNEL);
+				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+				else
+					pte = pfn_pte(pfn, PAGE_KERNEL);
 				if (pfn >= max_low_pfn) {
 					pte_clear(&init_mm, 0, &pte);
 					continue;
@@ -282,6 +286,9 @@ void __init mem_init(void)
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >>10,
 		initsize >> 10);
+	printk("Write protected kernel read-only data: %#lx - %#lx\n",
+	       (unsigned long)&__start_rodata,
+	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
 }
 
 void free_initmem(void)
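
The added printk reports the protected range; PFN_ALIGN() rounds __end_rodata up to the next page boundary, so the second address printed is the last byte of the last page that the loops above mapped with _PAGE_RO. A small sketch of that rounding, with the PFN_ALIGN() definition taken from <linux/pfn.h> and a hypothetical end address (4 KB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

int main(void)
{
	/* hypothetical __end_rodata value, not a real kernel address */
	unsigned long end_rodata = 0x002a1234UL;

	/* prints 0x2a1fff: the last byte of the last write-protected page */
	printf("%#lx\n", PFN_ALIGN(end_rodata) - 1);
	return 0;
}
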