author     Heiko Carstens <heiko.carstens@de.ibm.com>    2006-07-01 07:36:31 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>         2006-07-01 12:56:04 -0400
commit     d882b172512758703ff8d9efb96505eaaee48d2e
tree       64b61d69b4af1395b80446fbc61f0488fbaeedb3    /arch/s390/mm/init.c
parent     a581c2a4697ee264699b364399b73477af408e00
[PATCH] s390: put sys_call_table into .rodata section and write protect it
Put s390's syscall tables into the .rodata section and write protect this
section to prevent misuse of it. Suggested by Arjan van de Ven
<arjan@infradead.org>.

Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
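Before the diff itself, here is a minimal, self-contained sketch (userspace C, not kernel code) of the decision the patch adds to both paging_init() loops below: compute the page-frame range covered by the kernel's read-only data and map those frames read-only instead of read-write. PFN_DOWN/PFN_UP are re-implemented locally for the demo, and the section addresses are invented stand-ins for &__start_rodata and &__end_rodata, which in the real kernel come from the linker script.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)                   /* address -> page frame, rounded down */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* address -> page frame, rounded up */

int main(void)
{
        /* Hypothetical section boundaries, for illustration only. */
        unsigned long start_rodata = 0x0010a2c0UL;
        unsigned long end_rodata   = 0x0012f800UL;

        unsigned long ro_start_pfn = PFN_DOWN(start_rodata);
        unsigned long ro_end_pfn   = PFN_UP(end_rodata);
        unsigned long samples[] = {
                ro_start_pfn - 1, ro_start_pfn, ro_end_pfn - 1, ro_end_pfn
        };
        unsigned int i;

        printf("read-only page frames: [%#lx, %#lx)\n", ro_start_pfn, ro_end_pfn);

        /* Same per-page decision the patch adds to paging_init(): frames
         * inside the range get a read-only pte, all others stay writable. */
        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("pfn %#lx -> %s\n", samples[i],
                       (samples[i] >= ro_start_pfn && samples[i] < ro_end_pfn)
                       ? "_PAGE_RO" : "PAGE_KERNEL");
        return 0;
}

In the kernel hunks below the same check selects between __pgprot(_PAGE_RO) and PAGE_KERNEL while the page tables are built, so the protection is in place from the moment paging is enabled rather than being applied after the fact.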
Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--    arch/s390/mm/init.c    35
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81dce185f836..eb6ebfef134a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -33,6 +34,7 @@
 #include <asm/lowcore.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -89,17 +91,6 @@ void show_mem(void)
         printk("%d pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
 extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
@@ -116,6 +107,10 @@ void __init paging_init(void)
         unsigned long pfn = 0;
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
+        unsigned long ro_start_pfn, ro_end_pfn;
+
+        ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+        ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
         /* unmap whole virtual address space */
 
@@ -143,7 +138,10 @@ void __init paging_init(void)
                 pg_dir++;
 
                 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-                        pte = pfn_pte(pfn, PAGE_KERNEL);
+                        if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+                                pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+                        else
+                                pte = pfn_pte(pfn, PAGE_KERNEL);
                         if (pfn >= max_low_pfn)
                                 pte_clear(&init_mm, 0, &pte);
                         set_pte(pg_table, pte);
@@ -175,6 +173,7 @@ void __init paging_init(void)
 }
 
 #else /* CONFIG_64BIT */
+
 void __init paging_init(void)
 {
         pgd_t * pg_dir;
@@ -186,13 +185,15 @@ void __init paging_init(void)
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
                 _KERN_REGION_TABLE;
         static const int ssm_mask = 0x04000000L;
-
         unsigned long zones_size[MAX_NR_ZONES];
         unsigned long dma_pfn, high_pfn;
+        unsigned long ro_start_pfn, ro_end_pfn;
 
         memset(zones_size, 0, sizeof(zones_size));
         dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
         high_pfn = max_low_pfn;
+        ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+        ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
         if (dma_pfn > high_pfn)
                 zones_size[ZONE_DMA] = high_pfn;
@@ -231,7 +232,10 @@ void __init paging_init(void)
                         pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
 
                         for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-                                pte = pfn_pte(pfn, PAGE_KERNEL);
+                                if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+                                        pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+                                else
+                                        pte = pfn_pte(pfn, PAGE_KERNEL);
                                 if (pfn >= max_low_pfn) {
                                         pte_clear(&init_mm, 0, &pte);
                                         continue;
@@ -282,6 +286,9 @@ void __init mem_init(void)
                 reservedpages << (PAGE_SHIFT-10),
                 datasize >>10,
                 initsize >> 10);
+        printk("Write protected kernel read-only data: %#lx - %#lx\n",
+               (unsigned long)&__start_rodata,
+               PFN_ALIGN((unsigned long)&__end_rodata) - 1);
 }
 
 void free_initmem(void)
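A note on the new printk in mem_init(): since whole pages are write protected, the end of the range is reported as PFN_ALIGN(&__end_rodata) - 1, i.e. the last byte of the page containing __end_rodata after rounding up to a page boundary. A small sketch of that arithmetic, again with PFN_ALIGN re-implemented for the demo and a made-up address:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

int main(void)
{
        unsigned long end_rodata = 0x0012f800UL;   /* hypothetical __end_rodata */

        /* Round up to the first byte past the last protected page, then step
         * back one byte to get the inclusive upper bound the printk reports. */
        printf("last write-protected byte: %#lx\n", PFN_ALIGN(end_rodata) - 1);
        return 0;
}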