author    Heiko Carstens <heiko.carstens@de.ibm.com>	2016-05-10 10:28:28 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-06-13 09:58:16 -0400
commit    bab247ff5f669216e3ed2f9a4034c540187e874c (patch)
tree      a2913982c5d1ac465924c8e1632737d78dca8405 /arch/s390/mm
parent    e8a97e42dc986a081017b1e77e3a3c7f02a0a638 (diff)
s390/vmem: simplify vmem code for read-only mappings
For the kernel identity mapping, map everything read-write and subsequently call set_memory_ro() to make the ro section read-only. This simplifies the code a lot.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
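In short, the new vmem_map_init() no longer splits memblock regions around the read-only section; it maps everything writable first and revokes write access afterwards. A condensed sketch of the resulting flow, abridged from the diff below (declarations and the body of vmem_add_mem() omitted):

	/* Map all physical memory 1:1 with writable pages/segments/regions. */
	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);

	/* Then make the kernel text/rodata range read-only in one pass. */
	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	set_memory_ro(ro_start, (ro_end - ro_start) >> PAGE_SHIFT);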
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/vmem.c	37
1 file changed, 9 insertions(+), 28 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 0a7b03496f67..b200f976c36b 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -11,6 +11,7 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
+#include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -77,7 +78,7 @@ pte_t __ref *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size)
 {
 	unsigned long end = start + size;
 	unsigned long address = start;
@@ -99,8 +100,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
 		    !debug_pagealloc_enabled()) {
-			pud_val(*pu_dir) = address |
-				pgprot_val(ro ? REGION3_KERNEL_RO : REGION3_KERNEL);
+			pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
 			address += PUD_SIZE;
 			continue;
 		}
@@ -114,8 +114,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
 		    !debug_pagealloc_enabled()) {
-			pmd_val(*pm_dir) = address |
-				pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
+			pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
 			address += PMD_SIZE;
 			continue;
 		}
@@ -127,8 +126,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		pte_val(*pt_dir) = address |
-			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+		pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
 		address += PAGE_SIZE;
 	}
 	ret = 0;
@@ -338,7 +336,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
 	if (ret)
 		goto out_free;
 
-	ret = vmem_add_mem(start, size, 0);
+	ret = vmem_add_mem(start, size);
 	if (ret)
 		goto out_remove;
 	goto out;
@@ -361,29 +359,12 @@ void __init vmem_map_init(void)
 {
 	unsigned long ro_start, ro_end;
 	struct memblock_region *reg;
-	phys_addr_t start, end;
 
+	for_each_memblock(memory, reg)
+		vmem_add_mem(reg->base, reg->size);
 	ro_start = PFN_ALIGN((unsigned long)&_stext);
 	ro_end = (unsigned long)&_eshared & PAGE_MASK;
-	for_each_memblock(memory, reg) {
-		start = reg->base;
-		end = reg->base + reg->size;
-		if (start >= ro_end || end <= ro_start)
-			vmem_add_mem(start, end - start, 0);
-		else if (start >= ro_start && end <= ro_end)
-			vmem_add_mem(start, end - start, 1);
-		else if (start >= ro_start) {
-			vmem_add_mem(start, ro_end - start, 1);
-			vmem_add_mem(ro_end, end - ro_end, 0);
-		} else if (end < ro_end) {
-			vmem_add_mem(start, ro_start - start, 0);
-			vmem_add_mem(ro_start, end - ro_start, 1);
-		} else {
-			vmem_add_mem(start, ro_start - start, 0);
-			vmem_add_mem(ro_start, ro_end - ro_start, 1);
-			vmem_add_mem(ro_end, end - ro_end, 0);
-		}
-	}
+	set_memory_ro(ro_start, (ro_end - ro_start) >> PAGE_SHIFT);
 }
 
 /*