author    Heiko Carstens <heiko.carstens@de.ibm.com>  2012-10-05 10:52:18 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2012-10-09 08:17:01 -0400
commit    c972cc60c23f5a6309292bfcc91a441743ba027e
tree      04f8fc1d4e78979d28a897a90bba5c3321555967
parent    021d48be48481821f6e3f53028915c0571874135
s390/vmalloc: have separate modules area
Add a special module area on top of the vmalloc area, which may only be
used for modules and bpf jit generated code. This makes sure that inter
module branches will always happen without a trampoline and in addition
having all the code within a 2GB frame is branch prediction unit friendly.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
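For background on the 2GB figure: the s390 relative branch instructions (e.g. brasl) encode a signed 32-bit offset counted in halfwords, roughly +/-4GB of reach, so any two addresses inside a single 2GB frame are always within direct branch range. The following is a minimal userspace sketch, not part of the patch, of the reachability check that a dedicated 2GB module area makes unnecessary:

#include <stdint.h>
#include <stdio.h>

/*
 * brasl takes a signed 32-bit offset counted in halfwords, so the
 * reachable byte range is [-2^32, 2^32 - 2] relative to the branch.
 */
static int in_brasl_range(uint64_t from, uint64_t to)
{
	int64_t dist = (int64_t)(to - from);

	return dist >= -(1LL << 32) && dist <= (1LL << 32) - 2;
}

int main(void)
{
	uint64_t modules_end = 1ULL << 42;	/* 3-level layout, see setup.c below */
	uint64_t modules_vaddr = modules_end - (1ULL << 31);	/* 2GB area */

	/* Two modules at opposite ends of the area: still directly reachable. */
	printf("reachable: %d\n",
	       in_brasl_range(modules_vaddr, modules_end - 2));
	return 0;
}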
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/pgtable.h  | 21
-rw-r--r--  arch/s390/kernel/module.c        | 11
-rw-r--r--  arch/s390/kernel/setup.c         | 13
-rw-r--r--  arch/s390/mm/dump_pagetables.c   | 13
4 files changed, 46 insertions(+), 12 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 75b91bb772bd..dd647c919a66 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -119,13 +119,12 @@ static inline int is_zero_pfn(unsigned long pfn)
 
 #ifndef __ASSEMBLY__
 /*
- * The vmalloc area will always be on the topmost area of the kernel
- * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
- * which should be enough for any sane case.
- * By putting vmalloc at the top, we maximise the gap between physical
- * memory and vmalloc to catch misplaced memory accesses. As a side
- * effect, this also makes sure that 64 bit module code cannot be used
- * as system call address.
+ * The vmalloc and module area will always be on the topmost area of the kernel
+ * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
+ * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
+ * modules will reside. That makes sure that inter module branches always
+ * happen without trampolines and in addition the placement within a 2GB frame
+ * is branch prediction unit friendly.
  */
 extern unsigned long VMALLOC_START;
 extern unsigned long VMALLOC_END;
@@ -133,6 +132,14 @@ extern struct page *vmemmap;
 
 #define VMEM_MAX_PHYS ((unsigned long) vmemmap)
 
+#ifdef CONFIG_64BIT
+extern unsigned long MODULES_VADDR;
+extern unsigned long MODULES_END;
+#define MODULES_VADDR	MODULES_VADDR
+#define MODULES_END	MODULES_END
+#define MODULES_LEN	(1UL << 31)
+#endif
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  *  |   PFRA          |    |  OS  |
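A note on the self-referential defines in the hunk above: `#define MODULES_VADDR MODULES_VADDR` is a common kernel header idiom. It makes `#ifdef MODULES_VADDR` true for generic code while the symbol itself remains a runtime-assigned variable rather than a compile-time constant. A standalone sketch of the pattern (the assigned value is hypothetical, matching the 3-level layout computed later in setup.c):

#include <stdio.h>

unsigned long MODULES_VADDR;		/* set at boot in the real kernel */
#define MODULES_VADDR MODULES_VADDR	/* lets generic code #ifdef-test it */

int main(void)
{
#ifdef MODULES_VADDR			/* true thanks to the self-define */
	MODULES_VADDR = 0x3ff80000000UL;	/* hypothetical boot-time value */
	printf("modules start at 0x%lx\n", MODULES_VADDR);
#endif
	return 0;
}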
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 46412b1d7e1e..4610deafd953 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -44,6 +44,17 @@
 #define PLT_ENTRY_SIZE 20
 #endif /* CONFIG_64BIT */
 
+#ifdef CONFIG_64BIT
+void *module_alloc(unsigned long size)
+{
+	if (PAGE_ALIGN(size) > MODULES_LEN)
+		return NULL;
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				    GFP_KERNEL, PAGE_KERNEL, -1,
+				    __builtin_return_address(0));
+}
+#endif
+
 /* Free memory returned from module_alloc */
 void module_free(struct module *mod, void *module_region)
 {
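The module_alloc() added above overrides the generic allocator so that module text lands strictly inside [MODULES_VADDR, MODULES_END); requests are rounded up to whole pages and rejected once they exceed the 2GB area, and the remaining __vmalloc_node_range() arguments request ordinary kernel mappings (GFP_KERNEL, PAGE_KERNEL) on any NUMA node (-1). A small userspace model of just the size guard, assuming 4KB pages:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MODULES_LEN	(1UL << 31)

/*
 * Mirrors the guard in module_alloc(): fail once the page-rounded
 * size can no longer fit into the 2GB module area.
 */
static int alloc_would_fail(unsigned long size)
{
	return PAGE_ALIGN(size) > MODULES_LEN;
}

int main(void)
{
	printf("%d %d\n", alloc_would_fail(MODULES_LEN),
	       alloc_would_fail(MODULES_LEN + 1));	/* prints: 0 1 */
	return 0;
}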
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index bfb48f18169c..b1f2be9aaaad 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -105,6 +105,11 @@ EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmemmap;
 EXPORT_SYMBOL(vmemmap);
 
+#ifdef CONFIG_64BIT
+unsigned long MODULES_VADDR;
+unsigned long MODULES_END;
+#endif
+
 /* An array with a pointer to the lowcore of every CPU. */
 struct _lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
@@ -544,19 +549,23 @@ static void __init setup_memory_end(void)
 
 	/* Choose kernel address space layout: 2, 3, or 4 levels. */
 #ifdef CONFIG_64BIT
-	vmalloc_size = VMALLOC_END ?: 128UL << 30;
+	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
 	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
 	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
 	if (tmp <= (1UL << 42))
 		vmax = 1UL << 42;	/* 3-level kernel page table */
 	else
 		vmax = 1UL << 53;	/* 4-level kernel page table */
+	/* module area is at the end of the kernel address space. */
+	MODULES_END = vmax;
+	MODULES_VADDR = MODULES_END - MODULES_LEN;
+	VMALLOC_END = MODULES_VADDR;
 #else
 	vmalloc_size = VMALLOC_END ?: 96UL << 20;
 	vmax = 1UL << 31;	/* 2-level kernel page table */
-#endif
 	/* vmalloc area is at the end of the kernel address space. */
 	VMALLOC_END = vmax;
+#endif
 	VMALLOC_START = vmax - vmalloc_size;
 
 	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
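To make the 64 bit arithmetic above concrete, here is a worked example as a userspace sketch (assuming a 64-bit host, the 3-level case, and the default vmalloc size from the patch); the module area occupies the top 2GB of the kernel address space and vmalloc ends where modules begin:

#include <stdio.h>

#define MODULES_LEN	(1UL << 31)

int main(void)
{
	unsigned long vmax = 1UL << 42;	/* 3-level kernel page table */
	unsigned long vmalloc_size = (128UL << 30) - MODULES_LEN;
	unsigned long MODULES_END, MODULES_VADDR, VMALLOC_END, VMALLOC_START;

	MODULES_END = vmax;				/* 0x40000000000 */
	MODULES_VADDR = MODULES_END - MODULES_LEN;	/* 0x3ff80000000 */
	VMALLOC_END = MODULES_VADDR;			/* vmalloc ends at modules */
	VMALLOC_START = vmax - vmalloc_size;		/* 0x3e080000000 */

	printf("modules: 0x%lx - 0x%lx\n", MODULES_VADDR, MODULES_END);
	printf("vmalloc: 0x%lx - 0x%lx\n", VMALLOC_START, VMALLOC_END);
	return 0;
}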
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index cd1c62d160ed..cbc6668acb85 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,6 +18,9 @@ enum address_markers_idx {
 	KERNEL_END_NR,
 	VMEMMAP_NR,
 	VMALLOC_NR,
+#ifdef CONFIG_64BIT
+	MODULES_NR,
+#endif
 };
 
 static struct addr_marker address_markers[] = {
@@ -26,6 +29,9 @@ static struct addr_marker address_markers[] = {
 	[KERNEL_END_NR]	= {(unsigned long)&_end, "Kernel Image End"},
 	[VMEMMAP_NR]	= {0, "vmemmap Area"},
 	[VMALLOC_NR]	= {0, "vmalloc Area"},
+#ifdef CONFIG_64BIT
+	[MODULES_NR]	= {0, "Modules Area"},
+#endif
 	{ -1, NULL }
 };
 
@@ -205,11 +211,12 @@ static int pt_dump_init(void)
 	 * kernel ASCE. We need this to keep the page table walker functions
 	 * from accessing non-existent entries.
 	 */
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_32BIT
+	max_addr = 1UL << 31;
+#else
 	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
 	max_addr = 1UL << (max_addr * 11 + 31);
-#else
-	max_addr = 1UL << 31;
+	address_markers[MODULES_NR].start_address = MODULES_VADDR;
 #endif
 	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
 	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
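The max_addr computation in the 64 bit branch above can be unpacked as follows: each s390 translation table level resolves 11 address bits (2048 entries per table), and the designation-type bits extracted from the kernel ASCE give the topmost table level, so the formula yields 2^31, 2^42 or 2^53 bytes of addressable space. A small sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	/*
	 * ASCE designation type: 0 = segment table (2-level layout),
	 * 1 = region-third table (3-level), 2 = region-second table (4-level).
	 */
	for (unsigned long long type = 0; type <= 2; type++) {
		unsigned long long max_addr = 1ULL << (type * 11 + 31);

		printf("type %llu: max_addr = 2^%llu = 0x%llx\n",
		       type, type * 11 + 31, max_addr);
	}
	return 0;
}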