diff options
 arch/arm64/kernel/kaslr.c  | 6 +++---
 arch/arm64/kernel/module.c | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index b09b6f75f759..06941c1fe418 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
 		/*
-		 * Randomize the module region over a 4 GB window covering the
+		 * Randomize the module region over a 2 GB window covering the
 		 * kernel. This reduces the risk of modules leaking information
 		 * about the address of the kernel itself, but results in
 		 * branches between modules and the core kernel that are
 		 * resolved via PLTs. (Branches between modules will be
 		 * resolved normally.)
 		 */
-		module_range = SZ_4G - (u64)(_end - _stext);
-		module_alloc_base = max((u64)_end + offset - SZ_4G,
+		module_range = SZ_2G - (u64)(_end - _stext);
+		module_alloc_base = max((u64)_end + offset - SZ_2G,
 					(u64)MODULES_VADDR);
 	} else {
 		/*
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f713e2fc4d75..1e418e69b58c 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
 	 * can simply omit this fallback in that case.
 	 */
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				module_alloc_base + SZ_4G, GFP_KERNEL,
+				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				__builtin_return_address(0));
 
