author    Rick Edgecombe <rick.p.edgecombe@intel.com>  2019-04-25 20:11:37 -0400
committer Ingo Molnar <mingo@kernel.org>  2019-04-30 06:37:58 -0400
commit    1a7b7d9220819afe79d1ec5d759fe4349bd2453e (patch)
tree      d05ed01580bb29bb146ee50b7cb0019736dd9d4e /kernel/module.c
parent    868b104d7379e28013e9d48bdd2db25e0bdcf751 (diff)
modules: Use vmalloc special flag
Use the new VM_FLUSH_RESET_PERMS vmalloc flag for handling the freeing of
special-permissioned memory, and remove the places where memory was set RW
before freeing, which is no longer needed.

Since freeing of VM_FLUSH_RESET_PERMS memory is not supported in an
interrupt by vmalloc, the freeing of init sections is moved to a work
queue. Instead of call_rcu(), it now uses synchronize_rcu() in the work
queue.

Lastly, there is now a WARN_ON() in module_memfree(), since it should not
be called in an interrupt with special memory, as is required for
VM_FLUSH_RESET_PERMS.

Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <akpm@linux-foundation.org>
Cc: <ard.biesheuvel@linaro.org>
Cc: <deneen.t.dock@intel.com>
Cc: <kernel-hardening@lists.openwall.com>
Cc: <kristen@linux.intel.com>
Cc: <linux_dti@icloud.com>
Cc: <will.deacon@arm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jessica Yu <jeyu@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190426001143.4983-18-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
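[Editor's note] For readers unfamiliar with the flag, below is a minimal
sketch of the allocate/protect/free lifecycle that VM_FLUSH_RESET_PERMS
enables. It is not code from this patch; the helper names
(alloc_special_perm_region() and friends) are hypothetical. The point is
that vfree() on a region tagged with set_vm_flush_reset_perms() flushes
the TLB and resets direct-map permissions itself, so no explicit
set_memory_rw()/set_memory_nx() pass is needed before freeing:

    #include <linux/moduleloader.h>
    #include <linux/preempt.h>
    #include <linux/set_memory.h>
    #include <linux/vmalloc.h>

    /* Hypothetical helper: allocate a region whose permissions are
     * reset automatically when it is freed. */
    static void *alloc_special_perm_region(unsigned long size)
    {
        void *p = module_alloc(size);

        if (!p)
            return NULL;
        /* Tag the vmalloc area so vfree() flushes and resets perms. */
        set_vm_flush_reset_perms(p);
        return p;
    }

    /* Make the region read-only and executable, as module_enable_ro()
     * does for module text after this patch. */
    static void protect_special_perm_region(void *p, int npages)
    {
        set_memory_ro((unsigned long)p, npages);
        set_memory_x((unsigned long)p, npages);
    }

    /* Freeing no longer needs set_memory_rw()/set_memory_nx(), but it
     * must happen in process context, not in an interrupt. */
    static void free_special_perm_region(void *p)
    {
        WARN_ON(in_interrupt());
        vfree(p);
    }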
Diffstat (limited to 'kernel/module.c')
 kernel/module.c | 77 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 39 insertions(+), 38 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 2b2845ae983e..a9020bdd4cf6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -98,6 +98,10 @@ DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
 
+/* Work queue for freeing init sections in success case */
+static struct work_struct init_free_wq;
+static struct llist_head init_free_list;
+
 #ifdef CONFIG_MODULES_TREE_LOOKUP
 
 /*
@@ -1949,6 +1953,8 @@ void module_enable_ro(const struct module *mod, bool after_init)
 	if (!rodata_enabled)
 		return;
 
+	set_vm_flush_reset_perms(mod->core_layout.base);
+	set_vm_flush_reset_perms(mod->init_layout.base);
 	frob_text(&mod->core_layout, set_memory_ro);
 	frob_text(&mod->core_layout, set_memory_x);
 
@@ -1972,15 +1978,6 @@ static void module_enable_nx(const struct module *mod)
 	frob_writable_data(&mod->init_layout, set_memory_nx);
 }
 
-static void module_disable_nx(const struct module *mod)
-{
-	frob_rodata(&mod->core_layout, set_memory_x);
-	frob_ro_after_init(&mod->core_layout, set_memory_x);
-	frob_writable_data(&mod->core_layout, set_memory_x);
-	frob_rodata(&mod->init_layout, set_memory_x);
-	frob_writable_data(&mod->init_layout, set_memory_x);
-}
-
 /* Iterate through all modules and set each module's text as RW */
 void set_all_modules_text_rw(void)
 {
@@ -2024,23 +2021,8 @@ void set_all_modules_text_ro(void)
 	}
 	mutex_unlock(&module_mutex);
 }
-
-static void disable_ro_nx(const struct module_layout *layout)
-{
-	if (rodata_enabled) {
-		frob_text(layout, set_memory_rw);
-		frob_rodata(layout, set_memory_rw);
-		frob_ro_after_init(layout, set_memory_rw);
-	}
-	frob_rodata(layout, set_memory_x);
-	frob_ro_after_init(layout, set_memory_x);
-	frob_writable_data(layout, set_memory_x);
-}
-
 #else
-static void disable_ro_nx(const struct module_layout *layout) { }
 static void module_enable_nx(const struct module *mod) { }
-static void module_disable_nx(const struct module *mod) { }
 #endif
 
 #ifdef CONFIG_LIVEPATCH
@@ -2120,6 +2102,11 @@ static void free_module_elf(struct module *mod)
 
 void __weak module_memfree(void *module_region)
 {
+	/*
+	 * This memory may be RO, and freeing RO memory in an interrupt is not
+	 * supported by vmalloc.
+	 */
+	WARN_ON(in_interrupt());
 	vfree(module_region);
 }
 
@@ -2171,7 +2158,6 @@ static void free_module(struct module *mod)
2171 mutex_unlock(&module_mutex); 2158 mutex_unlock(&module_mutex);
2172 2159
2173 /* This may be empty, but that's OK */ 2160 /* This may be empty, but that's OK */
2174 disable_ro_nx(&mod->init_layout);
2175 module_arch_freeing_init(mod); 2161 module_arch_freeing_init(mod);
2176 module_memfree(mod->init_layout.base); 2162 module_memfree(mod->init_layout.base);
2177 kfree(mod->args); 2163 kfree(mod->args);
@@ -2181,7 +2167,6 @@ static void free_module(struct module *mod)
2181 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); 2167 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2182 2168
2183 /* Finally, free the core (containing the module structure) */ 2169 /* Finally, free the core (containing the module structure) */
2184 disable_ro_nx(&mod->core_layout);
2185 module_memfree(mod->core_layout.base); 2170 module_memfree(mod->core_layout.base);
2186} 2171}
2187 2172
@@ -3420,17 +3405,34 @@ static void do_mod_ctors(struct module *mod)
 
 /* For freeing module_init on success, in case kallsyms traversing */
 struct mod_initfree {
-	struct rcu_head rcu;
+	struct llist_node node;
 	void *module_init;
 };
 
-static void do_free_init(struct rcu_head *head)
+static void do_free_init(struct work_struct *w)
 {
-	struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
-	module_memfree(m->module_init);
-	kfree(m);
+	struct llist_node *pos, *n, *list;
+	struct mod_initfree *initfree;
+
+	list = llist_del_all(&init_free_list);
+
+	synchronize_rcu();
+
+	llist_for_each_safe(pos, n, list) {
+		initfree = container_of(pos, struct mod_initfree, node);
+		module_memfree(initfree->module_init);
+		kfree(initfree);
+	}
 }
 
+static int __init modules_wq_init(void)
+{
+	INIT_WORK(&init_free_wq, do_free_init);
+	init_llist_head(&init_free_list);
+	return 0;
+}
+module_init(modules_wq_init);
+
 /*
  * This is where the real work happens.
  *
@@ -3507,7 +3509,6 @@ static noinline int do_init_module(struct module *mod)
3507#endif 3509#endif
3508 module_enable_ro(mod, true); 3510 module_enable_ro(mod, true);
3509 mod_tree_remove_init(mod); 3511 mod_tree_remove_init(mod);
3510 disable_ro_nx(&mod->init_layout);
3511 module_arch_freeing_init(mod); 3512 module_arch_freeing_init(mod);
3512 mod->init_layout.base = NULL; 3513 mod->init_layout.base = NULL;
3513 mod->init_layout.size = 0; 3514 mod->init_layout.size = 0;
@@ -3518,14 +3519,18 @@ static noinline int do_init_module(struct module *mod)
3518 * We want to free module_init, but be aware that kallsyms may be 3519 * We want to free module_init, but be aware that kallsyms may be
3519 * walking this with preempt disabled. In all the failure paths, we 3520 * walking this with preempt disabled. In all the failure paths, we
3520 * call synchronize_rcu(), but we don't want to slow down the success 3521 * call synchronize_rcu(), but we don't want to slow down the success
3521 * path, so use actual RCU here. 3522 * path. module_memfree() cannot be called in an interrupt, so do the
3523 * work and call synchronize_rcu() in a work queue.
3524 *
3522 * Note that module_alloc() on most architectures creates W+X page 3525 * Note that module_alloc() on most architectures creates W+X page
3523 * mappings which won't be cleaned up until do_free_init() runs. Any 3526 * mappings which won't be cleaned up until do_free_init() runs. Any
3524 * code such as mark_rodata_ro() which depends on those mappings to 3527 * code such as mark_rodata_ro() which depends on those mappings to
3525 * be cleaned up needs to sync with the queued work - ie 3528 * be cleaned up needs to sync with the queued work - ie
3526 * rcu_barrier() 3529 * rcu_barrier()
3527 */ 3530 */
3528 call_rcu(&freeinit->rcu, do_free_init); 3531 if (llist_add(&freeinit->node, &init_free_list))
3532 schedule_work(&init_free_wq);
3533
3529 mutex_unlock(&module_mutex); 3534 mutex_unlock(&module_mutex);
3530 wake_up_all(&module_wq); 3535 wake_up_all(&module_wq);
3531 3536
@@ -3822,10 +3827,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
3822 module_bug_cleanup(mod); 3827 module_bug_cleanup(mod);
3823 mutex_unlock(&module_mutex); 3828 mutex_unlock(&module_mutex);
3824 3829
3825 /* we can't deallocate the module until we clear memory protection */
3826 module_disable_ro(mod);
3827 module_disable_nx(mod);
3828
3829 ddebug_cleanup: 3830 ddebug_cleanup:
3830 ftrace_release_mod(mod); 3831 ftrace_release_mod(mod);
3831 dynamic_debug_remove(mod, info->debug); 3832 dynamic_debug_remove(mod, info->debug);
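
[Editor's note] Taken together, the queuing half of the patch amounts to
the following standalone pattern: a lock-free llist feeding a work item
that batches frees behind a single synchronize_rcu(). This is a
restatement for illustration, not the patch itself; struct deferred_free
and the function names are made up:

    #include <linux/kernel.h>
    #include <linux/llist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/workqueue.h>

    /* Hypothetical per-region bookkeeping, mirroring struct mod_initfree. */
    struct deferred_free {
        struct llist_node node;
        void *region;
    };

    static struct llist_head deferred_free_list;
    static struct work_struct deferred_free_work;

    static void deferred_free_fn(struct work_struct *w)
    {
        struct llist_node *pos, *n, *list;
        struct deferred_free *df;

        /* Atomically take ownership of everything queued so far. */
        list = llist_del_all(&deferred_free_list);

        /* One grace period covers the whole batch of regions. */
        synchronize_rcu();

        llist_for_each_safe(pos, n, list) {
            df = container_of(pos, struct deferred_free, node);
            vfree(df->region);  /* process context, so RO memory is fine */
            kfree(df);
        }
    }

    static int __init deferred_free_init(void)
    {
        INIT_WORK(&deferred_free_work, deferred_free_fn);
        init_llist_head(&deferred_free_list);
        return 0;
    }

    static void queue_deferred_free(struct deferred_free *df)
    {
        /*
         * llist_add() returns true only when the list was empty, so the
         * work item is scheduled once per batch rather than once per entry.
         */
        if (llist_add(&df->node, &deferred_free_list))
            schedule_work(&deferred_free_work);
    }

The design trade-off mirrors the commit message: synchronize_rcu() runs
once per batch in the worker instead of once per free via call_rcu(),
which keeps the module-load success path fast while still letting
kallsyms walk init sections under RCU protection.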