author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-18 23:55:41 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-18 23:55:41 -0500
commit	d790be3863b28fd22e0781c1a3ddefcbfd5f7086 (patch)
tree	56a9f83b66f336df73ff81d13a14a2d63ed4c0db
parent	64ec45bff6b3dade2643ed4c0f688a15ecf46ea2 (diff)
parent	b0a65b0cccd477b2fd8b7adad0ac39433df54829 (diff)
Merge tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull module updates from Rusty Russell:
 "The exciting thing here is the getting rid of stop_machine on module
  removal.  This is possible by using a simple atomic_t for the counter,
  rather than our fancy per-cpu counter: it turns out that no one is
  doing a module increment per net packet, so the slowdown should be in
  the noise"

* tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  param: do not set store func without write perm
  params: cleanup sysfs allocation
  kernel:module Fix coding style errors and warnings.
  module: Remove stop_machine from module unloading
  module: Replace module_ref with atomic_t refcnt
  lib/bug: Use RCU list ops for module_bug_list
  module: Unlink module with RCU synchronizing instead of stop_machine
  module: Wait for RCU synchronizing before releasing a module
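Before the diff, a rough sketch of the refcounting scheme the pull message describes. This is a user-space C11 illustration, not kernel code: add_unless, init_ref, get_ref, put_ref and try_release below are simplified stand-ins for atomic_add_unless(), module_unload_init(), try_module_get(), module_put() and try_release_module_ref(). The loader holds a base reference of MODULE_REF_BASE, gets and puts are single atomic operations, and unloading drops the base reference and only restores it if users remain and the count has not already reached zero.

/* Illustration only: a user-space C11 sketch of the atomic refcount scheme
 * (names here are simplified stand-ins, not the kernel's definitions). */
#include <stdatomic.h>
#include <stdbool.h>

#define MODULE_REF_BASE	1	/* base reference held by the loader */

struct module {
	atomic_int refcnt;	/* refcnt == 0 means the module is going away */
};

/* Emulates atomic_add_unless(): add 'a' unless the value equals 'u'. */
static bool add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	while (old != u)
		if (atomic_compare_exchange_weak(v, &old, old + a))
			return true;
	return false;
}

static void init_ref(struct module *mod)	/* like module_unload_init() */
{
	atomic_init(&mod->refcnt, MODULE_REF_BASE);
}

static bool get_ref(struct module *mod)		/* like try_module_get() */
{
	return add_unless(&mod->refcnt, 1, 0);	/* fail once refcnt hit zero */
}

static void put_ref(struct module *mod)		/* like module_put() */
{
	atomic_fetch_sub(&mod->refcnt, 1);
}

static bool try_release(struct module *mod)	/* like try_release_module_ref() */
{
	/* Drop the loader's base reference; old - BASE is the remaining count. */
	int ret = atomic_fetch_sub(&mod->refcnt, MODULE_REF_BASE) - MODULE_REF_BASE;

	/* Users remain: try to take the base reference back, unless a racing
	 * put_ref() already drove the count to zero (then unload still wins). */
	if (ret)
		ret = add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret == 0;	/* zero means no users are left */
}

The subtle case is the restore path: if a concurrent put_ref() drives the count to zero after the base reference has been dropped, add_unless() refuses to re-add it and the unload still succeeds, which is what lets the unload path avoid stop_machine.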
-rw-r--r--  include/linux/module.h         |  16
-rw-r--r--  include/trace/events/module.h  |   2
-rw-r--r--  kernel/module.c                | 170
-rw-r--r--  kernel/params.c                |  97
-rw-r--r--  lib/bug.c                      |  20
5 files changed, 137 insertions(+), 168 deletions(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index 71f282a4e307..ebfb0e153c6a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -210,20 +210,6 @@ enum module_state {
 	MODULE_STATE_UNFORMED,	/* Still setting it up. */
 };
 
-/**
- * struct module_ref - per cpu module reference counts
- * @incs: number of module get on this cpu
- * @decs: number of module put on this cpu
- *
- * We force an alignment on 8 or 16 bytes, so that alloc_percpu()
- * put @incs/@decs in same cache line, with no extra memory cost,
- * since alloc_percpu() is fine grained.
- */
-struct module_ref {
-	unsigned long incs;
-	unsigned long decs;
-} __attribute((aligned(2 * sizeof(unsigned long))));
-
 struct module {
 	enum module_state state;
 
@@ -367,7 +353,7 @@ struct module {
 	/* Destruction function. */
 	void (*exit)(void);
 
-	struct module_ref __percpu *refptr;
+	atomic_t refcnt;
 #endif
 
 #ifdef CONFIG_CONSTRUCTORS
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 7c5cbfe3fc49..81c4c183d348 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
 	TP_fast_assign(
 		__entry->ip		= ip;
-		__entry->refcnt		= __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
+		__entry->refcnt		= atomic_read(&mod->refcnt);
 		__assign_str(name, mod->name);
 	),
 
diff --git a/kernel/module.c b/kernel/module.c
index e52a8739361a..3965511ae133 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -42,7 +42,6 @@
 #include <linux/vermagic.h>
 #include <linux/notifier.h>
 #include <linux/sched.h>
-#include <linux/stop_machine.h>
 #include <linux/device.h>
 #include <linux/string.h>
 #include <linux/mutex.h>
@@ -98,7 +97,7 @@
  * 1) List of modules (also safely readable with preempt_disable),
  * 2) module_use links,
  * 3) module_addr_min/module_addr_max.
- * (delete uses stop_machine/add uses RCU list operations). */
+ * (delete and add uses RCU list operations). */
 DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
@@ -158,13 +157,13 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
  * Protected by module_mutex. */
 static unsigned long module_addr_min = -1UL, module_addr_max = 0;
 
-int register_module_notifier(struct notifier_block * nb)
+int register_module_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&module_notify_list, nb);
 }
 EXPORT_SYMBOL(register_module_notifier);
 
-int unregister_module_notifier(struct notifier_block * nb)
+int unregister_module_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_unregister(&module_notify_list, nb);
 }
@@ -628,18 +627,23 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
 
 EXPORT_TRACEPOINT_SYMBOL(module_get);
 
+/* MODULE_REF_BASE is the base reference count by kmodule loader. */
+#define MODULE_REF_BASE	1
+
 /* Init the unload section of the module. */
 static int module_unload_init(struct module *mod)
 {
-	mod->refptr = alloc_percpu(struct module_ref);
-	if (!mod->refptr)
-		return -ENOMEM;
+	/*
+	 * Initialize reference counter to MODULE_REF_BASE.
+	 * refcnt == 0 means module is going.
+	 */
+	atomic_set(&mod->refcnt, MODULE_REF_BASE);
 
 	INIT_LIST_HEAD(&mod->source_list);
 	INIT_LIST_HEAD(&mod->target_list);
 
 	/* Hold reference count during initialization. */
-	raw_cpu_write(mod->refptr->incs, 1);
+	atomic_inc(&mod->refcnt);
 
 	return 0;
 }
@@ -721,8 +725,6 @@ static void module_unload_free(struct module *mod)
 		kfree(use);
 	}
 	mutex_unlock(&module_mutex);
-
-	free_percpu(mod->refptr);
 }
 
 #ifdef CONFIG_MODULE_FORCE_UNLOAD
@@ -740,60 +742,39 @@ static inline int try_force_unload(unsigned int flags)
 }
 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
 
-struct stopref
+/* Try to release refcount of module, 0 means success. */
+static int try_release_module_ref(struct module *mod)
 {
-	struct module *mod;
-	int flags;
-	int *forced;
-};
+	int ret;
 
-/* Whole machine is stopped with interrupts off when this runs. */
-static int __try_stop_module(void *_sref)
-{
-	struct stopref *sref = _sref;
+	/* Try to decrement refcnt which we set at loading */
+	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
+	BUG_ON(ret < 0);
+	if (ret)
+		/* Someone can put this right now, recover with checking */
+		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
+
+	return ret;
+}
 
+static int try_stop_module(struct module *mod, int flags, int *forced)
+{
 	/* If it's not unused, quit unless we're forcing. */
-	if (module_refcount(sref->mod) != 0) {
-		if (!(*sref->forced = try_force_unload(sref->flags)))
+	if (try_release_module_ref(mod) != 0) {
+		*forced = try_force_unload(flags);
+		if (!(*forced))
 			return -EWOULDBLOCK;
 	}
 
 	/* Mark it as dying. */
-	sref->mod->state = MODULE_STATE_GOING;
-	return 0;
-}
-
-static int try_stop_module(struct module *mod, int flags, int *forced)
-{
-	struct stopref sref = { mod, flags, forced };
+	mod->state = MODULE_STATE_GOING;
 
-	return stop_machine(__try_stop_module, &sref, NULL);
+	return 0;
 }
 
 unsigned long module_refcount(struct module *mod)
 {
-	unsigned long incs = 0, decs = 0;
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
-	/*
-	 * ensure the incs are added up after the decs.
-	 * module_put ensures incs are visible before decs with smp_wmb.
-	 *
-	 * This 2-count scheme avoids the situation where the refcount
-	 * for CPU0 is read, then CPU0 increments the module refcount,
-	 * then CPU1 drops that refcount, then the refcount for CPU1 is
-	 * read. We would record a decrement but not its corresponding
-	 * increment so we would see a low count (disaster).
-	 *
-	 * Rare situation? But module_refcount can be preempted, and we
-	 * might be tallying up 4096+ CPUs. So it is not impossible.
-	 */
-	smp_rmb();
-	for_each_possible_cpu(cpu)
-		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
-	return incs - decs;
+	return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 }
 EXPORT_SYMBOL(module_refcount);
 
@@ -877,8 +858,10 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
 
 	seq_printf(m, " %lu ", module_refcount(mod));
 
-	/* Always include a trailing , so userspace can differentiate
-	   between this and the old multi-field proc format. */
+	/*
+	 * Always include a trailing , so userspace can differentiate
+	 * between this and the old multi-field proc format.
+	 */
 	list_for_each_entry(use, &mod->source_list, source_list) {
 		printed_something = 1;
 		seq_printf(m, "%s,", use->source->name);
@@ -886,11 +869,11 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
 
 	if (mod->init != NULL && mod->exit == NULL) {
 		printed_something = 1;
-		seq_printf(m, "[permanent],");
+		seq_puts(m, "[permanent],");
 	}
 
 	if (!printed_something)
-		seq_printf(m, "-");
+		seq_puts(m, "-");
 }
 
 void __symbol_put(const char *symbol)
@@ -935,7 +918,7 @@ void __module_get(struct module *module)
 {
 	if (module) {
 		preempt_disable();
-		__this_cpu_inc(module->refptr->incs);
+		atomic_inc(&module->refcnt);
 		trace_module_get(module, _RET_IP_);
 		preempt_enable();
 	}
@@ -948,11 +931,11 @@ bool try_module_get(struct module *module)
 
 	if (module) {
 		preempt_disable();
-
-		if (likely(module_is_live(module))) {
-			__this_cpu_inc(module->refptr->incs);
+		/* Note: here, we can fail to get a reference */
+		if (likely(module_is_live(module) &&
+			   atomic_inc_not_zero(&module->refcnt) != 0))
 			trace_module_get(module, _RET_IP_);
-		} else
+		else
 			ret = false;
 
 		preempt_enable();
@@ -963,11 +946,12 @@ EXPORT_SYMBOL(try_module_get);
 
 void module_put(struct module *module)
 {
+	int ret;
+
 	if (module) {
 		preempt_disable();
-		smp_wmb(); /* see comment in module_refcount */
-		__this_cpu_inc(module->refptr->decs);
-
+		ret = atomic_dec_if_positive(&module->refcnt);
+		WARN_ON(ret < 0);	/* Failed to put refcount */
 		trace_module_put(module, _RET_IP_);
 		preempt_enable();
 	}
@@ -978,7 +962,7 @@ EXPORT_SYMBOL(module_put);
 static inline void print_unload_info(struct seq_file *m, struct module *mod)
 {
 	/* We don't know the usage count, or what modules are using. */
-	seq_printf(m, " - -");
+	seq_puts(m, " - -");
 }
 
 static inline void module_unload_free(struct module *mod)
@@ -1131,7 +1115,7 @@ static unsigned long maybe_relocated(unsigned long crc,
 static int check_version(Elf_Shdr *sechdrs,
 			 unsigned int versindex,
 			 const char *symname,
-			 struct module *mod, 
+			 struct module *mod,
 			 const unsigned long *crc,
 			 const struct module *crc_owner)
 {
@@ -1165,7 +1149,7 @@ static int check_version(Elf_Shdr *sechdrs,
 	return 0;
 
 bad_version:
-	printk("%s: disagrees about version of symbol %s\n",
+	pr_warn("%s: disagrees about version of symbol %s\n",
 	       mod->name, symname);
 	return 0;
 }
@@ -1200,7 +1184,7 @@ static inline int same_magic(const char *amagic, const char *bmagic,
 static inline int check_version(Elf_Shdr *sechdrs,
 				unsigned int versindex,
 				const char *symname,
-				struct module *mod, 
+				struct module *mod,
 				const unsigned long *crc,
 				const struct module *crc_owner)
 {
@@ -1288,15 +1272,13 @@ static inline bool sect_empty(const Elf_Shdr *sect)
 	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
 }
 
-struct module_sect_attr
-{
+struct module_sect_attr {
 	struct module_attribute mattr;
 	char *name;
 	unsigned long address;
 };
 
-struct module_sect_attrs
-{
+struct module_sect_attrs {
 	struct attribute_group grp;
 	unsigned int nsections;
 	struct module_sect_attr attrs[0];
@@ -1550,7 +1532,8 @@ static int module_add_modinfo_attrs(struct module *mod)
 		    (attr->test && attr->test(mod))) {
 			memcpy(temp_attr, attr, sizeof(*temp_attr));
 			sysfs_attr_init(&temp_attr->attr);
-			error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
+			error = sysfs_create_file(&mod->mkobj.kobj,
+						  &temp_attr->attr);
 			++temp_attr;
 		}
 	}
@@ -1566,7 +1549,7 @@ static void module_remove_modinfo_attrs(struct module *mod)
 		/* pick a field to test for end of list */
 		if (!attr->attr.name)
 			break;
-		sysfs_remove_file(&mod->mkobj.kobj,&attr->attr);
+		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
 		if (attr->free)
 			attr->free(mod);
 	}
@@ -1697,18 +1680,6 @@ static void mod_sysfs_teardown(struct module *mod)
 	mod_sysfs_fini(mod);
 }
 
-/*
- * unlink the module with the whole machine is stopped with interrupts off
- * - this defends against kallsyms not taking locks
- */
-static int __unlink_module(void *_mod)
-{
-	struct module *mod = _mod;
-	list_del(&mod->list);
-	module_bug_cleanup(mod);
-	return 0;
-}
-
 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
 /*
  * LKM RO/NX protection: protect module's text/ro-data
@@ -1860,7 +1831,12 @@ static void free_module(struct module *mod)
 
 	/* Now we can delete it from the lists */
 	mutex_lock(&module_mutex);
-	stop_machine(__unlink_module, mod, NULL);
+	/* Unlink carefully: kallsyms could be walking list. */
+	list_del_rcu(&mod->list);
+	/* Remove this module from bug list, this uses list_del_rcu */
+	module_bug_cleanup(mod);
+	/* Wait for RCU synchronizing before releasing mod->list and buglist. */
+	synchronize_rcu();
 	mutex_unlock(&module_mutex);
 
 	/* This may be NULL, but that's OK */
@@ -1955,7 +1931,7 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
 			/* We compiled with -fno-common.  These are not
 			   supposed to happen.  */
 			pr_debug("Common symbol: %s\n", name);
-			printk("%s: please compile with -fno-common\n",
+			pr_warn("%s: please compile with -fno-common\n",
 			       mod->name);
 			ret = -ENOEXEC;
 			break;
@@ -2259,7 +2235,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info)
 }
 
 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
-                           unsigned int shnum)
+			   unsigned int shnum)
 {
 	const Elf_Shdr *sec;
 
@@ -2735,7 +2711,7 @@ static int find_module_sections(struct module *mod, struct load_info *info)
 		 * This shouldn't happen with same compiler and binutils
 		 * building all parts of the module.
 		 */
-		printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
+		pr_warn("%s: has both .ctors and .init_array.\n",
 		       mod->name);
 		return -EINVAL;
 	}
@@ -3023,8 +2999,10 @@ static int do_init_module(struct module *mod)
 	if (mod->init != NULL)
 		ret = do_one_initcall(mod->init);
 	if (ret < 0) {
-		/* Init routine failed: abort.  Try to protect us from
-		   buggy refcounters. */
+		/*
+		 * Init routine failed: abort.  Try to protect us from
+		 * buggy refcounters.
+		 */
 		mod->state = MODULE_STATE_GOING;
 		synchronize_sched();
 		module_put(mod);
@@ -3202,7 +3180,7 @@ out:
 
 static int unknown_module_param_cb(char *param, char *val, const char *modname)
 {
-	/* Check for magic 'dyndbg' arg */ 
+	/* Check for magic 'dyndbg' arg */
 	int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
 	if (ret != 0)
 		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
@@ -3352,6 +3330,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
 	wake_up_all(&module_wq);
+	/* Wait for RCU synchronizing before releasing mod->list. */
+	synchronize_rcu();
 	mutex_unlock(&module_mutex);
  free_module:
 	module_deallocate(mod, info);
@@ -3685,8 +3665,8 @@ static int m_show(struct seq_file *m, void *p)
 
 	/* Informative for users. */
 	seq_printf(m, " %s",
-		   mod->state == MODULE_STATE_GOING ? "Unloading":
-		   mod->state == MODULE_STATE_COMING ? "Loading":
+		   mod->state == MODULE_STATE_GOING ? "Unloading" :
+		   mod->state == MODULE_STATE_COMING ? "Loading" :
 		   "Live");
 	/* Used by oprofile and other similar tools. */
 	seq_printf(m, " 0x%pK", mod->module_core);
@@ -3695,7 +3675,7 @@ static int m_show(struct seq_file *m, void *p)
 	if (mod->taints)
 		seq_printf(m, " %s", module_flags(mod, buf));
 
-	seq_printf(m, "\n");
+	seq_puts(m, "\n");
 	return 0;
 }
 
diff --git a/kernel/params.c b/kernel/params.c
index db97b791390f..0af9b2c4e56c 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -603,74 +603,67 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
 				      const struct kernel_param *kp,
 				      const char *name)
 {
-	struct module_param_attrs *new;
-	struct attribute **attrs;
-	int err, num;
+	struct module_param_attrs *new_mp;
+	struct attribute **new_attrs;
+	unsigned int i;
 
 	/* We don't bother calling this with invisible parameters. */
 	BUG_ON(!kp->perm);
 
 	if (!mk->mp) {
-		num = 0;
-		attrs = NULL;
-	} else {
-		num = mk->mp->num;
-		attrs = mk->mp->grp.attrs;
+		/* First allocation. */
+		mk->mp = kzalloc(sizeof(*mk->mp), GFP_KERNEL);
+		if (!mk->mp)
+			return -ENOMEM;
+		mk->mp->grp.name = "parameters";
+		/* NULL-terminated attribute array. */
+		mk->mp->grp.attrs = kzalloc(sizeof(mk->mp->grp.attrs[0]),
+					    GFP_KERNEL);
+		/* Caller will cleanup via free_module_param_attrs */
+		if (!mk->mp->grp.attrs)
+			return -ENOMEM;
 	}
 
-	/* Enlarge. */
-	new = krealloc(mk->mp,
-		       sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
-		       GFP_KERNEL);
-	if (!new) {
-		kfree(attrs);
-		err = -ENOMEM;
-		goto fail;
-	}
-	/* Despite looking like the typical realloc() bug, this is safe.
-	 * We *want* the old 'attrs' to be freed either way, and we'll store
-	 * the new one in the success case. */
-	attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
-	if (!attrs) {
-		err = -ENOMEM;
-		goto fail_free_new;
-	}
+	/* Enlarge allocations. */
+	new_mp = krealloc(mk->mp,
+			  sizeof(*mk->mp) +
+			  sizeof(mk->mp->attrs[0]) * (mk->mp->num + 1),
+			  GFP_KERNEL);
+	if (!new_mp)
+		return -ENOMEM;
+	mk->mp = new_mp;
 
-	/* Sysfs wants everything zeroed. */
-	memset(new, 0, sizeof(*new));
-	memset(&new->attrs[num], 0, sizeof(new->attrs[num]));
-	memset(&attrs[num], 0, sizeof(attrs[num]));
-	new->grp.name = "parameters";
-	new->grp.attrs = attrs;
+	/* Extra pointer for NULL terminator */
+	new_attrs = krealloc(mk->mp->grp.attrs,
+			     sizeof(mk->mp->grp.attrs[0]) * (mk->mp->num + 2),
+			     GFP_KERNEL);
+	if (!new_attrs)
+		return -ENOMEM;
+	mk->mp->grp.attrs = new_attrs;
 
 	/* Tack new one on the end. */
-	sysfs_attr_init(&new->attrs[num].mattr.attr);
-	new->attrs[num].param = kp;
-	new->attrs[num].mattr.show = param_attr_show;
-	new->attrs[num].mattr.store = param_attr_store;
-	new->attrs[num].mattr.attr.name = (char *)name;
-	new->attrs[num].mattr.attr.mode = kp->perm;
-	new->num = num+1;
+	sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
+	mk->mp->attrs[mk->mp->num].param = kp;
+	mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
+	/* Do not allow runtime DAC changes to make param writable. */
+	if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
+		mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+	mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
+	mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
+	mk->mp->num++;
 
 	/* Fix up all the pointers, since krealloc can move us */
-	for (num = 0; num < new->num; num++)
-		new->grp.attrs[num] = &new->attrs[num].mattr.attr;
-	new->grp.attrs[num] = NULL;
-
-	mk->mp = new;
+	for (i = 0; i < mk->mp->num; i++)
+		mk->mp->grp.attrs[i] = &mk->mp->attrs[i].mattr.attr;
+	mk->mp->grp.attrs[mk->mp->num] = NULL;
 	return 0;
-
-fail_free_new:
-	kfree(new);
-fail:
-	mk->mp = NULL;
-	return err;
 }
 
 #ifdef CONFIG_MODULES
 static void free_module_param_attrs(struct module_kobject *mk)
 {
-	kfree(mk->mp->grp.attrs);
+	if (mk->mp)
+		kfree(mk->mp->grp.attrs);
 	kfree(mk->mp);
 	mk->mp = NULL;
 }
@@ -695,8 +688,10 @@ int module_param_sysfs_setup(struct module *mod,
 		if (kparam[i].perm == 0)
 			continue;
 		err = add_sysfs_param(&mod->mkobj, &kparam[i], kparam[i].name);
-		if (err)
+		if (err) {
+			free_module_param_attrs(&mod->mkobj);
 			return err;
+		}
 		params = true;
 	}
 
diff --git a/lib/bug.c b/lib/bug.c
index d1d7c7878900..0c3bd9552b6f 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -64,16 +64,22 @@ static LIST_HEAD(module_bug_list);
 static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	struct module *mod;
+	const struct bug_entry *bug = NULL;
 
-	list_for_each_entry(mod, &module_bug_list, bug_list) {
-		const struct bug_entry *bug = mod->bug_table;
+	rcu_read_lock();
+	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
 		unsigned i;
 
+		bug = mod->bug_table;
 		for (i = 0; i < mod->num_bugs; ++i, ++bug)
 			if (bugaddr == bug_addr(bug))
-				return bug;
+				goto out;
 	}
-	return NULL;
+	bug = NULL;
+out:
+	rcu_read_unlock();
+
+	return bug;
 }
 
 void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
@@ -99,13 +105,15 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 	 * Strictly speaking this should have a spinlock to protect against
 	 * traversals, but since we only traverse on BUG()s, a spinlock
 	 * could potentially lead to deadlock and thus be counter-productive.
+	 * Thus, this uses RCU to safely manipulate the bug list, since BUG
+	 * must run in non-interruptive state.
 	 */
-	list_add(&mod->bug_list, &module_bug_list);
+	list_add_rcu(&mod->bug_list, &module_bug_list);
 }
 
 void module_bug_cleanup(struct module *mod)
 {
-	list_del(&mod->bug_list);
+	list_del_rcu(&mod->bug_list);
 }
 
 #else