 kernel/module.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 5184877ce98a..d1a161be7b04 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2940,7 +2940,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 {
 	/* Module within temporary copy. */
 	struct module *mod;
-	Elf_Shdr *pcpusec;
 	int err;
 
 	mod = setup_load_info(info, flags);
@@ -2955,17 +2954,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	err = module_frob_arch_sections(info->hdr, info->sechdrs,
 					info->secstrings, mod);
 	if (err < 0)
-		goto out;
+		return ERR_PTR(err);
 
-	pcpusec = &info->sechdrs[info->index.pcpu];
-	if (pcpusec->sh_size) {
-		/* We have a special allocation for this section. */
-		err = percpu_modalloc(mod,
-				      pcpusec->sh_size, pcpusec->sh_addralign);
-		if (err)
-			goto out;
-		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
-	}
+	/* We will do a special allocation for per-cpu sections later. */
+	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
 
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
 	   this is done generically; there doesn't appear to be any
@@ -2976,17 +2968,22 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	/* Allocate and move to the final place */
 	err = move_module(mod, info);
 	if (err)
-		goto free_percpu;
+		return ERR_PTR(err);
 
 	/* Module has been copied to its final place now: return it. */
 	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 	kmemleak_load_module(mod, info);
 	return mod;
+}
 
-free_percpu:
-	percpu_modfree(mod);
-out:
-	return ERR_PTR(err);
+static int alloc_module_percpu(struct module *mod, struct load_info *info)
+{
+	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
+	if (!pcpusec->sh_size)
+		return 0;
+
+	/* We have a special allocation for this section. */
+	return percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign);
 }
 
 /* mod is no longer valid after this! */
@@ -3262,6 +3259,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	}
 #endif
 
+	/* To avoid stressing percpu allocator, do this once we're unique. */
+	err = alloc_module_percpu(mod, info);
+	if (err)
+		goto unlink_mod;
+
 	/* Now module is in final location, initialize linked lists, etc. */
 	err = module_unload_init(mod);
 	if (err)