author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-26 00:06:30 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-26 00:06:30 -0500
commit		422d26b6ecd77af8c77f2a40580679459825170f (patch)
tree		632e690e458fb7b27db200cd6fcd5429e143e419 /kernel
parent		4c271bb67c04253c1e99006eb48fb773a8fe8c0f (diff)
parent		949db153b6466c6f7cad5a427ecea94985927311 (diff)

Merge 3.8-rc5 into driver-core-next

This resolves a gpio driver merge issue pointed out in linux-next.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')

 kernel/async.c              |  27
 kernel/compat.c             |  23
 kernel/debug/kdb/kdb_main.c |   2
 kernel/fork.c               |   6
 kernel/module.c             | 154
 kernel/ptrace.c             |  74
 kernel/sched/core.c         |   3
 kernel/signal.c             |  24
 kernel/trace/ftrace.c       |   2

 9 files changed, 223 insertions(+), 92 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index a1d585c351d6..6f34904a0b53 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -86,18 +86,27 @@ static atomic_t entry_count;
  */
 static async_cookie_t __lowest_in_progress(struct async_domain *running)
 {
+	async_cookie_t first_running = next_cookie;	/* infinity value */
+	async_cookie_t first_pending = next_cookie;	/* ditto */
 	struct async_entry *entry;
 
+	/*
+	 * Both running and pending lists are sorted but not disjoint.
+	 * Take the first cookies from both and return the min.
+	 */
 	if (!list_empty(&running->domain)) {
 		entry = list_first_entry(&running->domain, typeof(*entry), list);
-		return entry->cookie;
+		first_running = entry->cookie;
 	}
 
-	list_for_each_entry(entry, &async_pending, list)
-		if (entry->running == running)
-			return entry->cookie;
+	list_for_each_entry(entry, &async_pending, list) {
+		if (entry->running == running) {
+			first_pending = entry->cookie;
+			break;
+		}
+	}
 
-	return next_cookie;	/* "infinity" value */
+	return min(first_running, first_pending);
 }
 
 static async_cookie_t lowest_in_progress(struct async_domain *running)
@@ -118,13 +127,17 @@ static void async_run_entry_fn(struct work_struct *work)
 {
 	struct async_entry *entry =
 		container_of(work, struct async_entry, work);
+	struct async_entry *pos;
 	unsigned long flags;
 	ktime_t uninitialized_var(calltime), delta, rettime;
 	struct async_domain *running = entry->running;
 
-	/* 1) move self to the running queue */
+	/* 1) move self to the running queue, make sure it stays sorted */
 	spin_lock_irqsave(&async_lock, flags);
-	list_move_tail(&entry->list, &running->domain);
+	list_for_each_entry_reverse(pos, &running->domain, list)
+		if (entry->cookie < pos->cookie)
+			break;
+	list_move_tail(&entry->list, &pos->list);
 	spin_unlock_irqrestore(&async_lock, flags);
 
 	/* 2) run (and print duration) */
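
The two async.c hunks work together: async_run_entry_fn() now inserts each
entry at its sorted position in the domain's running list instead of blindly
appending, so both the pending and running lists stay ordered by cookie, and
__lowest_in_progress() can simply take the smaller of the two list heads. A
minimal standalone sketch of that idea, with hypothetical array stand-ins
rather than the kernel's list API:

	typedef unsigned long long cookie_t;

	/* Lowest cookie still in flight across two cookie-sorted queues;
	 * "infinity" plays the role of next_cookie in the kernel code. */
	static cookie_t lowest_sketch(const cookie_t *running, int nr_running,
				      const cookie_t *pending, int nr_pending,
				      cookie_t infinity)
	{
		cookie_t first_running = nr_running ? running[0] : infinity;
		cookie_t first_pending = nr_pending ? pending[0] : infinity;

		return first_running < first_pending ? first_running : first_pending;
	}
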
diff --git a/kernel/compat.c b/kernel/compat.c
index f6150e92dfc9..36700e9e2be9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -535,9 +535,11 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
 	return 0;
 }
 
-asmlinkage long
-compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
-		struct compat_rusage __user *ru)
+COMPAT_SYSCALL_DEFINE4(wait4,
+	compat_pid_t, pid,
+	compat_uint_t __user *, stat_addr,
+	int, options,
+	struct compat_rusage __user *, ru)
 {
 	if (!ru) {
 		return sys_wait4(pid, stat_addr, options, NULL);
@@ -564,9 +566,10 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
 	}
 }
 
-asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
-		struct compat_siginfo __user *uinfo, int options,
-		struct compat_rusage __user *uru)
+COMPAT_SYSCALL_DEFINE5(waitid,
+		int, which, compat_pid_t, pid,
+		struct compat_siginfo __user *, uinfo, int, options,
+		struct compat_rusage __user *, uru)
 {
 	siginfo_t info;
 	struct rusage ru;
@@ -584,7 +587,11 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
 		return ret;
 
 	if (uru) {
-		ret = put_compat_rusage(&ru, uru);
+		/* sys_waitid() overwrites everything in ru */
+		if (COMPAT_USE_64BIT_TIME)
+			ret = copy_to_user(uru, &ru, sizeof(ru));
+		else
+			ret = put_compat_rusage(&ru, uru);
 		if (ret)
 			return ret;
 	}
@@ -994,7 +1001,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
 	sigset_from_compat(&s, &s32);
 
 	if (uts) {
-		if (get_compat_timespec(&t, uts))
+		if (compat_get_timespec(&t, uts))
 			return -EFAULT;
 	}
 
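
The compat.c conversions replace open-coded "asmlinkage long compat_sys_*()"
definitions with the COMPAT_SYSCALL_DEFINEn() wrappers from
include/linux/compat.h. The macro accepts every argument as a register-width
long at the ABI boundary and narrows it to the declared compat type before
the body runs, so a 32-bit caller cannot leak garbage through the upper half
of a 64-bit register. Glossing over the __MAP()/__SC_* plumbing, the
expansion looks roughly like this sketch (not the literal preprocessor
output):

	asmlinkage long compat_sys_wait4(long pid, long stat_addr,
					 long options, long ru)
	{
		/* narrow each long to its declared compat type */
		return C_SYSC_wait4((compat_pid_t)pid,
				    (compat_uint_t __user *)stat_addr,
				    (int)options,
				    (struct compat_rusage __user *)ru);
	}
	/* ...followed by the real body as static inline C_SYSC_wait4(). */
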
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 4d5f8d5612f3..8875254120b6 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv)
 
 	kdb_printf("Module                  Size  modstruct     Used by\n");
 	list_for_each_entry(mod, kdb_modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 
 		kdb_printf("%-20s%8u  0x%p ", mod->name,
 			   mod->core_size, (void *)mod);
diff --git a/kernel/fork.c b/kernel/fork.c
index 65ca6d27f24e..c535f33bbb9c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1668,8 +1668,10 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 		 int, tls_val)
 #endif
 {
-	return do_fork(clone_flags, newsp, 0,
-		parent_tidptr, child_tidptr);
+	long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
+	asmlinkage_protect(5, ret, clone_flags, newsp,
+			   parent_tidptr, child_tidptr, tls_val);
+	return ret;
 }
 #endif
 
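
The fork.c change is subtler than it looks. On architectures that pass
syscall arguments on the stack (notably x86-32), the compiler may reuse those
argument slots as scratch space once it no longer needs the values, yet the
syscall exit path restores user registers from exactly those slots.
asmlinkage_protect() keeps the slots live until after do_fork() returns; the
real per-arch definition lives in asm/linkage.h, but the trick is,
schematically (a sketch, not the actual macro):

	/* Sketch: a dummy asm that makes every argument an input tied to the
	 * return value, so the compiler must preserve the stack slots. */
	#define asmlinkage_protect_sketch(ret, args...) \
		__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
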
diff --git a/kernel/module.c b/kernel/module.c
index b10b048367e1..eab08274ec9b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -188,6 +188,7 @@ struct load_info {
    ongoing or failed initialization etc. */
 static inline int strong_try_module_get(struct module *mod)
 {
+	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
 	if (mod && mod->state == MODULE_STATE_COMING)
 		return -EBUSY;
 	if (try_module_get(mod))
@@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 #endif
 		};
 
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
+
 		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
 			return true;
 	}
@@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name,
 EXPORT_SYMBOL_GPL(find_symbol);
 
 /* Search for module by name: must hold module_mutex. */
-struct module *find_module(const char *name)
+static struct module *find_module_all(const char *name,
+				      bool even_unformed)
 {
 	struct module *mod;
 
 	list_for_each_entry(mod, &modules, list) {
+		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if (strcmp(mod->name, name) == 0)
 			return mod;
 	}
 	return NULL;
 }
+
+struct module *find_module(const char *name)
+{
+	return find_module_all(name, false);
+}
 EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
@@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr)
 	preempt_disable();
 
 	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if (!mod->percpu_size)
 			continue;
 		for_each_possible_cpu(cpu) {
@@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr,
 	case MODULE_STATE_GOING:
 		state = "going";
 		break;
+	default:
+		BUG();
 	}
 	return sprintf(buffer, "%s\n", state);
 }
@@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void)
 
 	mutex_lock(&module_mutex);
 	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if ((mod->module_core) && (mod->core_text_size)) {
 			set_page_attributes(mod->module_core,
 						mod->module_core + mod->core_text_size,
@@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void)
 
 	mutex_lock(&module_mutex);
 	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if ((mod->module_core) && (mod->core_text_size)) {
 			set_page_attributes(mod->module_core,
 						mod->module_core + mod->core_text_size,
@@ -2527,6 +2547,13 @@ static int copy_module_from_fd(int fd, struct load_info *info)
 		err = -EFBIG;
 		goto out;
 	}
+
+	/* Don't hand 0 to vmalloc, it whines. */
+	if (stat.size == 0) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	info->hdr = vmalloc(stat.size);
 	if (!info->hdr) {
 		err = -ENOMEM;
@@ -2990,8 +3017,9 @@ static bool finished_loading(const char *name)
 	bool ret;
 
 	mutex_lock(&module_mutex);
-	mod = find_module(name);
-	ret = !mod || mod->state != MODULE_STATE_COMING;
+	mod = find_module_all(name, true);
+	ret = !mod || mod->state == MODULE_STATE_LIVE
+		|| mod->state == MODULE_STATE_GOING;
 	mutex_unlock(&module_mutex);
 
 	return ret;
@@ -3136,6 +3164,32 @@ static int load_module(struct load_info *info, const char __user *uargs,
 		goto free_copy;
 	}
 
+	/*
+	 * We try to place it in the list now to make sure it's unique
+	 * before we dedicate too many resources.  In particular,
+	 * temporary percpu memory exhaustion.
+	 */
+	mod->state = MODULE_STATE_UNFORMED;
+again:
+	mutex_lock(&module_mutex);
+	if ((old = find_module_all(mod->name, true)) != NULL) {
+		if (old->state == MODULE_STATE_COMING
+		    || old->state == MODULE_STATE_UNFORMED) {
+			/* Wait in case it fails to load. */
+			mutex_unlock(&module_mutex);
+			err = wait_event_interruptible(module_wq,
+					       finished_loading(mod->name));
+			if (err)
+				goto free_module;
+			goto again;
+		}
+		err = -EEXIST;
+		mutex_unlock(&module_mutex);
+		goto free_module;
+	}
+	list_add_rcu(&mod->list, &modules);
+	mutex_unlock(&module_mutex);
+
 #ifdef CONFIG_MODULE_SIG
 	mod->sig_ok = info->sig_ok;
 	if (!mod->sig_ok)
@@ -3145,7 +3199,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	/* Now module is in final location, initialize linked lists, etc. */
 	err = module_unload_init(mod);
 	if (err)
-		goto free_module;
+		goto unlink_mod;
 
 	/* Now we've got everything in the final locations, we can
 	 * find optional sections. */
@@ -3180,54 +3234,33 @@ static int load_module(struct load_info *info, const char __user *uargs,
 		goto free_arch_cleanup;
 	}
 
-	/* Mark state as coming so strong_try_module_get() ignores us. */
-	mod->state = MODULE_STATE_COMING;
-
-	/* Now sew it into the lists so we can get lockdep and oops
-	 * info during argument parsing.  No one should access us, since
-	 * strong_try_module_get() will fail.
-	 * lockdep/oops can run asynchronous, so use the RCU list insertion
-	 * function to insert in a way safe to concurrent readers.
-	 * The mutex protects against concurrent writers.
-	 */
-again:
-	mutex_lock(&module_mutex);
-	if ((old = find_module(mod->name)) != NULL) {
-		if (old->state == MODULE_STATE_COMING) {
-			/* Wait in case it fails to load. */
-			mutex_unlock(&module_mutex);
-			err = wait_event_interruptible(module_wq,
-					       finished_loading(mod->name));
-			if (err)
-				goto free_arch_cleanup;
-			goto again;
-		}
-		err = -EEXIST;
-		goto unlock;
-	}
-
-	/* This has to be done once we're sure module name is unique. */
 	dynamic_debug_setup(info->debug, info->num_debug);
 
-	/* Find duplicate symbols */
+	mutex_lock(&module_mutex);
+	/* Find duplicate symbols (must be called under lock). */
 	err = verify_export_symbols(mod);
 	if (err < 0)
-		goto ddebug;
+		goto ddebug_cleanup;
 
+	/* This relies on module_mutex for list integrity. */
 	module_bug_finalize(info->hdr, info->sechdrs, mod);
-	list_add_rcu(&mod->list, &modules);
+
+	/* Mark state as coming so strong_try_module_get() ignores us,
+	 * but kallsyms etc. can see us. */
+	mod->state = MODULE_STATE_COMING;
+
 	mutex_unlock(&module_mutex);
 
 	/* Module is ready to execute: parsing args may do that. */
 	err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
 			 -32768, 32767, &ddebug_dyndbg_module_param_cb);
 	if (err < 0)
-		goto unlink;
+		goto bug_cleanup;
 
 	/* Link in to syfs. */
 	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
 	if (err < 0)
-		goto unlink;
+		goto bug_cleanup;
 
 	/* Get rid of temporary copy. */
 	free_copy(info);
@@ -3237,16 +3270,13 @@ again:
 
 	return do_init_module(mod);
 
- unlink:
+ bug_cleanup:
+	/* module_bug_cleanup needs module_mutex protection */
 	mutex_lock(&module_mutex);
-	/* Unlink carefully: kallsyms could be walking list. */
-	list_del_rcu(&mod->list);
 	module_bug_cleanup(mod);
-	wake_up_all(&module_wq);
- ddebug:
-	dynamic_debug_remove(info->debug);
- unlock:
+ ddebug_cleanup:
 	mutex_unlock(&module_mutex);
+	dynamic_debug_remove(info->debug);
 	synchronize_sched();
 	kfree(mod->args);
  free_arch_cleanup:
@@ -3255,6 +3285,12 @@ again:
 	free_modinfo(mod);
  free_unload:
 	module_unload_free(mod);
+ unlink_mod:
+	mutex_lock(&module_mutex);
+	/* Unlink carefully: kallsyms could be walking list. */
+	list_del_rcu(&mod->list);
+	wake_up_all(&module_wq);
+	mutex_unlock(&module_mutex);
  free_module:
 	module_deallocate(mod, info);
  free_copy:
@@ -3377,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr,
 
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if (within_module_init(addr, mod) ||
 		    within_module_core(addr, mod)) {
 			if (modname)
@@ -3400,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
 
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if (within_module_init(addr, mod) ||
 		    within_module_core(addr, mod)) {
 			const char *sym;
@@ -3424,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
 
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if (within_module_init(addr, mod) ||
 		    within_module_core(addr, mod)) {
 			const char *sym;
@@ -3451,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if (symnum < mod->num_symtab) {
 			*value = mod->symtab[symnum].st_value;
 			*type = mod->symtab[symnum].st_info;
@@ -3493,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name)
 		ret = mod_find_symname(mod, colon+1);
 		*colon = ':';
 	} else {
-		list_for_each_entry_rcu(mod, &modules, list)
+		list_for_each_entry_rcu(mod, &modules, list) {
+			if (mod->state == MODULE_STATE_UNFORMED)
+				continue;
 			if ((ret = mod_find_symname(mod, name)) != 0)
 				break;
+		}
 	}
 	preempt_enable();
 	return ret;
@@ -3510,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
 	int ret;
 
 	list_for_each_entry(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		for (i = 0; i < mod->num_symtab; i++) {
 			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
 				 mod, mod->symtab[i].st_value);
@@ -3525,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf)
 {
 	int bx = 0;
 
+	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
 	if (mod->taints ||
 	    mod->state == MODULE_STATE_GOING ||
 	    mod->state == MODULE_STATE_COMING) {
@@ -3566,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p)
 	struct module *mod = list_entry(p, struct module, list);
 	char buf[8];
 
+	/* We always ignore unformed modules. */
+	if (mod->state == MODULE_STATE_UNFORMED)
+		return 0;
+
 	seq_printf(m, "%s %u",
 		   mod->name, mod->init_size + mod->core_size);
 	print_unload_info(m, mod);
@@ -3626,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
 
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if (mod->num_exentries == 0)
 			continue;
 
@@ -3674,10 +3730,13 @@ struct module *__module_address(unsigned long addr)
 	if (addr < module_addr_min || addr > module_addr_max)
 		return NULL;
 
-	list_for_each_entry_rcu(mod, &modules, list)
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		if (within_module_core(addr, mod)
 		    || within_module_init(addr, mod))
 			return mod;
+	}
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(__module_address);
3683EXPORT_SYMBOL_GPL(__module_address); 3742EXPORT_SYMBOL_GPL(__module_address);
@@ -3730,8 +3789,11 @@ void print_modules(void)
 	printk(KERN_DEFAULT "Modules linked in:");
 	/* Most callers should already have preempt disabled, but make sure */
 	preempt_disable();
-	list_for_each_entry_rcu(mod, &modules, list)
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (mod->state == MODULE_STATE_UNFORMED)
+			continue;
 		printk(" %s%s", mod->name, module_flags(mod, buf));
+	}
 	preempt_enable();
 	if (last_unloaded_module[0])
 		printk(" [last unloaded: %s]", last_unloaded_module);
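
Most of the module.c churn is one pattern applied at every walker of the
global "modules" list: a module is now linked into the list early, in the new
MODULE_STATE_UNFORMED state, so duplicate names are rejected before percpu
and other resources are committed, and in exchange every reader must skip
unformed entries. Each site open-codes the same two-line guard, equivalent to
a predicate like this hypothetical helper (not part of the patch):

	static inline bool module_is_formed(const struct module *mod)
	{
		return mod->state != MODULE_STATE_UNFORMED;
	}
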
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1599157336a6..6cbeaae4406d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
 	 * TASK_KILLABLE sleeps.
 	 */
 	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
-		signal_wake_up(child, task_is_traced(child));
+		ptrace_signal_wake_up(child, true);
 
 	spin_unlock(&child->sighand->siglock);
 }
 
+/* Ensure that nothing can wake it up, even SIGKILL */
+static bool ptrace_freeze_traced(struct task_struct *task)
+{
+	bool ret = false;
+
+	/* Lockless, nobody but us can set this flag */
+	if (task->jobctl & JOBCTL_LISTENING)
+		return ret;
+
+	spin_lock_irq(&task->sighand->siglock);
+	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+		task->state = __TASK_TRACED;
+		ret = true;
+	}
+	spin_unlock_irq(&task->sighand->siglock);
+
+	return ret;
+}
+
+static void ptrace_unfreeze_traced(struct task_struct *task)
+{
+	if (task->state != __TASK_TRACED)
+		return;
+
+	WARN_ON(!task->ptrace || task->parent != current);
+
+	spin_lock_irq(&task->sighand->siglock);
+	if (__fatal_signal_pending(task))
+		wake_up_state(task, __TASK_TRACED);
+	else
+		task->state = TASK_TRACED;
+	spin_unlock_irq(&task->sighand->siglock);
+}
+
 /**
  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
  * @child: ptracee to check for
@@ -139,7 +173,7 @@ void __ptrace_unlink(struct task_struct *child)
  * RETURNS:
  *   0 on success, -ESRCH if %child is not ready.
  */
-int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 {
 	int ret = -ESRCH;
 
@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 	 * be changed by us so it's not changing right after this.
 	 */
 	read_lock(&tasklist_lock);
-	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
+	if (child->ptrace && child->parent == current) {
+		WARN_ON(child->state == __TASK_TRACED);
 		/*
 		 * child->sighand can't be NULL, release_task()
 		 * does ptrace_unlink() before __exit_signal().
 		 */
-		spin_lock_irq(&child->sighand->siglock);
-		WARN_ON_ONCE(task_is_stopped(child));
-		if (ignore_state || (task_is_traced(child) &&
-				     !(child->jobctl & JOBCTL_LISTENING)))
+		if (ignore_state || ptrace_freeze_traced(child))
 			ret = 0;
-		spin_unlock_irq(&child->sighand->siglock);
 	}
 	read_unlock(&tasklist_lock);
 
-	if (!ret && !ignore_state)
-		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+	if (!ret && !ignore_state) {
+		if (!wait_task_inactive(child, __TASK_TRACED)) {
+			/*
+			 * This can only happen if may_ptrace_stop() fails and
+			 * ptrace_stop() changes ->state back to TASK_RUNNING,
+			 * so we should not worry about leaking __TASK_TRACED.
+			 */
+			WARN_ON(child->state == __TASK_TRACED);
+			ret = -ESRCH;
+		}
+	}
 
-	/* All systems go.. */
 	return ret;
 }
 
@@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request,
 	 */
 	if (task_is_stopped(task) &&
 	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
-		signal_wake_up(task, 1);
+		signal_wake_up_state(task, __TASK_STOPPED);
 
 	spin_unlock(&task->sighand->siglock);
 
@@ -737,7 +776,7 @@ int ptrace_request(struct task_struct *child, long request,
 		 * tracee into STOP.
 		 */
 		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
-			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 
 		unlock_task_sighand(child, &flags);
 		ret = 0;
@@ -763,7 +802,7 @@ int ptrace_request(struct task_struct *child, long request,
 		 * start of this trap and now.  Trigger re-trap.
 		 */
 		if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-			signal_wake_up(child, true);
+			ptrace_signal_wake_up(child, true);
 		ret = 0;
 	}
 	unlock_task_sighand(child, &flags);
@@ -900,6 +939,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 		goto out_put_task_struct;
 
 	ret = arch_ptrace(child, request, addr, data);
+	if (ret || request != PTRACE_DETACH)
+		ptrace_unfreeze_traced(child);
 
  out_put_task_struct:
 	put_task_struct(child);
@@ -1039,8 +1080,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 
 	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
 				  request == PTRACE_INTERRUPT);
-	if (!ret)
+	if (!ret) {
 		ret = compat_arch_ptrace(child, request, addr, data);
+		if (ret || request != PTRACE_DETACH)
+			ptrace_unfreeze_traced(child);
+	}
 
  out_put_task_struct:
 	put_task_struct(child);
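
The ptrace.c hunks introduce a freeze/unfreeze protocol: ptrace_check_attach()
now parks a traced, not-yet-killed tracee in bare __TASK_TRACED (TASK_TRACED
with TASK_WAKEKILL removed), so not even SIGKILL can wake it mid-request, and
ptrace_unfreeze_traced() later restores TASK_TRACED or delivers the deferred
wakeup if a fatal signal arrived meanwhile. Both syscall paths follow the
same pairing, shown here as used by the compat path above:

	ret = ptrace_check_attach(child, ...);	/* may freeze to __TASK_TRACED */
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		/* a successful PTRACE_DETACH already released the tracee */
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}
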
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c13bb0..26058d0bebba 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1523,7 +1523,8 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_ALL, 0);
+	WARN_ON(task_is_stopped_or_traced(p));
+	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
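
This one-line scheduler change backs the ptrace freeze above: a stopped or
traced task must only be woken by code that understands those states, so the
general-purpose wake_up_process() drops them from its wake mask and warns if
asked to wake one. For reference, the masks as defined in the
include/linux/sched.h of this era:

	#define TASK_NORMAL	(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
	#define TASK_ALL	(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
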
diff --git a/kernel/signal.c b/kernel/signal.c
index 372771e948c2..3d09cf6cde75 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up(struct task_struct *t, int resume)
+void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
-	unsigned int mask;
-
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
-
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
-	mask = TASK_INTERRUPTIBLE;
-	if (resume)
-		mask |= TASK_WAKEKILL;
-	if (!wake_up_state(t, mask))
+	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 		kick_process(t);
 }
 
@@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t)
 	assert_spin_locked(&t->sighand->siglock);
 
 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
-	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 }
 
 /*
@@ -1800,6 +1794,10 @@ static inline int may_ptrace_stop(void)
 	 * If SIGKILL was already sent before the caller unlocked
 	 * ->siglock we must see ->core_state != NULL. Otherwise it
 	 * is safe to enter schedule().
+	 *
+	 * This is almost outdated, a task with the pending SIGKILL can't
+	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
+	 * after SIGKILL was already dequeued.
 	 */
 	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
@@ -1925,6 +1923,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	if (gstop_done)
 		do_notify_parent_cldstop(current, false, why);
 
+	/* tasklist protects us from ptrace_freeze_traced() */
 	__set_current_state(TASK_RUNNING);
 	if (clear_code)
 		current->exit_code = 0;
@@ -3116,8 +3115,9 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
 
 #ifdef CONFIG_COMPAT
 #ifdef CONFIG_GENERIC_SIGALTSTACK
-asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
-				       compat_stack_t __user *uoss_ptr)
+COMPAT_SYSCALL_DEFINE2(sigaltstack,
+			const compat_stack_t __user *, uss_ptr,
+			compat_stack_t __user *, uoss_ptr)
 {
 	stack_t uss, uoss;
 	int ret;
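
The signal.c side generalizes signal_wake_up() into signal_wake_up_state(),
which takes the extra wake mask directly. Existing callers keep working via
inline wrappers added to include/linux/sched.h by the same series (outside
this kernel/-limited diffstat); they look roughly like:

	static inline void signal_wake_up(struct task_struct *t, bool resume)
	{
		signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
	}

	static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
	{
		signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
	}
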
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3ffe4c5ad3f3..41473b4ad7a4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3998,7 +3998,7 @@ static int ftrace_module_notify(struct notifier_block *self,
 
 struct notifier_block ftrace_module_nb = {
 	.notifier_call = ftrace_module_notify,
-	.priority = 0,
+	.priority = INT_MAX,	/* Run before anything that can use kprobes */
 };
 
 extern unsigned long __start_mcount_loc[];