Diffstat (limited to 'kernel')
-rw-r--r--   kernel/async.c              |  30
-rw-r--r--   kernel/audit.c              |  40
-rw-r--r--   kernel/audit_tree.c         |  26
-rw-r--r--   kernel/audit_watch.c        |   2
-rw-r--r--   kernel/auditfilter.c        |   1
-rw-r--r--   kernel/auditsc.c            |  20
-rw-r--r--   kernel/compat.c             |  23
-rw-r--r--   kernel/debug/kdb/kdb_main.c |   2
-rw-r--r--   kernel/fork.c               |   6
-rw-r--r--   kernel/module.c             | 181
-rw-r--r--   kernel/printk.c             |   9
-rw-r--r--   kernel/ptrace.c             |  74
-rw-r--r--   kernel/rwsem.c              |  10
-rw-r--r--   kernel/sched/core.c         |   3
-rw-r--r--   kernel/signal.c             |  24
-rw-r--r--   kernel/smp.c                |  13
-rw-r--r--   kernel/time/clockevents.c   |   1
-rw-r--r--   kernel/trace/ftrace.c       |   2
-rw-r--r--   kernel/trace/trace.c        |  17

19 files changed, 348 insertions, 136 deletions
diff --git a/kernel/async.c b/kernel/async.c
index 9d3118384858..6f34904a0b53 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -86,18 +86,27 @@ static atomic_t entry_count;
  */
 static async_cookie_t __lowest_in_progress(struct async_domain *running)
 {
+        async_cookie_t first_running = next_cookie;     /* infinity value */
+        async_cookie_t first_pending = next_cookie;     /* ditto */
         struct async_entry *entry;
 
+        /*
+         * Both running and pending lists are sorted but not disjoint.
+         * Take the first cookies from both and return the min.
+         */
         if (!list_empty(&running->domain)) {
                 entry = list_first_entry(&running->domain, typeof(*entry), list);
-                return entry->cookie;
+                first_running = entry->cookie;
         }
 
-        list_for_each_entry(entry, &async_pending, list)
-                if (entry->running == running)
-                        return entry->cookie;
+        list_for_each_entry(entry, &async_pending, list) {
+                if (entry->running == running) {
+                        first_pending = entry->cookie;
+                        break;
+                }
+        }
 
-        return next_cookie;     /* "infinity" value */
+        return min(first_running, first_pending);
 }
 
 static async_cookie_t lowest_in_progress(struct async_domain *running)
@@ -118,13 +127,17 @@ static void async_run_entry_fn(struct work_struct *work)
 {
         struct async_entry *entry =
                 container_of(work, struct async_entry, work);
+        struct async_entry *pos;
         unsigned long flags;
         ktime_t uninitialized_var(calltime), delta, rettime;
         struct async_domain *running = entry->running;
 
-        /* 1) move self to the running queue */
+        /* 1) move self to the running queue, make sure it stays sorted */
         spin_lock_irqsave(&async_lock, flags);
-        list_move_tail(&entry->list, &running->domain);
+        list_for_each_entry_reverse(pos, &running->domain, list)
+                if (entry->cookie < pos->cookie)
+                        break;
+        list_move_tail(&entry->list, &pos->list);
         spin_unlock_irqrestore(&async_lock, flags);
 
         /* 2) run (and print duration) */
@@ -196,6 +209,9 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
         atomic_inc(&entry_count);
         spin_unlock_irqrestore(&async_lock, flags);
 
+        /* mark that this task has queued an async job, used by module init */
+        current->flags |= PF_USED_ASYNC;
+
         /* schedule for execution */
         queue_work(system_unbound_wq, &entry->work);
 
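The fix above restores an invariant that splitting entries between the pending list and per-domain running lists had broken: the head of the running list is not necessarily the globally lowest cookie still in flight, because an entry with a smaller cookie can still sit on the pending list (and vice versa). A minimal userspace sketch of the corrected computation, with sorted arrays standing in for the kernel's sorted lists (the function and parameter names here are illustrative, not kernel API):

        typedef unsigned long long cookie_t;

        /* next_cookie acts as "infinity": no outstanding entry can have a
         * cookie that large yet, so an empty list contributes nothing. */
        static cookie_t lowest_in_progress(const cookie_t *running, int n_running,
                                           const cookie_t *pending, int n_pending,
                                           cookie_t next_cookie)
        {
                cookie_t first_running = next_cookie;
                cookie_t first_pending = next_cookie;

                if (n_running > 0)
                        first_running = running[0];     /* head of sorted list */
                if (n_pending > 0)
                        first_pending = pending[0];     /* head of sorted list */

                /* sorted but not disjoint, so take the min of both heads */
                return first_running < first_pending ? first_running : first_pending;
        }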
diff --git a/kernel/audit.c b/kernel/audit.c
index 40414e9143db..d596e5355f15 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -272,6 +272,8 @@ static int audit_log_config_change(char *function_name, int new, int old,
         int rc = 0;
 
         ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+        if (unlikely(!ab))
+                return rc;
         audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
                          old, from_kuid(&init_user_ns, loginuid), sessionid);
         if (sid) {
@@ -619,6 +621,8 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
         }
 
         *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
+        if (unlikely(!*ab))
+                return rc;
         audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
                          task_tgid_vnr(current),
                          from_kuid(&init_user_ns, current_uid()),
@@ -1097,6 +1101,23 @@ static inline void audit_get_stamp(struct audit_context *ctx,
         }
 }
 
+/*
+ * Wait for auditd to drain the queue a little
+ */
+static void wait_for_auditd(unsigned long sleep_time)
+{
+        DECLARE_WAITQUEUE(wait, current);
+        set_current_state(TASK_INTERRUPTIBLE);
+        add_wait_queue(&audit_backlog_wait, &wait);
+
+        if (audit_backlog_limit &&
+            skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
+                schedule_timeout(sleep_time);
+
+        __set_current_state(TASK_RUNNING);
+        remove_wait_queue(&audit_backlog_wait, &wait);
+}
+
 /* Obtain an audit buffer.  This routine does locking to obtain the
  * audit buffer, but then no locking is required for calls to
  * audit_log_*format.  If the tsk is a task that is currently in a
@@ -1142,20 +1163,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
         while (audit_backlog_limit
                && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
-                if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time
-                    && time_before(jiffies, timeout_start + audit_backlog_wait_time)) {
+                if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {
+                        unsigned long sleep_time;
 
-                        /* Wait for auditd to drain the queue a little */
-                        DECLARE_WAITQUEUE(wait, current);
-                        set_current_state(TASK_INTERRUPTIBLE);
-                        add_wait_queue(&audit_backlog_wait, &wait);
-
-                        if (audit_backlog_limit &&
-                            skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
-                                schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies);
-
-                        __set_current_state(TASK_RUNNING);
-                        remove_wait_queue(&audit_backlog_wait, &wait);
+                        sleep_time = timeout_start + audit_backlog_wait_time -
+                                        jiffies;
+                        if ((long)sleep_time > 0)
+                                wait_for_auditd(sleep_time);
                         continue;
                 }
                 if (audit_rate_check() && printk_ratelimit())
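Every audit hunk in this series applies the same pattern: audit_log_start() can return NULL (out of memory, or the backlog limit was hit and the wait gave up), and several callers went on to dereference the result unconditionally. A sketch of the pattern each call site is converted to; example_audit_event() is hypothetical, the audit_log_* calls are the real API:

        static void example_audit_event(struct audit_context *ctx)
        {
                struct audit_buffer *ab;

                ab = audit_log_start(ctx, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                if (unlikely(!ab))
                        return;         /* no buffer: drop the record rather than oops */
                audit_log_format(ab, "op=example res=1");
                audit_log_end(ab);
        }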
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index e81175ef25f8..642a89c4f3d6 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -449,11 +449,26 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
         return 0;
 }
 
+static void audit_log_remove_rule(struct audit_krule *rule)
+{
+        struct audit_buffer *ab;
+
+        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+        if (unlikely(!ab))
+                return;
+        audit_log_format(ab, "op=");
+        audit_log_string(ab, "remove rule");
+        audit_log_format(ab, " dir=");
+        audit_log_untrustedstring(ab, rule->tree->pathname);
+        audit_log_key(ab, rule->filterkey);
+        audit_log_format(ab, " list=%d res=1", rule->listnr);
+        audit_log_end(ab);
+}
+
 static void kill_rules(struct audit_tree *tree)
 {
         struct audit_krule *rule, *next;
         struct audit_entry *entry;
-        struct audit_buffer *ab;
 
         list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                 entry = container_of(rule, struct audit_entry, rule);
@@ -461,14 +476,7 @@ static void kill_rules(struct audit_tree *tree)
                 list_del_init(&rule->rlist);
                 if (rule->tree) {
                         /* not a half-baked one */
-                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
-                        audit_log_format(ab, "op=");
-                        audit_log_string(ab, "remove rule");
-                        audit_log_format(ab, " dir=");
-                        audit_log_untrustedstring(ab, rule->tree->pathname);
-                        audit_log_key(ab, rule->filterkey);
-                        audit_log_format(ab, " list=%d res=1", rule->listnr);
-                        audit_log_end(ab);
+                        audit_log_remove_rule(rule);
                         rule->tree = NULL;
                         list_del_rcu(&entry->list);
                         list_del(&entry->rule.list);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 4a599f699adc..22831c4d369c 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -240,6 +240,8 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
         if (audit_enabled) {
                 struct audit_buffer *ab;
                 ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+                if (unlikely(!ab))
+                        return;
                 audit_log_format(ab, "auid=%u ses=%u op=",
                                  from_kuid(&init_user_ns, audit_get_loginuid(current)),
                                  audit_get_sessionid(current));
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 7f19f23d38a3..f9fc54bbe06f 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1144,7 +1144,6 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
  * audit_receive_filter - apply all rules to the specified message type
  * @type: audit message type
  * @pid: target pid for netlink audit messages
- * @uid: target uid for netlink audit messages
  * @seq: netlink audit message sequence (serial) number
  * @data: payload data
  * @datasz: size of payload data
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index e37e6a12c5e3..a371f857a0a9 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1464,14 +1464,14 @@ static void show_special(struct audit_context *context, int *call_panic)
                         audit_log_end(ab);
                         ab = audit_log_start(context, GFP_KERNEL,
                                              AUDIT_IPC_SET_PERM);
+                        if (unlikely(!ab))
+                                return;
                         audit_log_format(ab,
                                 "qbytes=%lx ouid=%u ogid=%u mode=%#ho",
                                 context->ipc.qbytes,
                                 context->ipc.perm_uid,
                                 context->ipc.perm_gid,
                                 context->ipc.perm_mode);
-                        if (!ab)
-                                return;
                 }
                 break; }
         case AUDIT_MQ_OPEN: {
@@ -2675,7 +2675,7 @@ void __audit_mmap_fd(int fd, int flags)
         context->type = AUDIT_MMAP;
 }
 
-static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+static void audit_log_task(struct audit_buffer *ab)
 {
         kuid_t auid, uid;
         kgid_t gid;
@@ -2693,6 +2693,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
         audit_log_task_context(ab);
         audit_log_format(ab, " pid=%d comm=", current->pid);
         audit_log_untrustedstring(ab, current->comm);
+}
+
+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+{
+        audit_log_task(ab);
         audit_log_format(ab, " reason=");
         audit_log_string(ab, reason);
         audit_log_format(ab, " sig=%ld", signr);
@@ -2715,6 +2720,8 @@ void audit_core_dumps(long signr)
                 return;
 
         ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+        if (unlikely(!ab))
+                return;
         audit_log_abend(ab, "memory violation", signr);
         audit_log_end(ab);
 }
@@ -2723,8 +2730,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code)
 {
         struct audit_buffer *ab;
 
-        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-        audit_log_abend(ab, "seccomp", signr);
+        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
+        if (unlikely(!ab))
+                return;
+        audit_log_task(ab);
+        audit_log_format(ab, " sig=%ld", signr);
         audit_log_format(ab, " syscall=%ld", syscall);
         audit_log_format(ab, " compat=%d", is_compat_task());
         audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
diff --git a/kernel/compat.c b/kernel/compat.c
index f6150e92dfc9..36700e9e2be9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -535,9 +535,11 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
         return 0;
 }
 
-asmlinkage long
-compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
-        struct compat_rusage __user *ru)
+COMPAT_SYSCALL_DEFINE4(wait4,
+        compat_pid_t, pid,
+        compat_uint_t __user *, stat_addr,
+        int, options,
+        struct compat_rusage __user *, ru)
 {
         if (!ru) {
                 return sys_wait4(pid, stat_addr, options, NULL);
@@ -564,9 +566,10 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
         }
 }
 
-asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
-                struct compat_siginfo __user *uinfo, int options,
-                struct compat_rusage __user *uru)
+COMPAT_SYSCALL_DEFINE5(waitid,
+                int, which, compat_pid_t, pid,
+                struct compat_siginfo __user *, uinfo, int, options,
+                struct compat_rusage __user *, uru)
 {
         siginfo_t info;
         struct rusage ru;
@@ -584,7 +587,11 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
                 return ret;
 
         if (uru) {
-                ret = put_compat_rusage(&ru, uru);
+                /* sys_waitid() overwrites everything in ru */
+                if (COMPAT_USE_64BIT_TIME)
+                        ret = copy_to_user(uru, &ru, sizeof(ru));
+                else
+                        ret = put_compat_rusage(&ru, uru);
                 if (ret)
                         return ret;
         }
@@ -994,7 +1001,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
         sigset_from_compat(&s, &s32);
 
         if (uts) {
-                if (get_compat_timespec(&t, uts))
+                if (compat_get_timespec(&t, uts))
                         return -EFAULT;
         }
 
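COMPAT_SYSCALL_DEFINEn mirrors the native SYSCALL_DEFINEn family: the (type, name) pairs are reassembled into the usual asmlinkage prototype. Assuming the 3.8-era form of the macro, which does little more than rebuild the declaration, the wait4 conversion above is equivalent to the old open-coded entry point:

        /* The macro form used in the hunk above ... */
        COMPAT_SYSCALL_DEFINE4(wait4,
                compat_pid_t, pid,
                compat_uint_t __user *, stat_addr,
                int, options,
                struct compat_rusage __user *, ru)

        /* ... declares the same entry point the old code spelled out: */
        asmlinkage long compat_sys_wait4(compat_pid_t pid,
                compat_uint_t __user *stat_addr, int options,
                struct compat_rusage __user *ru);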
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 4d5f8d5612f3..8875254120b6 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv)
 
         kdb_printf("Module                  Size  modstruct     Used by\n");
         list_for_each_entry(mod, kdb_modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
 
                 kdb_printf("%-20s%8u  0x%p ", mod->name,
                            mod->core_size, (void *)mod);
diff --git a/kernel/fork.c b/kernel/fork.c
index 65ca6d27f24e..c535f33bbb9c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1668,8 +1668,10 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                  int, tls_val)
 #endif
 {
-        return do_fork(clone_flags, newsp, 0,
-                parent_tidptr, child_tidptr);
+        long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
+        asmlinkage_protect(5, ret, clone_flags, newsp,
+                        parent_tidptr, child_tidptr, tls_val);
+        return ret;
 }
 #endif
 
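asmlinkage_protect() matters on architectures that pass syscall arguments on the stack (m68k is the usual example): without it, the compiler may reuse the argument slots as scratch space across the do_fork() call, corrupting values the syscall exit path still expects to find there. Architectures that don't need the protection get a no-op fallback; from include/linux/linkage.h of this era:

        #ifndef asmlinkage_protect
        # define asmlinkage_protect(n, ret, args...)    do { } while (0)
        #endif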
diff --git a/kernel/module.c b/kernel/module.c
index 250092c1d57d..eab08274ec9b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -188,6 +188,7 @@ struct load_info {
    ongoing or failed initialization etc. */
 static inline int strong_try_module_get(struct module *mod)
 {
+        BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
         if (mod && mod->state == MODULE_STATE_COMING)
                 return -EBUSY;
         if (try_module_get(mod))
@@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 #endif
                 };
 
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
+
                 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
                         return true;
         }
@@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name,
 EXPORT_SYMBOL_GPL(find_symbol);
 
 /* Search for module by name: must hold module_mutex. */
-struct module *find_module(const char *name)
+static struct module *find_module_all(const char *name,
+                                      bool even_unformed)
 {
         struct module *mod;
 
         list_for_each_entry(mod, &modules, list) {
+                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if (strcmp(mod->name, name) == 0)
                         return mod;
         }
         return NULL;
 }
+
+struct module *find_module(const char *name)
+{
+        return find_module_all(name, false);
+}
 EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
@@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr)
         preempt_disable();
 
         list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if (!mod->percpu_size)
                         continue;
                 for_each_possible_cpu(cpu) {
@@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr,
         case MODULE_STATE_GOING:
                 state = "going";
                 break;
+        default:
+                BUG();
         }
         return sprintf(buffer, "%s\n", state);
 }
@@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void)
 
         mutex_lock(&module_mutex);
         list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if ((mod->module_core) && (mod->core_text_size)) {
                         set_page_attributes(mod->module_core,
                                                 mod->module_core + mod->core_text_size,
@@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void)
 
         mutex_lock(&module_mutex);
         list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if ((mod->module_core) && (mod->core_text_size)) {
                         set_page_attributes(mod->module_core,
                                                 mod->module_core + mod->core_text_size,
@@ -2527,6 +2547,13 @@ static int copy_module_from_fd(int fd, struct load_info *info)
                 err = -EFBIG;
                 goto out;
         }
+
+        /* Don't hand 0 to vmalloc, it whines. */
+        if (stat.size == 0) {
+                err = -EINVAL;
+                goto out;
+        }
+
         info->hdr = vmalloc(stat.size);
         if (!info->hdr) {
                 err = -ENOMEM;
@@ -2990,8 +3017,9 @@ static bool finished_loading(const char *name)
         bool ret;
 
         mutex_lock(&module_mutex);
-        mod = find_module(name);
-        ret = !mod || mod->state != MODULE_STATE_COMING;
+        mod = find_module_all(name, true);
+        ret = !mod || mod->state == MODULE_STATE_LIVE
+                || mod->state == MODULE_STATE_GOING;
         mutex_unlock(&module_mutex);
 
         return ret;
@@ -3013,6 +3041,12 @@ static int do_init_module(struct module *mod)
 {
         int ret = 0;
 
+        /*
+         * We want to find out whether @mod uses async during init.  Clear
+         * PF_USED_ASYNC.  async_schedule*() will set it.
+         */
+        current->flags &= ~PF_USED_ASYNC;
+
         blocking_notifier_call_chain(&module_notify_list,
                         MODULE_STATE_COMING, mod);
 
@@ -3058,8 +3092,25 @@ static int do_init_module(struct module *mod)
         blocking_notifier_call_chain(&module_notify_list,
                                      MODULE_STATE_LIVE, mod);
 
-        /* We need to finish all async code before the module init sequence is done */
-        async_synchronize_full();
+        /*
+         * We need to finish all async code before the module init sequence
+         * is done.  This has potential to deadlock.  For example, a newly
+         * detected block device can trigger request_module() of the
+         * default iosched from async probing task.  Once userland helper
+         * reaches here, async_synchronize_full() will wait on the async
+         * task waiting on request_module() and deadlock.
+         *
+         * This deadlock is avoided by performing async_synchronize_full()
+         * iff module init queued any async jobs.  This isn't a full
+         * solution as it will deadlock the same if module loading from
+         * async jobs nests more than once; however, due to the various
+         * constraints, this hack seems to be the best option for now.
+         * Please refer to the following thread for details.
+         *
+         * http://thread.gmane.org/gmane.linux.kernel/1420814
+         */
+        if (current->flags & PF_USED_ASYNC)
+                async_synchronize_full();
 
         mutex_lock(&module_mutex);
         /* Drop initial reference. */
@@ -3113,6 +3164,32 @@ static int load_module(struct load_info *info, const char __user *uargs,
                 goto free_copy;
         }
 
+        /*
+         * We try to place it in the list now to make sure it's unique
+         * before we dedicate too many resources.  In particular,
+         * temporary percpu memory exhaustion.
+         */
+        mod->state = MODULE_STATE_UNFORMED;
+again:
+        mutex_lock(&module_mutex);
+        if ((old = find_module_all(mod->name, true)) != NULL) {
+                if (old->state == MODULE_STATE_COMING
+                    || old->state == MODULE_STATE_UNFORMED) {
+                        /* Wait in case it fails to load. */
+                        mutex_unlock(&module_mutex);
+                        err = wait_event_interruptible(module_wq,
+                                               finished_loading(mod->name));
+                        if (err)
+                                goto free_module;
+                        goto again;
+                }
+                err = -EEXIST;
+                mutex_unlock(&module_mutex);
+                goto free_module;
+        }
+        list_add_rcu(&mod->list, &modules);
+        mutex_unlock(&module_mutex);
+
 #ifdef CONFIG_MODULE_SIG
         mod->sig_ok = info->sig_ok;
         if (!mod->sig_ok)
@@ -3122,7 +3199,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
         /* Now module is in final location, initialize linked lists, etc. */
         err = module_unload_init(mod);
         if (err)
-                goto free_module;
+                goto unlink_mod;
 
         /* Now we've got everything in the final locations, we can
          * find optional sections. */
@@ -3157,54 +3234,33 @@ static int load_module(struct load_info *info, const char __user *uargs,
                 goto free_arch_cleanup;
         }
 
-        /* Mark state as coming so strong_try_module_get() ignores us. */
-        mod->state = MODULE_STATE_COMING;
-
-        /* Now sew it into the lists so we can get lockdep and oops
-         * info during argument parsing.  No one should access us, since
-         * strong_try_module_get() will fail.
-         * lockdep/oops can run asynchronous, so use the RCU list insertion
-         * function to insert in a way safe to concurrent readers.
-         * The mutex protects against concurrent writers.
-         */
-again:
-        mutex_lock(&module_mutex);
-        if ((old = find_module(mod->name)) != NULL) {
-                if (old->state == MODULE_STATE_COMING) {
-                        /* Wait in case it fails to load. */
-                        mutex_unlock(&module_mutex);
-                        err = wait_event_interruptible(module_wq,
-                                               finished_loading(mod->name));
-                        if (err)
-                                goto free_arch_cleanup;
-                        goto again;
-                }
-                err = -EEXIST;
-                goto unlock;
-        }
-
-        /* This has to be done once we're sure module name is unique. */
         dynamic_debug_setup(info->debug, info->num_debug);
 
-        /* Find duplicate symbols */
+        mutex_lock(&module_mutex);
+        /* Find duplicate symbols (must be called under lock). */
         err = verify_export_symbols(mod);
         if (err < 0)
-                goto ddebug;
+                goto ddebug_cleanup;
 
+        /* This relies on module_mutex for list integrity. */
         module_bug_finalize(info->hdr, info->sechdrs, mod);
-        list_add_rcu(&mod->list, &modules);
+
+        /* Mark state as coming so strong_try_module_get() ignores us,
+         * but kallsyms etc. can see us. */
+        mod->state = MODULE_STATE_COMING;
+
         mutex_unlock(&module_mutex);
 
         /* Module is ready to execute: parsing args may do that. */
         err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
                          -32768, 32767, &ddebug_dyndbg_module_param_cb);
         if (err < 0)
-                goto unlink;
+                goto bug_cleanup;
 
         /* Link in to sysfs. */
         err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
         if (err < 0)
-                goto unlink;
+                goto bug_cleanup;
 
         /* Get rid of temporary copy. */
         free_copy(info);
@@ -3214,16 +3270,13 @@ again:
 
         return do_init_module(mod);
 
- unlink:
+ bug_cleanup:
+        /* module_bug_cleanup needs module_mutex protection */
         mutex_lock(&module_mutex);
-        /* Unlink carefully: kallsyms could be walking list. */
-        list_del_rcu(&mod->list);
         module_bug_cleanup(mod);
-        wake_up_all(&module_wq);
- ddebug:
-        dynamic_debug_remove(info->debug);
- unlock:
+ ddebug_cleanup:
         mutex_unlock(&module_mutex);
+        dynamic_debug_remove(info->debug);
         synchronize_sched();
         kfree(mod->args);
  free_arch_cleanup:
@@ -3232,6 +3285,12 @@ again:
         free_modinfo(mod);
  free_unload:
         module_unload_free(mod);
+ unlink_mod:
+        mutex_lock(&module_mutex);
+        /* Unlink carefully: kallsyms could be walking list. */
+        list_del_rcu(&mod->list);
+        wake_up_all(&module_wq);
+        mutex_unlock(&module_mutex);
  free_module:
         module_deallocate(mod, info);
  free_copy:
@@ -3354,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr,
 
         preempt_disable();
         list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if (within_module_init(addr, mod) ||
                     within_module_core(addr, mod)) {
                         if (modname)
@@ -3377,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
 
         preempt_disable();
         list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if (within_module_init(addr, mod) ||
                     within_module_core(addr, mod)) {
                         const char *sym;
@@ -3401,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
 
         preempt_disable();
         list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if (within_module_init(addr, mod) ||
                     within_module_core(addr, mod)) {
                         const char *sym;
@@ -3428,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
         preempt_disable();
         list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if (symnum < mod->num_symtab) {
                         *value = mod->symtab[symnum].st_value;
                         *type = mod->symtab[symnum].st_info;
@@ -3470,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name)
                 ret = mod_find_symname(mod, colon+1);
                 *colon = ':';
         } else {
-                list_for_each_entry_rcu(mod, &modules, list)
+                list_for_each_entry_rcu(mod, &modules, list) {
+                        if (mod->state == MODULE_STATE_UNFORMED)
+                                continue;
                         if ((ret = mod_find_symname(mod, name)) != 0)
                                 break;
+                }
         }
         preempt_enable();
         return ret;
@@ -3487,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
         int ret;
 
         list_for_each_entry(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 for (i = 0; i < mod->num_symtab; i++) {
                         ret = fn(data, mod->strtab + mod->symtab[i].st_name,
                                  mod, mod->symtab[i].st_value);
@@ -3502,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf)
 {
         int bx = 0;
 
+        BUG_ON(mod->state == MODULE_STATE_UNFORMED);
         if (mod->taints ||
             mod->state == MODULE_STATE_GOING ||
             mod->state == MODULE_STATE_COMING) {
@@ -3543,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p)
         struct module *mod = list_entry(p, struct module, list);
         char buf[8];
 
+        /* We always ignore unformed modules. */
+        if (mod->state == MODULE_STATE_UNFORMED)
+                return 0;
+
         seq_printf(m, "%s %u",
                    mod->name, mod->init_size + mod->core_size);
         print_unload_info(m, mod);
@@ -3603,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
 
         preempt_disable();
         list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if (mod->num_exentries == 0)
                         continue;
 
@@ -3651,10 +3730,13 @@ struct module *__module_address(unsigned long addr)
         if (addr < module_addr_min || addr > module_addr_max)
                 return NULL;
 
-        list_for_each_entry_rcu(mod, &modules, list)
+        list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 if (within_module_core(addr, mod)
                     || within_module_init(addr, mod))
                         return mod;
+        }
         return NULL;
 }
 EXPORT_SYMBOL_GPL(__module_address);
@@ -3707,8 +3789,11 @@ void print_modules(void)
         printk(KERN_DEFAULT "Modules linked in:");
         /* Most callers should already have preempt disabled, but make sure */
         preempt_disable();
-        list_for_each_entry_rcu(mod, &modules, list)
+        list_for_each_entry_rcu(mod, &modules, list) {
+                if (mod->state == MODULE_STATE_UNFORMED)
+                        continue;
                 printk(" %s%s", mod->name, module_flags(mod, buf));
+        }
         preempt_enable();
         if (last_unloaded_module[0])
                 printk(" [last unloaded: %s]", last_unloaded_module);
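Nearly every hunk in this file repeats one two-line guard, because a module in the new MODULE_STATE_UNFORMED state sits on the modules list purely as a name reservation: its symbol tables, exception tables and text ranges are not yet in final shape, so every reader must treat it as absent. A hypothetical helper expressing the rule (the patch deliberately open-codes the test at each list walk instead):

        /* Illustrative predicate only; not part of the patch. */
        static inline bool module_is_formed(const struct module *mod)
        {
                return mod->state != MODULE_STATE_UNFORMED;
        }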
diff --git a/kernel/printk.c b/kernel/printk.c
index 357f714ddd49..267ce780abe8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -87,12 +87,6 @@ static DEFINE_SEMAPHORE(console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map console_lock_dep_map = {
-        .name = "console_lock"
-};
-#endif
-
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -1924,7 +1918,6 @@ void console_lock(void)
                 return;
         console_locked = 1;
         console_may_schedule = 1;
-        mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(console_lock);
 
@@ -1946,7 +1939,6 @@ int console_trylock(void)
         }
         console_locked = 1;
         console_may_schedule = 0;
-        mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
         return 1;
 }
 EXPORT_SYMBOL(console_trylock);
@@ -2107,7 +2099,6 @@ skip:
                 local_irq_restore(flags);
         }
         console_locked = 0;
-        mutex_release(&console_lock_dep_map, 1, _RET_IP_);
 
         /* Release the exclusive_console once it is used */
         if (unlikely(exclusive_console))
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1599157336a6..6cbeaae4406d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
          * TASK_KILLABLE sleeps.
          */
         if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
-                signal_wake_up(child, task_is_traced(child));
+                ptrace_signal_wake_up(child, true);
 
         spin_unlock(&child->sighand->siglock);
 }
 
+/* Ensure that nothing can wake it up, even SIGKILL */
+static bool ptrace_freeze_traced(struct task_struct *task)
+{
+        bool ret = false;
+
+        /* Lockless, nobody but us can set this flag */
+        if (task->jobctl & JOBCTL_LISTENING)
+                return ret;
+
+        spin_lock_irq(&task->sighand->siglock);
+        if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+                task->state = __TASK_TRACED;
+                ret = true;
+        }
+        spin_unlock_irq(&task->sighand->siglock);
+
+        return ret;
+}
+
+static void ptrace_unfreeze_traced(struct task_struct *task)
+{
+        if (task->state != __TASK_TRACED)
+                return;
+
+        WARN_ON(!task->ptrace || task->parent != current);
+
+        spin_lock_irq(&task->sighand->siglock);
+        if (__fatal_signal_pending(task))
+                wake_up_state(task, __TASK_TRACED);
+        else
+                task->state = TASK_TRACED;
+        spin_unlock_irq(&task->sighand->siglock);
+}
+
 /**
  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
  * @child: ptracee to check for
@@ -139,7 +173,7 @@ void __ptrace_unlink(struct task_struct *child)
  * RETURNS:
  *   0 on success, -ESRCH if %child is not ready.
  */
-int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 {
         int ret = -ESRCH;
 
@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
          * be changed by us so it's not changing right after this.
          */
         read_lock(&tasklist_lock);
-        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
+        if (child->ptrace && child->parent == current) {
+                WARN_ON(child->state == __TASK_TRACED);
                 /*
                  * child->sighand can't be NULL, release_task()
                  * does ptrace_unlink() before __exit_signal().
                  */
-                spin_lock_irq(&child->sighand->siglock);
-                WARN_ON_ONCE(task_is_stopped(child));
-                if (ignore_state || (task_is_traced(child) &&
-                                     !(child->jobctl & JOBCTL_LISTENING)))
+                if (ignore_state || ptrace_freeze_traced(child))
                         ret = 0;
-                spin_unlock_irq(&child->sighand->siglock);
         }
         read_unlock(&tasklist_lock);
 
-        if (!ret && !ignore_state)
-                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+        if (!ret && !ignore_state) {
+                if (!wait_task_inactive(child, __TASK_TRACED)) {
+                        /*
+                         * This can only happen if may_ptrace_stop() fails and
+                         * ptrace_stop() changes ->state back to TASK_RUNNING,
+                         * so we should not worry about leaking __TASK_TRACED.
+                         */
+                        WARN_ON(child->state == __TASK_TRACED);
+                        ret = -ESRCH;
+                }
+        }
 
-        /* All systems go.. */
         return ret;
 }
 
@@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request,
          */
         if (task_is_stopped(task) &&
             task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
-                signal_wake_up(task, 1);
+                signal_wake_up_state(task, __TASK_STOPPED);
 
         spin_unlock(&task->sighand->siglock);
 
@@ -737,7 +776,7 @@ int ptrace_request(struct task_struct *child, long request,
                  * tracee into STOP.
                  */
                 if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
-                        signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+                        ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 
                 unlock_task_sighand(child, &flags);
                 ret = 0;
@@ -763,7 +802,7 @@ int ptrace_request(struct task_struct *child, long request,
                  * start of this trap and now.  Trigger re-trap.
                  */
                 if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-                        signal_wake_up(child, true);
+                        ptrace_signal_wake_up(child, true);
                 ret = 0;
         }
         unlock_task_sighand(child, &flags);
@@ -900,6 +939,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                 goto out_put_task_struct;
 
         ret = arch_ptrace(child, request, addr, data);
+        if (ret || request != PTRACE_DETACH)
+                ptrace_unfreeze_traced(child);
 
  out_put_task_struct:
         put_task_struct(child);
@@ -1039,8 +1080,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 
         ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                   request == PTRACE_INTERRUPT);
-        if (!ret)
+        if (!ret) {
                 ret = compat_arch_ptrace(child, request, addr, data);
+                if (ret || request != PTRACE_DETACH)
+                        ptrace_unfreeze_traced(child);
+        }
 
  out_put_task_struct:
         put_task_struct(child);
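Taken together, the freeze/unfreeze hunks give ptrace requests a stable target: ptrace_check_attach() parks the stopped tracee in __TASK_TRACED, a state the TASK_WAKEKILL wakeup mask cannot reach, so not even SIGKILL can lift the child out from under an in-flight request; the caller must then thaw it afterwards. A condensed sketch of the resulting sequence (the wrapper function is illustrative; the functions it calls are the ones in the diff):

        static long example_ptrace_op(struct task_struct *child, long request,
                                      unsigned long addr, unsigned long data)
        {
                long ret;

                /* Freezes the tracee in __TASK_TRACED on success. */
                ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                                 request == PTRACE_INTERRUPT);
                if (ret)
                        return ret;     /* nothing was frozen, nothing to undo */

                ret = arch_ptrace(child, request, addr, data);

                /* A successful PTRACE_DETACH has already released the child
                 * via __ptrace_unlink(); every other outcome thaws it here. */
                if (ret || request != PTRACE_DETACH)
                        ptrace_unfreeze_traced(child);
                return ret;
        }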
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 6850f53e02d8..b3c6c3fcd847 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -116,6 +116,16 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
 
 EXPORT_SYMBOL(down_read_nested);
 
+void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
+{
+        might_sleep();
+        rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
+
+        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
+}
+
+EXPORT_SYMBOL(_down_write_nest_lock);
+
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
         might_sleep();
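_down_write_nest_lock() is the out-of-line backend for a down_write_nest_lock() wrapper (presumably added to <linux/rwsem.h> in the same series): it lets lockdep accept write-locking many rwsems of one class when the whole set is ordered by an outer lock already held, the pattern mm_take_all_locks() needs once anon_vma moves to an rwsem. A hedged usage sketch, with outer_sem and obj->sem standing in for real locks:

        /* With outer_sem write-held, lockdep would normally flag the second,
         * third, ... same-class acquisition below; nest_lock says they are
         * all ordered by outer_sem and therefore cannot deadlock. */
        down_write(&outer_sem);
        list_for_each_entry(obj, &objects, list)
                down_write_nest_lock(&obj->sem, &outer_sem);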
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c13bb0..26058d0bebba 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1523,7 +1523,8 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-        return try_to_wake_up(p, TASK_ALL, 0);
+        WARN_ON(task_is_stopped_or_traced(p));
+        return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
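The behavioural change is in the wake mask: TASK_ALL also covers the stopped and traced states, while TASK_NORMAL is only the two sleep states, so a stray wake_up_process() can no longer pull a task out of STOPPED/TRACED behind the ptrace code's back (which would defeat the __TASK_TRACED freezing above). For reference, the masks as defined in include/linux/sched.h of this era:

        #define TASK_NORMAL     (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
        #define TASK_ALL        (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)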
diff --git a/kernel/signal.c b/kernel/signal.c index 372771e948c2..3d09cf6cde75 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
| 680 | * No need to set need_resched since signal event passing | 680 | * No need to set need_resched since signal event passing |
| 681 | * goes through ->blocked | 681 | * goes through ->blocked |
| 682 | */ | 682 | */ |
| 683 | void signal_wake_up(struct task_struct *t, int resume) | 683 | void signal_wake_up_state(struct task_struct *t, unsigned int state) |
| 684 | { | 684 | { |
| 685 | unsigned int mask; | ||
| 686 | |||
| 687 | set_tsk_thread_flag(t, TIF_SIGPENDING); | 685 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
| 688 | |||
| 689 | /* | 686 | /* |
| 690 | * For SIGKILL, we want to wake it up in the stopped/traced/killable | 687 | * TASK_WAKEKILL also means wake it up in the stopped/traced/killable |
| 691 | * case. We don't check t->state here because there is a race with it | 688 | * case. We don't check t->state here because there is a race with it |
| 692 | * executing another processor and just now entering stopped state. | 689 | * executing another processor and just now entering stopped state. |
| 693 | * By using wake_up_state, we ensure the process will wake up and | 690 | * By using wake_up_state, we ensure the process will wake up and |
| 694 | * handle its death signal. | 691 | * handle its death signal. |
| 695 | */ | 692 | */ |
| 696 | mask = TASK_INTERRUPTIBLE; | 693 | if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) |
| 697 | if (resume) | ||
| 698 | mask |= TASK_WAKEKILL; | ||
| 699 | if (!wake_up_state(t, mask)) | ||
| 700 | kick_process(t); | 694 | kick_process(t); |
| 701 | } | 695 | } |
| 702 | 696 | ||
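With the wake state made an explicit argument, signal_wake_up() and the new ptrace_signal_wake_up() can be thin header wrappers that only choose the extra wake bits; roughly (a sketch of the header side of the same series):

    static inline void signal_wake_up(struct task_struct *t, bool resume)
    {
            signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
    }

    static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
    {
            signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
    }

The ptrace variant can wake a frozen __TASK_TRACED tracee, which plain signal_wake_up() deliberately cannot.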
| @@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t) | |||
| 844 | assert_spin_locked(&t->sighand->siglock); | 838 | assert_spin_locked(&t->sighand->siglock); |
| 845 | 839 | ||
| 846 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); | 840 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); |
| 847 | signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); | 841 | ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); |
| 848 | } | 842 | } |
| 849 | 843 | ||
| 850 | /* | 844 | /* |
| @@ -1800,6 +1794,10 @@ static inline int may_ptrace_stop(void) | |||
| 1800 | * If SIGKILL was already sent before the caller unlocked | 1794 | * If SIGKILL was already sent before the caller unlocked |
| 1801 | * ->siglock we must see ->core_state != NULL. Otherwise it | 1795 | * ->siglock we must see ->core_state != NULL. Otherwise it |
| 1802 | * is safe to enter schedule(). | 1796 | * is safe to enter schedule(). |
| 1797 | * | ||
| 1798 | * This is almost outdated, a task with the pending SIGKILL can't | ||
| 1799 | * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported | ||
| 1800 | * after SIGKILL was already dequeued. | ||
| 1803 | */ | 1801 | */ |
| 1804 | if (unlikely(current->mm->core_state) && | 1802 | if (unlikely(current->mm->core_state) && |
| 1805 | unlikely(current->mm == current->parent->mm)) | 1803 | unlikely(current->mm == current->parent->mm)) |
| @@ -1925,6 +1923,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) | |||
| 1925 | if (gstop_done) | 1923 | if (gstop_done) |
| 1926 | do_notify_parent_cldstop(current, false, why); | 1924 | do_notify_parent_cldstop(current, false, why); |
| 1927 | 1925 | ||
| 1926 | /* tasklist protects us from ptrace_freeze_traced() */ | ||
| 1928 | __set_current_state(TASK_RUNNING); | 1927 | __set_current_state(TASK_RUNNING); |
| 1929 | if (clear_code) | 1928 | if (clear_code) |
| 1930 | current->exit_code = 0; | 1929 | current->exit_code = 0; |
| @@ -3116,8 +3115,9 @@ int __save_altstack(stack_t __user *uss, unsigned long sp) | |||
| 3116 | 3115 | ||
| 3117 | #ifdef CONFIG_COMPAT | 3116 | #ifdef CONFIG_COMPAT |
| 3118 | #ifdef CONFIG_GENERIC_SIGALTSTACK | 3117 | #ifdef CONFIG_GENERIC_SIGALTSTACK |
| 3119 | asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, | 3118 | COMPAT_SYSCALL_DEFINE2(sigaltstack, |
| 3120 | compat_stack_t __user *uoss_ptr) | 3119 | const compat_stack_t __user *, uss_ptr, |
| 3120 | compat_stack_t __user *, uoss_ptr) | ||
| 3121 | { | 3121 | { |
| 3122 | stack_t uss, uoss; | 3122 | stack_t uss, uoss; |
| 3123 | int ret; | 3123 | int ret; |
diff --git a/kernel/smp.c b/kernel/smp.c index 29dd40a9f2f4..69f38bd98b42 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -33,6 +33,7 @@ struct call_function_data { | |||
| 33 | struct call_single_data csd; | 33 | struct call_single_data csd; |
| 34 | atomic_t refs; | 34 | atomic_t refs; |
| 35 | cpumask_var_t cpumask; | 35 | cpumask_var_t cpumask; |
| 36 | cpumask_var_t cpumask_ipi; | ||
| 36 | }; | 37 | }; |
| 37 | 38 | ||
| 38 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); | 39 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); |
| @@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 56 | if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, | 57 | if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, |
| 57 | cpu_to_node(cpu))) | 58 | cpu_to_node(cpu))) |
| 58 | return notifier_from_errno(-ENOMEM); | 59 | return notifier_from_errno(-ENOMEM); |
| 60 | if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, | ||
| 61 | cpu_to_node(cpu))) | ||
| 62 | return notifier_from_errno(-ENOMEM); | ||
| 59 | break; | 63 | break; |
| 60 | 64 | ||
| 61 | #ifdef CONFIG_HOTPLUG_CPU | 65 | #ifdef CONFIG_HOTPLUG_CPU |
| @@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 65 | case CPU_DEAD: | 69 | case CPU_DEAD: |
| 66 | case CPU_DEAD_FROZEN: | 70 | case CPU_DEAD_FROZEN: |
| 67 | free_cpumask_var(cfd->cpumask); | 71 | free_cpumask_var(cfd->cpumask); |
| 72 | free_cpumask_var(cfd->cpumask_ipi); | ||
| 68 | break; | 73 | break; |
| 69 | #endif | 74 | #endif |
| 70 | }; | 75 | }; |
| @@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask, | |||
| 526 | return; | 531 | return; |
| 527 | } | 532 | } |
| 528 | 533 | ||
| 534 | /* | ||
| 535 | * After we put an entry into the list, data->cpumask | ||
| 536 | * may be cleared again when another CPU sends another IPI for | ||
| 537 | * an SMP function call, so data->cpumask may already be zero. | ||
| 538 | */ | ||
| 539 | cpumask_copy(data->cpumask_ipi, data->cpumask); | ||
| 529 | raw_spin_lock_irqsave(&call_function.lock, flags); | 540 | raw_spin_lock_irqsave(&call_function.lock, flags); |
| 530 | /* | 541 | /* |
| 531 | * Place entry at the _HEAD_ of the list, so that any cpu still | 542 | * Place entry at the _HEAD_ of the list, so that any cpu still |
| @@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask, | |||
| 549 | smp_mb(); | 560 | smp_mb(); |
| 550 | 561 | ||
| 551 | /* Send a message to all CPUs in the map */ | 562 | /* Send a message to all CPUs in the map */ |
| 552 | arch_send_call_function_ipi_mask(data->cpumask); | 563 | arch_send_call_function_ipi_mask(data->cpumask_ipi); |
| 553 | 564 | ||
| 554 | /* Optionally wait for the CPUs to complete */ | 565 | /* Optionally wait for the CPUs to complete */ |
| 555 | if (wait) | 566 | if (wait) |
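The race being closed: once the entry is visible on the queue, target CPUs can process it off the back of an IPI from some other sender, run the function and clear their bits in data->cpumask, all before this CPU reaches arch_send_call_function_ipi_mask(). Without the private snapshot, the architecture code can then be handed a partially cleared or even empty mask. Roughly:

    /*
     *  CPU 0 (sender)                       CPU 2 (a target)
     *  -------------------------------      -------------------------------
     *  fills data->cpumask
     *  queues data (now globally visible)
     *                                       IPI from another sender arrives,
     *                                       finds data on the queue, runs the
     *                                       function, clears its cpumask bit
     *  arch_send_call_function_ipi_mask(data->cpumask)   <- mask already
     *                                                       (partly) empty
     */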
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 30b6de0d977c..c6d6400ee137 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
| @@ -339,6 +339,7 @@ void clockevents_config_and_register(struct clock_event_device *dev, | |||
| 339 | clockevents_config(dev, freq); | 339 | clockevents_config(dev, freq); |
| 340 | clockevents_register_device(dev); | 340 | clockevents_register_device(dev); |
| 341 | } | 341 | } |
| 342 | EXPORT_SYMBOL_GPL(clockevents_config_and_register); | ||
| 342 | 343 | ||
| 343 | /** | 344 | /** |
| 344 | * clockevents_update_freq - Update frequency and reprogram a clock event device. | 345 | * clockevents_update_freq - Update frequency and reprogram a clock event device. |
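With the symbol exported, a clock-event driver built as a module can use the combined helper instead of open-coding clockevents_config() plus clockevents_register_device(). A hypothetical sketch (all my_* names invented):

    static struct clock_event_device my_clkevt = {
            .name           = "my-timer",
            .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
            .rating         = 300,
            .set_next_event = my_set_next_event,
            .set_mode       = my_set_mode,
    };

    /* freq in Hz, then the min and max programmable delta in ticks */
    clockevents_config_and_register(&my_clkevt, rate, 2, 0xffffffff);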
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 3ffe4c5ad3f3..41473b4ad7a4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -3998,7 +3998,7 @@ static int ftrace_module_notify(struct notifier_block *self, | |||
| 3998 | 3998 | ||
| 3999 | struct notifier_block ftrace_module_nb = { | 3999 | struct notifier_block ftrace_module_nb = { |
| 4000 | .notifier_call = ftrace_module_notify, | 4000 | .notifier_call = ftrace_module_notify, |
| 4001 | .priority = 0, | 4001 | .priority = INT_MAX, /* Run before anything that can use kprobes */ |
| 4002 | }; | 4002 | }; |
| 4003 | 4003 | ||
| 4004 | extern unsigned long __start_mcount_loc[]; | 4004 | extern unsigned long __start_mcount_loc[]; |
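Module notifier callbacks run in descending priority order, so INT_MAX guarantees ftrace converts a freshly loaded module's mcount call sites to nops before any lower-priority notifier, for instance one arming kprobes, can execute or patch that text. A hypothetical notifier that is now guaranteed to run after ftrace's:

    static struct notifier_block my_module_nb = {
            .notifier_call  = my_module_cb,   /* hypothetical callback */
            .priority       = 0,              /* ordered after ftrace's INT_MAX */
    };

    register_module_notifier(&my_module_nb);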
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index e5125677efa0..3c13e46d7d24 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -2899,6 +2899,8 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2899 | if (copy_from_user(&buf, ubuf, cnt)) | 2899 | if (copy_from_user(&buf, ubuf, cnt)) |
| 2900 | return -EFAULT; | 2900 | return -EFAULT; |
| 2901 | 2901 | ||
| 2902 | buf[cnt] = 0; | ||
| 2903 | |||
| 2902 | trace_set_options(buf); | 2904 | trace_set_options(buf); |
| 2903 | 2905 | ||
| 2904 | *ppos += cnt; | 2906 | *ppos += cnt; |
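copy_from_user() moves raw bytes and never NUL-terminates, so any handler that goes on to parse the buffer as a string, as trace_set_options() does here, must terminate it explicitly. The usual idiom:

    char buf[64];                           /* illustrative size */

    if (cnt >= sizeof(buf))
            return -EINVAL;
    if (copy_from_user(buf, ubuf, cnt))
            return -EFAULT;
    buf[cnt] = '\0';                        /* copy_from_user() won't add this */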
| @@ -3452,7 +3454,7 @@ static int tracing_wait_pipe(struct file *filp) | |||
| 3452 | return -EINTR; | 3454 | return -EINTR; |
| 3453 | 3455 | ||
| 3454 | /* | 3456 | /* |
| 3455 | * We block until we read something and tracing is enabled. | 3457 | * We block until we read something and tracing is disabled. |
| 3456 | * We still block if tracing is disabled, but we have never | 3458 | * We still block if tracing is disabled, but we have never |
| 3457 | * read anything. This allows a user to cat this file, and | 3459 | * read anything. This allows a user to cat this file, and |
| 3458 | * then enable tracing. But after we have read something, | 3460 | * then enable tracing. But after we have read something, |
| @@ -3460,7 +3462,7 @@ static int tracing_wait_pipe(struct file *filp) | |||
| 3460 | * | 3462 | * |
| 3461 | * iter->pos will be 0 if we haven't read anything. | 3463 | * iter->pos will be 0 if we haven't read anything. |
| 3462 | */ | 3464 | */ |
| 3463 | if (tracing_is_enabled() && iter->pos) | 3465 | if (!tracing_is_enabled() && iter->pos) |
| 3464 | break; | 3466 | break; |
| 3465 | } | 3467 | } |
| 3466 | 3468 | ||
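The corrected condition gives the wait loop the behaviour the surrounding comment describes:

    /*
     *  tracing_on   read anything yet?   action
     *  ----------   ------------------   --------------------------------
     *  yes          either               keep waiting for data
     *  no           no                   keep waiting (cat-then-enable case)
     *  no           yes                  break; the reader sees an EOF
     */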
| @@ -4815,10 +4817,17 @@ rb_simple_write(struct file *filp, const char __user *ubuf, | |||
| 4815 | return ret; | 4817 | return ret; |
| 4816 | 4818 | ||
| 4817 | if (buffer) { | 4819 | if (buffer) { |
| 4818 | if (val) | 4820 | mutex_lock(&trace_types_lock); |
| 4821 | if (val) { | ||
| 4819 | ring_buffer_record_on(buffer); | 4822 | ring_buffer_record_on(buffer); |
| 4820 | else | 4823 | if (current_trace->start) |
| 4824 | current_trace->start(tr); | ||
| 4825 | } else { | ||
| 4821 | ring_buffer_record_off(buffer); | 4826 | ring_buffer_record_off(buffer); |
| 4827 | if (current_trace->stop) | ||
| 4828 | current_trace->stop(tr); | ||
| 4829 | } | ||
| 4830 | mutex_unlock(&trace_types_lock); | ||
| 4822 | } | 4831 | } |
| 4823 | 4832 | ||
| 4824 | (*ppos)++; | 4833 | (*ppos)++; |
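trace_types_lock is taken so current_trace cannot change while its optional hooks run alongside the ring-buffer toggle. A tracer that implements the hooks would look something like this (hypothetical; my_* names invented):

    static struct tracer my_tracer __read_mostly = {
            .name   = "mytracer",
            .init   = my_tracer_init,
            .start  = my_tracer_start,   /* invoked when tracing_on is set */
            .stop   = my_tracer_stop,    /* invoked when tracing_on is cleared */
    };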
