Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            |  5
-rw-r--r--  kernel/cpuset.c            | 29
-rw-r--r--  kernel/events/core.c       |  8
-rw-r--r--  kernel/exit.c              |  9
-rw-r--r--  kernel/futex.c             | 28
-rw-r--r--  kernel/hung_task.c         | 14
-rw-r--r--  kernel/lockdep.c           | 31
-rw-r--r--  kernel/panic.c             | 17
-rw-r--r--  kernel/printk.c            | 11
-rw-r--r--  kernel/ptrace.c            | 13
-rw-r--r--  kernel/rtmutex-debug.c     |  1
-rw-r--r--  kernel/sched_fair.c        | 14
-rw-r--r--  kernel/signal.c            |  2
-rw-r--r--  kernel/sysctl_binary.c     |  2
-rw-r--r--  kernel/time/clockevents.c  |  1
-rw-r--r--  kernel/time/clocksource.c  | 12
-rw-r--r--  kernel/timer.c             | 62
-rw-r--r--  kernel/wait.c              |  4
18 files changed, 196 insertions, 67 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9d5648f3cdc..a184470cf9b5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 			continue;
 		/* get old css_set pointer */
 		task_lock(tsk);
-		if (tsk->flags & PF_EXITING) {
-			/* ignore this task if it's going away */
-			task_unlock(tsk);
-			continue;
-		}
 		oldcg = tsk->cgroups;
 		get_css_set(oldcg);
 		task_unlock(tsk);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a426..0b1712dba587 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
 			    struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
 					nodemask_t *newmems)
 {
-	bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+	bool need_loop;
 
 repeat:
 	/*
@@ -962,6 +975,14 @@ repeat:
 		return;
 
 	task_lock(tsk);
+	/*
+	 * Determine if a loop is necessary if another thread is doing
+	 * get_mems_allowed(). If at least one node remains unchanged and
+	 * tsk does not have a mempolicy, then an empty nodemask will not be
+	 * possible when mems_allowed is larger than a word.
+	 */
+	need_loop = task_has_mempolicy(tsk) ||
+			!nodes_intersects(*newmems, tsk->mems_allowed);
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
 	/*
 	 * Allocation of memory is very fast, we needn't sleep when waiting
-	 * for the read-side. No wait is necessary, however, if at least one
-	 * node remains unchanged.
+	 * for the read-side.
 	 */
-	while (masks_disjoint &&
-			ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+	while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 		task_unlock(tsk);
 		if (!task_curr(tsk))
 			yield();
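The need_loop test above encodes an invariant worth spelling out: when the old and new nodemasks share at least one node and the task has no mempolicy, the two-step update (OR the new nodes in, then clear the stale ones) never exposes an empty mask to a lockless reader, even when the mask spans several words. A stand-alone user-space sketch of that two-step update (toy single-word masks, not the kernel's nodemask API):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long old = 0x6, new = 0x5;	/* masks share bit 2 */

    	unsigned long step1 = old | new;	/* a racing reader sees this... */
    	unsigned long step2 = step1 & new;	/* ...or this, but never 0 */

    	assert(step1 && step2);
    	printf("step1=%#lx step2=%#lx\n", step1, step2);
    	return 0;
    }

If the masks were disjoint (or a mempolicy could observe an intermediate state), the intermediate emptiness cannot be ruled out, which is exactly when need_loop forces the wait.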
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 923c6b5667db..fc0e7ff11dda 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3558,9 +3558,13 @@ static void ring_buffer_wakeup(struct perf_event *event)
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
-	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+	if (!rb)
+		goto unlock;
+
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
 		wake_up_all(&event->waitq);
-	}
+
+unlock:
 	rcu_read_unlock();
 }
 
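The fix above is the standard guard for RCU-protected pointers: rcu_dereference() may legitimately return NULL once the writer has detached the object, so the result must be tested before use. A rough user-space analogue using a C11 acquire load in place of rcu_dereference() (hypothetical types, not the perf API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct ring_buffer { int dummy; };

    /* stand-in for event->rb, published/cleared by another thread */
    static _Atomic(struct ring_buffer *) rb_ptr;

    static void wakeup_all(void)
    {
    	/* acquire-load plays the role of rcu_dereference() here */
    	struct ring_buffer *rb =
    		atomic_load_explicit(&rb_ptr, memory_order_acquire);
    	if (!rb)	/* the fix: buffer may already be detached */
    		return;
    	printf("waking waiters on %p\n", (void *)rb);
    }

    int main(void)
    {
    	static struct ring_buffer rb;

    	wakeup_all();	/* rb_ptr == NULL: safely skipped */
    	atomic_store_explicit(&rb_ptr, &rb, memory_order_release);
    	wakeup_all();	/* now dereferences a live buffer */
    	return 0;
    }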
diff --git a/kernel/exit.c b/kernel/exit.c
index d0b7d988f873..e6e01b959a0e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1540,8 +1540,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 	}
 
 	/* dead body doesn't have much to contribute */
-	if (p->exit_state == EXIT_DEAD)
+	if (unlikely(p->exit_state == EXIT_DEAD)) {
+		/*
+		 * But do not ignore this task until the tracer does
+		 * wait_task_zombie()->do_notify_parent().
+		 */
+		if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+			wo->notask_error = 0;
 		return 0;
+	}
 
 	/* slay zombie? */
 	if (p->exit_state == EXIT_ZOMBIE) {
diff --git a/kernel/futex.c b/kernel/futex.c
index ea87f4d2f455..1614be20173d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -314,17 +314,29 @@ again:
 #endif
 
 	lock_page(page_head);
+
+	/*
+	 * If page_head->mapping is NULL, then it cannot be a PageAnon
+	 * page; but it might be the ZERO_PAGE or in the gate area or
+	 * in a special mapping (all cases which we are happy to fail);
+	 * or it may have been a good file page when get_user_pages_fast
+	 * found it, but truncated or holepunched or subjected to
+	 * invalidate_complete_page2 before we got the page lock (also
+	 * cases which we are happy to fail). And we hold a reference,
+	 * so refcount care in invalidate_complete_page's remove_mapping
+	 * prevents drop_caches from setting mapping to NULL beneath us.
+	 *
+	 * The case we do have to guard against is when memory pressure made
+	 * shmem_writepage move it from filecache to swapcache beneath us:
+	 * an unlikely race, but we do need to retry for page_head->mapping.
+	 */
 	if (!page_head->mapping) {
+		int shmem_swizzled = PageSwapCache(page_head);
 		unlock_page(page_head);
 		put_page(page_head);
-		/*
-		 * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
-		 * trying to find one. RW mapping would have COW'd (and thus
-		 * have a mapping) so this page is RO and won't ever change.
-		 */
-		if ((page_head == ZERO_PAGE(address)))
-			return -EFAULT;
-		goto again;
+		if (shmem_swizzled)
+			goto again;
+		return -EFAULT;
 	}
 
 	/*
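The pattern introduced here is worth noting: the decisive state (is the page in swapcache?) is sampled while the page lock is still held, and only after dropping the lock does the code choose between retrying and failing. A toy model of that snapshot-then-retry shape (invented helpers, nothing from the futex code itself):

    #include <stdbool.h>
    #include <stdio.h>

    static int state;	/* 0: swapcache ("swizzled"), 1: mapped */

    static bool page_has_mapping(void)  { return state == 1; }
    static bool page_in_swapcache(void) { return state == 0; }

    static int get_key_toy(void)
    {
    	int tries = 0;
    again:
    	tries++;
    	/* lock_page() would go here */
    	if (!page_has_mapping()) {
    		bool swizzled = page_in_swapcache();	/* sample under the lock */
    		/* unlock_page(); put_page(); */
    		if (swizzled) {
    			state = 1;	/* writeback completed; mapping is back */
    			goto again;
    		}
    		return -1;	/* never had a mapping: fail (-EFAULT) */
    	}
    	printf("key resolved after %d tries\n", tries);
    	return 0;
    }

    int main(void)
    {
    	return get_key_toy();
    }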
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 8b1748d0172c..2e48ec0c2e91 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
 	/*
 	 * Ensure the task is not frozen.
-	 * Also, when a freshly created task is scheduled once, changes
-	 * its state to TASK_UNINTERRUPTIBLE without having ever been
-	 * switched out once, it musn't be checked.
+	 * Also, skip vfork and any other user process that freezer should skip.
 	 */
-	if (unlikely(t->flags & PF_FROZEN || !switch_count))
+	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+		return;
+
+	/*
+	 * When a freshly created task is scheduled once, changes its state to
+	 * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
+	 * mustn't be checked.
+	 */
+	if (unlikely(!switch_count))
 		return;
 
 	if (switch_count != t->last_switch_count) {
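Folding PF_FREEZER_SKIP into the same test works because task flags are independent bits, so one AND against a combined mask checks both conditions at once. A self-contained illustration (flag values copied from kernels of this vintage; treat them as assumptions):

    #include <stdio.h>

    #define PF_FROZEN	0x00010000	/* frozen for system suspend */
    #define PF_FREEZER_SKIP	0x40000000	/* freezer should not count it */

    int main(void)
    {
    	unsigned long flags = PF_FREEZER_SKIP;	/* e.g. parent sleeping in vfork() */

    	if (flags & (PF_FROZEN | PF_FREEZER_SKIP))
    		puts("skipped by the hung-task check");
    	return 0;
    }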
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f45c6817770e..8fb755132322 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -431,6 +431,7 @@ unsigned int max_lockdep_depth;
  * about it later on, in lockdep_info().
  */
 static int lockdep_init_error;
+static const char *lock_init_error;
 static unsigned long lockdep_init_trace_data[20];
 static struct stack_trace lockdep_init_trace = {
 	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
@@ -568,11 +569,12 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 	}
 }
 
-static void print_kernel_version(void)
+static void print_kernel_ident(void)
 {
-	printk("%s %.*s\n", init_utsname()->release,
+	printk("%s %.*s %s\n", init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
+		init_utsname()->version,
+		print_tainted());
 }
 
 static int very_verbose(struct lock_class *class)
@@ -656,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	if (unlikely(!lockdep_initialized)) {
 		lockdep_init();
 		lockdep_init_error = 1;
+		lock_init_error = lock->name;
 		save_stack_trace(&lockdep_init_trace);
 	}
 #endif
@@ -723,7 +726,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
-		return class;
+		goto out_set_class_cache;
 
 	/*
 	 * Debug-check: all keys must be persistent!
@@ -808,6 +811,7 @@ out_unlock_set:
 	graph_unlock();
 	raw_local_irq_restore(flags);
 
+out_set_class_cache:
 	if (!subclass || force)
 		lock->class_cache[0] = class;
 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
@@ -1149,7 +1153,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
 	printk("\n");
 	printk("======================================================\n");
 	printk("[ INFO: possible circular locking dependency detected ]\n");
-	print_kernel_version();
+	print_kernel_ident();
 	printk("-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
@@ -1488,7 +1492,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 	printk("======================================================\n");
 	printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
 		irqclass, irqclass);
-	print_kernel_version();
+	print_kernel_ident();
 	printk("------------------------------------------------------\n");
 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 		curr->comm, task_pid_nr(curr),
@@ -1717,7 +1721,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	printk("\n");
 	printk("=============================================\n");
 	printk("[ INFO: possible recursive locking detected ]\n");
-	print_kernel_version();
+	print_kernel_ident();
 	printk("---------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
@@ -2224,7 +2228,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	printk("\n");
 	printk("=================================\n");
 	printk("[ INFO: inconsistent lock state ]\n");
-	print_kernel_version();
+	print_kernel_ident();
 	printk("---------------------------------\n");
 
 	printk("inconsistent {%s} -> {%s} usage.\n",
@@ -2289,7 +2293,7 @@ print_irq_inversion_bug(struct task_struct *curr,
 	printk("\n");
 	printk("=========================================================\n");
 	printk("[ INFO: possible irq lock inversion dependency detected ]\n");
-	print_kernel_version();
+	print_kernel_ident();
 	printk("---------------------------------------------------------\n");
 	printk("%s/%d just changed the state of lock:\n",
 		curr->comm, task_pid_nr(curr));
@@ -3175,6 +3179,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk("\n");
 	printk("=====================================\n");
 	printk("[ BUG: bad unlock balance detected! ]\n");
+	print_kernel_ident();
 	printk("-------------------------------------\n");
 	printk("%s/%d is trying to release lock (",
 		curr->comm, task_pid_nr(curr));
@@ -3619,6 +3624,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk("\n");
 	printk("=================================\n");
 	printk("[ BUG: bad contention detected! ]\n");
+	print_kernel_ident();
 	printk("---------------------------------\n");
 	printk("%s/%d is trying to contend lock (",
 		curr->comm, task_pid_nr(curr));
@@ -3974,7 +3980,8 @@ void __init lockdep_info(void)
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 	if (lockdep_init_error) {
-		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+		printk("WARNING: lockdep init error! lock-%s was acquired"
+			" before lockdep_init\n", lock_init_error);
 		printk("Call stack leading to lockdep invocation was:\n");
 		print_stack_trace(&lockdep_init_trace, 0);
 	}
@@ -3993,6 +4000,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	printk("\n");
 	printk("=========================\n");
 	printk("[ BUG: held lock freed! ]\n");
+	print_kernel_ident();
 	printk("-------------------------\n");
 	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
@@ -4050,6 +4058,7 @@ static void print_held_locks_bug(struct task_struct *curr)
 	printk("\n");
 	printk("=====================================\n");
 	printk("[ BUG: lock held at task exit time! ]\n");
+	print_kernel_ident();
 	printk("-------------------------------------\n");
 	printk("%s/%d is exiting with locks still held!\n",
 		curr->comm, task_pid_nr(curr));
@@ -4147,6 +4156,7 @@ void lockdep_sys_exit(void)
 	printk("\n");
 	printk("================================================\n");
 	printk("[ BUG: lock held when returning to user space! ]\n");
+	print_kernel_ident();
 	printk("------------------------------------------------\n");
 	printk("%s/%d is leaving the kernel with locks still held!\n",
 		curr->comm, curr->pid);
@@ -4166,6 +4176,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	printk("\n");
 	printk("===============================\n");
 	printk("[ INFO: suspicious RCU usage. ]\n");
+	print_kernel_ident();
 	printk("-------------------------------\n");
 	printk("%s:%d %s!\n", file, line, s);
 	printk("\nother info that might help us debug this:\n\n");
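print_kernel_ident() leans on a compact printf trick: "%.*s" takes its field width from an argument, and strcspn() computes the length of the version string up to its first space, so only the build number (e.g. "#1") is printed. A user-space mock of the resulting banner line (the sample strings are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	const char *release = "3.2.0-rc6";
    	const char *version = "#1 SMP Fri Dec 16 10:00:00 CET 2011";
    	const char *taint   = "Tainted: G        W";	/* as print_tainted() might return */

    	/* prints: 3.2.0-rc6 #1 Tainted: G        W */
    	printf("%s %.*s %s\n", release,
    	       (int)strcspn(version, " "), version, taint);
    	return 0;
    }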
diff --git a/kernel/panic.c b/kernel/panic.c
index b26593604214..3458469eb7c3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -237,11 +237,20 @@ void add_taint(unsigned flag)
 	 * Can't trust the integrity of the kernel anymore.
 	 * We don't call directly debug_locks_off() because the issue
 	 * is not necessarily serious enough to set oops_in_progress to 1
-	 * Also we want to keep up lockdep for staging development and
-	 * post-warning case.
+	 * Also we want to keep up lockdep for staging/out-of-tree
+	 * development and post-warning case.
 	 */
-	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+	switch (flag) {
+	case TAINT_CRAP:
+	case TAINT_OOT_MODULE:
+	case TAINT_WARN:
+	case TAINT_FIRMWARE_WORKAROUND:
+		break;
+
+	default:
+		if (__debug_locks_off())
+			printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+	}
 
 	set_bit(flag, &tainted_mask);
 }
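The switch makes the policy explicit: only taints that say nothing about lock-state integrity (crap/staging code, out-of-tree module, warning, firmware workaround) leave lock debugging enabled; any other taint still shuts lockdep off. A compilable restatement of just that policy (enum values invented for the sketch):

    #include <stdio.h>

    enum taint_flag { TAINT_CRAP, TAINT_OOT_MODULE, TAINT_WARN,
    		  TAINT_FIRMWARE_WORKAROUND, TAINT_DIE };

    static void add_taint_toy(enum taint_flag flag)
    {
    	switch (flag) {
    	case TAINT_CRAP:
    	case TAINT_OOT_MODULE:
    	case TAINT_WARN:
    	case TAINT_FIRMWARE_WORKAROUND:
    		break;	/* harmless: keep lock debugging alive */
    	default:
    		puts("Disabling lock debugging due to kernel taint");
    	}
    }

    int main(void)
    {
    	add_taint_toy(TAINT_OOT_MODULE);	/* silent */
    	add_taint_toy(TAINT_DIE);		/* disables lock debugging */
    	return 0;
    }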
diff --git a/kernel/printk.c b/kernel/printk.c
index 7982a0a841ea..989e4a52da76 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -199,7 +199,7 @@ void __init setup_log_buf(int early)
 		unsigned long mem;
 
 		mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
-		if (mem == MEMBLOCK_ERROR)
+		if (!mem)
 			return;
 		new_log_buf = __va(mem);
 	} else {
@@ -688,6 +688,7 @@ static void zap_locks(void)
 
 	oops_timestamp = jiffies;
 
+	debug_locks_off();
 	/* If a crash is occurring, make sure we can't deadlock */
 	raw_spin_lock_init(&logbuf_lock);
 	/* And make sure that we print immediately */
@@ -840,9 +841,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	boot_delay_msec();
 	printk_delay();
 
-	preempt_disable();
 	/* This stops the holder of console_sem just where we want him */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	this_cpu = smp_processor_id();
 
 	/*
@@ -856,7 +856,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	 * recursion and return - but flag the recursion so that
 	 * it can be printed at the next appropriate moment:
 	 */
-	if (!oops_in_progress) {
+	if (!oops_in_progress && !lockdep_recursing(current)) {
 		recursion_bug = 1;
 		goto out_restore_irqs;
 	}
@@ -962,9 +962,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 
 	lockdep_on();
 out_restore_irqs:
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
-	preempt_enable();
 	return printed_len;
 }
 EXPORT_SYMBOL(printk);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 24d04477b257..78ab24a7b0e4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
 	 */
 	if (!(child->flags & PF_EXITING) &&
 	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
-	     child->signal->group_stop_count))
+	     child->signal->group_stop_count)) {
 		child->jobctl |= JOBCTL_STOP_PENDING;
 
+		/*
+		 * This is only possible if this thread was cloned by the
+		 * traced task running in the stopped group, set the signal
+		 * for the future reports.
+		 * FIXME: we should change ptrace_init_task() to handle this
+		 * case.
+		 */
+		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+			child->jobctl |= SIGSTOP;
+	}
+
 	/*
 	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 	 * @child in the butt. Note that @resume should be used iff @child
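The fallback works because jobctl is a bit-field: the low 16 bits (JOBCTL_STOP_SIGMASK) carry the number of the signal to report for the group stop, and a freshly cloned thread can reach __ptrace_unlink() with that field still zero. A small demonstration (constants mirror include/linux/sched.h of this era; taken here as assumptions):

    #include <stdio.h>

    #define JOBCTL_STOP_SIGMASK	0xffffUL	/* signr of the pending stop */
    #define JOBCTL_STOP_PENDING	(1UL << 17)
    #define SIGSTOP			19

    int main(void)
    {
    	unsigned long jobctl = 0;	/* fresh clone: no stop signal recorded */

    	jobctl |= JOBCTL_STOP_PENDING;
    	if (!(jobctl & JOBCTL_STOP_SIGMASK))
    		jobctl |= SIGSTOP;	/* the added fallback */

    	printf("signal to report: %lu\n", jobctl & JOBCTL_STOP_SIGMASK);
    	return 0;
    }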
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 8eafd1bd273e..16502d3a71c8 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -101,6 +101,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 
 	printk("\n============================================\n");
 	printk( "[ BUG: circular locking deadlock detected! ]\n");
+	printk("%s\n", print_tainted());
 	printk( "--------------------------------------------\n");
 	printk("%s/%d is deadlocking current task %s/%d\n\n",
 		task->comm, task_pid_nr(task),
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a78ed2736ba7..8a39fa3e3c6c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2352,13 +2352,11 @@ again:
 		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
 			continue;
 
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
-			if (!smt) {
-				smt = 1;
-				goto again;
-			}
+		if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+			break;
+
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
 			break;
-		}
 
 		sg = sd->groups;
 		do {
@@ -2378,6 +2376,10 @@ next:
 			sg = sg->next;
 		} while (sg != sd->groups);
 	}
+	if (!smt) {
+		smt = 1;
+		goto again;
+	}
 done:
 	rcu_read_unlock();
 
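Moving the `smt = 1; goto again;` to after the domain loop turns the search into two clean passes: first scan the cache-sharing levels while ignoring SMT siblings, and only if that yields nothing, rescan with siblings allowed. The control flow in miniature (toy topology, nothing from the scheduler itself):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
    	bool smt = false;
    	int pick = -1;
    again:
    	for (int cpu = 0; cpu < 4; cpu++) {
    		if (!smt && (cpu & 1))
    			continue;	/* first pass skips SMT siblings */
    		if (cpu == 3) {		/* pretend only cpu3 is idle */
    			pick = cpu;
    			goto done;
    		}
    	}
    	if (!smt) {			/* full pass done; now allow SMT */
    		smt = true;
    		goto again;
    	}
    done:
    	printf("picked cpu %d on %s pass\n", pick, smt ? "SMT" : "non-SMT");
    	return 0;
    }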
diff --git a/kernel/signal.c b/kernel/signal.c
index b3f78d09a105..206551563cce 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1994,8 +1994,6 @@ static bool do_signal_stop(int signr)
 		 */
 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
 			sig->group_exit_code = signr;
-		else
-			WARN_ON_ONCE(!current->ptrace);
 
 		sig->group_stop_count = 0;
 
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b511afa1..a650694883a1 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
 	fput(file);
 out_putname:
-	putname(pathname);
+	__putname(pathname);
 out:
 	return result;
 }
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c4eb71c8b2ea..1ecd6ba36d6c 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -387,7 +387,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
 	 * released list and do a notify add later.
 	 */
 	if (old) {
-		old->event_handler = clockevents_handle_noop;
 		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
 		list_del(&old->list);
 		list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index da2f760e780c..d3ad022136e5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -647,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
 
 /**
  * __clocksource_updatefreq_scale - Used update clocksource with new freq
- * @t:		clocksource to be registered
+ * @cs:		clocksource to be registered
  * @scale:	Scale factor multiplied against freq to get clocksource hz
  * @freq:	clocksource frequency (cycles per second) divided by scale
  *
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
 /**
  * __clocksource_register_scale - Used to install new clocksources
- * @t:		clocksource to be registered
+ * @cs:		clocksource to be registered
  * @scale:	Scale factor multiplied against freq to get clocksource hz
  * @freq:	clocksource frequency (cycles per second) divided by scale
  *
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
 
 /**
  * clocksource_register - Used to install new clocksources
- * @t:		clocksource to be registered
+ * @cs:		clocksource to be registered
  *
  * Returns -EBUSY if registration fails, zero otherwise.
  */
@@ -761,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs:		clocksource to be changed
+ * @rating:	new rating
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -772,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
  * clocksource_unregister - remove a registered clocksource
+ * @cs:		clocksource to be unregistered
  */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -787,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
  * @dev:	unused
+ * @attr:	unused
  * @buf:	char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing current clocksource.
@@ -807,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 /**
  * sysfs_override_clocksource - interface for manually overriding clocksource
  * @dev:	unused
+ * @attr:	unused
  * @buf:	name of override clocksource
  * @count:	length of buffer
  *
@@ -842,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 /**
  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
  * @dev:	unused
+ * @attr:	unused
  * @buf:	char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing registered clocksources
diff --git a/kernel/timer.c b/kernel/timer.c
index 9c3c62b0c4bc..a297ffcf888e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -427,6 +427,12 @@ static int timer_fixup_init(void *addr, enum debug_obj_state state)
 	}
 }
 
+/* Stub timer callback for improperly used timers. */
+static void stub_timer(unsigned long data)
+{
+	WARN_ON(1);
+}
+
 /*
  * fixup_activate is called when:
  * - an active object is activated
430/* 436/*
431 * fixup_activate is called when: 437 * fixup_activate is called when:
432 * - an active object is activated 438 * - an active object is activated
@@ -450,7 +456,8 @@ static int timer_fixup_activate(void *addr, enum debug_obj_state state)
 			debug_object_activate(timer, &timer_debug_descr);
 			return 0;
 		} else {
-			WARN_ON_ONCE(1);
+			setup_timer(timer, stub_timer, 0);
+			return 1;
 		}
 		return 0;
 
@@ -480,12 +487,40 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state)
 	}
 }
 
+/*
+ * fixup_assert_init is called when:
+ * - an untracked/uninit-ed object is found
+ */
+static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
+{
+	struct timer_list *timer = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_NOTAVAILABLE:
+		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
+			/*
+			 * This is not really a fixup. The timer was
+			 * statically initialized. We just make sure that it
+			 * is tracked in the object tracker.
+			 */
+			debug_object_init(timer, &timer_debug_descr);
+			return 0;
+		} else {
+			setup_timer(timer, stub_timer, 0);
+			return 1;
+		}
+	default:
+		return 0;
+	}
+}
+
 static struct debug_obj_descr timer_debug_descr = {
 	.name			= "timer_list",
 	.debug_hint		= timer_debug_hint,
 	.fixup_init		= timer_fixup_init,
 	.fixup_activate		= timer_fixup_activate,
 	.fixup_free		= timer_fixup_free,
+	.fixup_assert_init	= timer_fixup_assert_init,
 };
 
 static inline void debug_timer_init(struct timer_list *timer)
@@ -508,6 +543,11 @@ static inline void debug_timer_free(struct timer_list *timer)
 	debug_object_free(timer, &timer_debug_descr);
 }
 
+static inline void debug_timer_assert_init(struct timer_list *timer)
+{
+	debug_object_assert_init(timer, &timer_debug_descr);
+}
+
 static void __init_timer(struct timer_list *timer,
 			 const char *name,
 			 struct lock_class_key *key);
@@ -531,6 +571,7 @@ EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
 static inline void debug_timer_init(struct timer_list *timer) { }
 static inline void debug_timer_activate(struct timer_list *timer) { }
 static inline void debug_timer_deactivate(struct timer_list *timer) { }
+static inline void debug_timer_assert_init(struct timer_list *timer) { }
 #endif
 
 static inline void debug_init(struct timer_list *timer)
@@ -552,6 +593,11 @@ static inline void debug_deactivate(struct timer_list *timer)
 	trace_timer_cancel(timer);
 }
 
+static inline void debug_assert_init(struct timer_list *timer)
+{
+	debug_timer_assert_init(timer);
+}
+
 static void __init_timer(struct timer_list *timer,
 			 const char *name,
 			 struct lock_class_key *key)
@@ -902,6 +948,8 @@ int del_timer(struct timer_list *timer)
 	unsigned long flags;
 	int ret = 0;
 
+	debug_assert_init(timer);
+
 	timer_stats_timer_clear_start_info(timer);
 	if (timer_pending(timer)) {
 		base = lock_timer_base(timer, &flags);
@@ -932,6 +980,8 @@ int try_to_del_timer_sync(struct timer_list *timer)
 	unsigned long flags;
 	int ret = -1;
 
+	debug_assert_init(timer);
+
 	base = lock_timer_base(timer, &flags);
 
 	if (base->running_timer == timer)
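The new fixup policy is the interesting part: when debugobjects catches a timer that was never initialized, it no longer just warns; it rewires the timer to a stub callback, so if the bogus timer does fire the kernel gets a WARN instead of a jump through garbage. The idea reduced to plain C (hand-rolled struct, not the kernel's timer_list):

    #include <stdio.h>

    struct toy_timer {
    	void (*function)(unsigned long);
    	unsigned long data;
    };

    static void stub_timer(unsigned long data)
    {
    	printf("WARNING: uninitialized timer fired (data=%lu)\n", data);
    }

    /* What the fixup does in spirit: give the timer a safe callback. */
    static void fixup_assert_init(struct toy_timer *t)
    {
    	if (!t->function) {	/* "untracked/uninit-ed object" */
    		t->function = stub_timer;
    		t->data = 0;
    	}
    }

    int main(void)
    {
    	struct toy_timer t = { 0 };	/* never passed through init_timer() */

    	fixup_assert_init(&t);
    	t.function(t.data);	/* harmless stub instead of a crash */
    	return 0;
    }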
diff --git a/kernel/wait.c b/kernel/wait.c
index 26fa7797f90f..7fdd9eaca2c3 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,10 +10,10 @@
 #include <linux/wait.h>
 #include <linux/hash.h>
 
-void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
+void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
 {
 	spin_lock_init(&q->lock);
-	lockdep_set_class(&q->lock, key);
+	lockdep_set_class_and_name(&q->lock, key, name);
 	INIT_LIST_HEAD(&q->task_list);
 }
 
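The extra name parameter exists so lockdep reports can say which waitqueue a lock class belongs to; the companion init_waitqueue_head() wrapper presumably stringifies its argument to supply it (the real wrapper lives in include/linux/wait.h). How a name travels from the macro to the lock class, modeled in user space with toy types:

    #include <stdio.h>

    struct toy_waitqueue {
    	const char *lock_name;	/* stands in for the lockdep class name */
    };

    static void __init_waitqueue_head(struct toy_waitqueue *q, const char *name)
    {
    	q->lock_name = name;	/* lockdep_set_class_and_name(&q->lock, key, name) */
    }

    #define init_waitqueue_head(q) __init_waitqueue_head((q), #q)

    int main(void)
    {
    	struct toy_waitqueue done;

    	init_waitqueue_head(&done);
    	printf("lock class named: %s\n", done.lock_name);	/* prints "&done" */
    	return 0;
    }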