Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c                      22
-rw-r--r--  kernel/irq/irqdesc.c                   7
-rw-r--r--  kernel/irq/manage.c                   17
-rw-r--r--  kernel/locking/mutex-debug.c          19
-rw-r--r--  kernel/module.c                        6
-rw-r--r--  kernel/power/suspend.c                 3
-rw-r--r--  kernel/sched/deadline.c               11
-rw-r--r--  kernel/sched/fair.c                   16
-rw-r--r--  kernel/sched/rt.c                      7
-rw-r--r--  kernel/sched/sched.h                   9
-rw-r--r--  kernel/seccomp.c                      19
-rw-r--r--  kernel/softirq.c                       5
-rw-r--r--  kernel/time/tick-common.c              2
-rw-r--r--  kernel/time/tick-sched.c               5
-rw-r--r--  kernel/timer.c                         2
-rw-r--r--  kernel/trace/ftrace.c                 27
-rw-r--r--  kernel/trace/trace_events_trigger.c    2
-rw-r--r--  kernel/trace/trace_functions.c        16
-rw-r--r--  kernel/trace/trace_uprobe.c            6
-rw-r--r--  kernel/user_namespace.c               11
-rw-r--r--  kernel/watchdog.c                      6

21 files changed, 134 insertions(+), 84 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d55092ceee29..6b715c0af1b1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -234,6 +234,11 @@ again:
 			goto again;
 		}
 		timer->base = new_base;
+	} else {
+		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+			cpu = this_cpu;
+			goto again;
+		}
 	}
 	return new_base;
 }
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
 	cpu_base->expires_next.tv64 = expires_next.tv64;
 
+	/*
+	 * If a hang was detected in the last timer interrupt then we
+	 * leave the hang delay active in the hardware. We want the
+	 * system to make progress. That also prevents the following
+	 * scenario:
+	 * T1 expires 50ms from now
+	 * T2 expires 5s from now
+	 *
+	 * T1 is removed, so this code is called and would reprogram
+	 * the hardware to 5s from now. Any hrtimer_start after that
+	 * will not reprogram the hardware due to hang_detected being
+	 * set. So we'd effectively block all timers until the T2 event
+	 * fires.
+	 */
+	if (cpu_base->hang_detected)
+		return;
+
 	if (cpu_base->expires_next.tv64 != KTIME_MAX)
 		tick_program_event(cpu_base->expires_next, 1);
 }
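The new hang_detected bail-out is easiest to see with the T1/T2 scenario from the comment. Below is a minimal userspace model in plain C (the struct and function names are hypothetical stand-ins; the real code programs a clockevent device) showing why reprogramming here would park the hardware on the far-away T2 expiry:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the relevant hrtimer_cpu_base fields. */
struct cpu_base_model {
	long long expires_next_ns;	/* next event the "hardware" is armed for */
	bool hang_detected;		/* a hang was seen in the last interrupt */
};

/* Mirrors the guard added above: skip reprogramming while hang_detected. */
static void force_reprogram(struct cpu_base_model *b, long long next_ns)
{
	b->expires_next_ns = next_ns;
	if (b->hang_detected)
		return;		/* leave the short hang delay armed instead */
	printf("hardware reprogrammed to %lld ns\n", next_ns);
}

int main(void)
{
	struct cpu_base_model b = { .hang_detected = true };

	/* T1 (50ms) was just removed; the next pending timer is T2 (5s). */
	force_reprogram(&b, 5000000000LL);
	/*
	 * Without the early return, the event device would now fire in 5s,
	 * and hang_detected would also suppress later hrtimer_start()
	 * reprogramming - stalling every timer until the T2 event.
	 */
	return 0;
}
```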
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a7174617616b..bb07f2928f4b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 		if (from > irq)
 			return -EINVAL;
 		from = irq;
+	} else {
+		/*
+		 * For interrupts which are freely allocated the
+		 * architecture can force a lower bound to the @from
+		 * argument. x86 uses this to exclude the GSI space.
+		 */
+		from = arch_dynirq_lower_bound(from);
 	}
 
 	mutex_lock(&sparse_irq_lock);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2486a4c1a710..d34131ca372b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -180,7 +180,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	int ret;
 
-	ret = chip->irq_set_affinity(data, mask, false);
+	ret = chip->irq_set_affinity(data, mask, force);
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 		cpumask_copy(data->affinity, mask);
@@ -192,7 +192,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+			    bool force)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	struct irq_desc *desc = irq_data_to_desc(data);
@@ -202,7 +203,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, false);
+		ret = irq_do_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -217,13 +218,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 	return ret;
 }
 
-/**
- *	irq_set_affinity - Set the irq affinity of a given irq
- *	@irq:		Interrupt to set affinity
- *	@mask:		cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -233,7 +228,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 		return -EINVAL;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
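This hunk only changes the internals; the exported entry point becomes a three-argument __irq_set_affinity(). A sketch of the matching header-side wrappers (assumed here; the include/linux/interrupt.h hunk is not part of this diff) shows how existing callers keep the old two-argument behaviour while a forced variant becomes possible:

```c
/*
 * Assumed companion change in include/linux/interrupt.h (not shown in
 * this diff): the old two-argument API becomes a thin wrapper, and a
 * "force" variant is added for callers that must override the usual
 * online-CPU checks.
 */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *m,
			      bool force);

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return __irq_set_affinity(irq, m, false);	/* old behaviour */
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *m)
{
	return __irq_set_affinity(irq, m, true);	/* forced variant */
}
```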
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index e1191c996c59..5cf6731b98e9 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -71,18 +71,17 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 void debug_mutex_unlock(struct mutex *lock)
 {
-	if (unlikely(!debug_locks))
-		return;
-
-	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-
-	if (!lock->owner)
-		DEBUG_LOCKS_WARN_ON(!lock->owner);
-	else
-		DEBUG_LOCKS_WARN_ON(lock->owner != current);
-
-	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-	mutex_clear_owner(lock);
+	if (likely(debug_locks)) {
+		DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+
+		if (!lock->owner)
+			DEBUG_LOCKS_WARN_ON(!lock->owner);
+		else
+			DEBUG_LOCKS_WARN_ON(lock->owner != current);
+
+		DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
+		mutex_clear_owner(lock);
+	}
 
 	/*
 	 * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
diff --git a/kernel/module.c b/kernel/module.c
index 11869408f79b..079c4615607d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
-	if (!(flags & O_NONBLOCK))
-		pr_warn("waiting module removal not supported: please upgrade\n");
-
 	if (mutex_lock_interruptible(&module_mutex) != 0)
 		return -EINTR;
 
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
 	dynamic_debug_setup(info->debug, info->num_debug);
 
+	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+	ftrace_module_init(mod);
+
 	/* Finally it's fully formed, ready to start executing. */
 	err = complete_formation(mod, info);
 	if (err)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c3ad9cafe930..8233cd4047d7 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/cpuidle.h>
 #include <linux/syscalls.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -53,7 +54,9 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
+	cpuidle_resume();
 	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+	cpuidle_pause();
 }
 
 void freeze_wake(void)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 27ef40925525..b08095786cb8 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1021,8 +1021,17 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 
 	dl_rq = &rq->dl;
 
-	if (need_pull_dl_task(rq, prev))
+	if (need_pull_dl_task(rq, prev)) {
 		pull_dl_task(rq);
+		/*
+		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
+		 * means a stop task can slip in, in which case we need to
+		 * re-start task selection.
+		 */
+		if (rq->stop && rq->stop->on_rq)
+			return RETRY_TASK;
+	}
+
 	/*
 	 * When prev is DL, we may throttle it in put_prev_task().
 	 * So, we update time before we check for dl_nr_running.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7e9bd0b1fa9e..7570dd969c28 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1497,7 +1497,7 @@ static void task_numa_placement(struct task_struct *p)
 	/* If the task is part of a group prevent parallel updates to group stats */
 	if (p->numa_group) {
 		group_lock = &p->numa_group->lock;
-		spin_lock(group_lock);
+		spin_lock_irq(group_lock);
 	}
 
 	/* Find the node with the highest number of faults */
@@ -1572,7 +1572,7 @@ static void task_numa_placement(struct task_struct *p)
 			}
 		}
 
-		spin_unlock(group_lock);
+		spin_unlock_irq(group_lock);
 	}
 
 	/* Preferred node as the node with the most faults */
@@ -1677,7 +1677,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	if (!join)
 		return;
 
-	double_lock(&my_grp->lock, &grp->lock);
+	BUG_ON(irqs_disabled());
+	double_lock_irq(&my_grp->lock, &grp->lock);
 
 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
 		my_grp->faults[i] -= p->numa_faults_memory[i];
@@ -1691,7 +1692,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	grp->nr_tasks++;
 
 	spin_unlock(&my_grp->lock);
-	spin_unlock(&grp->lock);
+	spin_unlock_irq(&grp->lock);
 
 	rcu_assign_pointer(p->numa_group, grp);
 
@@ -1710,14 +1711,14 @@ void task_numa_free(struct task_struct *p)
 	void *numa_faults = p->numa_faults_memory;
 
 	if (grp) {
-		spin_lock(&grp->lock);
+		spin_lock_irq(&grp->lock);
 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] -= p->numa_faults_memory[i];
 		grp->total_faults -= p->total_numa_faults;
 
 		list_del(&p->numa_entry);
 		grp->nr_tasks--;
-		spin_unlock(&grp->lock);
+		spin_unlock_irq(&grp->lock);
 		rcu_assign_pointer(p->numa_group, NULL);
 		put_numa_group(grp);
 	}
@@ -6727,7 +6728,8 @@ static int idle_balance(struct rq *this_rq)
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-	    (this_rq->dl.dl_nr_running ||
+	    ((this_rq->stop && this_rq->stop->on_rq) ||
+	     this_rq->dl.dl_nr_running ||
 	     (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
 		pulled_task = -1;
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d8cdf1618551..bd2267ad404f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1362,10 +1362,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
-		 * means a dl task can slip in, in which case we need to
-		 * re-start task selection.
+		 * means a dl or stop task can slip in, in which case we need
+		 * to re-start task selection.
 		 */
-		if (unlikely(rq->dl.dl_nr_running))
+		if (unlikely((rq->stop && rq->stop->on_rq) ||
+			     rq->dl.dl_nr_running))
 			return RETRY_TASK;
 	}
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f28d3a2..456e492a3dca 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1385,6 +1385,15 @@ static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
+static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+{
+	if (l1 > l2)
+		swap(l1, l2);
+
+	spin_lock_irq(l1);
+	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
 {
 	if (l1 > l2)
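double_lock_irq() keeps the trick its neighbours use: ordering the two locks by address gives every caller the same global lock order, so concurrent (a, b) and (b, a) callers cannot deadlock. A runnable userspace analogue with pthread mutexes (names hypothetical, pointer comparison done exactly as the kernel does it):

```c
#include <pthread.h>
#include <stdio.h>

/*
 * Userspace analogue of double_lock(): always take the lower-addressed
 * lock first, so concurrent (a,b) and (b,a) callers agree on the order.
 */
static void double_lock(pthread_mutex_t *l1, pthread_mutex_t *l2)
{
	if (l1 > l2) {		/* compare the pointers themselves */
		pthread_mutex_t *tmp = l1;
		l1 = l2;
		l2 = tmp;
	}
	pthread_mutex_lock(l1);
	pthread_mutex_lock(l2);
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	double_lock(&a, &b);	/* acquires in the same order as (&b, &a) */
	puts("both locks held");
	pthread_mutex_unlock(&b);
	pthread_mutex_unlock(&a);
	return 0;
}
```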
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d8d046c0726a..b35c21503a36 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -69,18 +69,17 @@ static void populate_seccomp_data(struct seccomp_data *sd)
 {
 	struct task_struct *task = current;
 	struct pt_regs *regs = task_pt_regs(task);
+	unsigned long args[6];
 
 	sd->nr = syscall_get_nr(task, regs);
 	sd->arch = syscall_get_arch();
-
-	/* Unroll syscall_get_args to help gcc on arm. */
-	syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
-	syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]);
-	syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]);
-	syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]);
-	syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]);
-	syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);
-
+	syscall_get_arguments(task, regs, 0, 6, args);
+	sd->args[0] = args[0];
+	sd->args[1] = args[1];
+	sd->args[2] = args[2];
+	sd->args[3] = args[3];
+	sd->args[4] = args[4];
+	sd->args[5] = args[5];
 	sd->instruction_pointer = KSTK_EIP(task);
 }
 
@@ -256,6 +255,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 		goto free_prog;
 
 	/* Allocate a new seccomp_filter */
+	ret = -ENOMEM;
 	filter = kzalloc(sizeof(struct seccomp_filter) +
 			 sizeof(struct sock_filter_int) * new_len,
 			 GFP_KERNEL|__GFP_NOWARN);
@@ -265,6 +265,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 	ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len);
 	if (ret)
 		goto free_filter;
+	kfree(fp);
 
 	atomic_set(&filter->usage, 1);
 	filter->len = new_len;
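The populate_seccomp_data() rewrite matters because sd->args[] is a fixed-width u64 ABI field while syscall_get_arguments() stores native unsigned longs; casting &sd->args[i] to (unsigned long *) fills only half of each slot when long is 32-bit. A runnable sketch of that hazard, with uint32_t standing in for a 32-bit arch's unsigned long (this motivation is inferred from the types, not stated in the diff):

```c
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/*
 * Model: seccomp_data carries u64 args (fixed ABI), while the arch
 * helper stores native unsigned longs.  uint32_t stands in here for
 * "unsigned long on a 32-bit arch".
 */
typedef uint32_t arch_ulong;

static void get_one_arg(arch_ulong *dst, arch_ulong val)
{
	*dst = val;	/* deliberately ill-typed, mirroring the old casts:
			 * writes only 4 of the 8 bytes of a u64 slot */
}

int main(void)
{
	uint64_t args[2];

	memset(args, 0xff, sizeof(args));		/* stale garbage */
	get_one_arg((arch_ulong *)&args[0], 42);	/* old unrolled style */
	/* On a little-endian host the high half keeps its garbage: */
	printf("old style: args[0] = %#" PRIx64 "\n", args[0]);

	/* The fixed code copies via a correctly-typed local array: */
	arch_ulong tmp[2] = { 42, 7 };
	args[0] = tmp[0];
	args[1] = tmp[1];
	printf("new style: args[0] = %#" PRIx64 "\n", args[0]);
	return 0;
}
```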
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b50990a5bea0..33e4648ae0e7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
 {
 	return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+	return from;
+}
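__weak provides a link-time default: the stub above is used unless an architecture (x86, per the irqdesc.c comment) supplies a strong definition. The same mechanism can be tried in a single userspace file with GCC or Clang; the override shown in the comment block is hypothetical:

```c
#include <stdio.h>

/*
 * Weak default, as in the kernel hunk above: used unless some other
 * object file provides a strong definition at link time.
 */
__attribute__((weak)) unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}

/*
 * Uncommenting this strong definition (normally it would live in a
 * separate arch object file) silently replaces the weak one:
 *
 * unsigned int arch_dynirq_lower_bound(unsigned int from)
 * {
 *	unsigned int gsi_top = 24;	// hypothetical GSI count
 *	return from < gsi_top ? gsi_top : from;
 * }
 */

int main(void)
{
	printf("lower bound for from=5: %u\n", arch_dynirq_lower_bound(5));
	return 0;
}
```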
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 015661279b68..0a0608edeb26 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -276,7 +276,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
 bool tick_check_replacement(struct clock_event_device *curdev,
 			    struct clock_event_device *newdev)
 {
-	if (tick_check_percpu(curdev, newdev, smp_processor_id()))
+	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
 		return false;
 
 	return tick_check_preferred(curdev, newdev);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69c67ec..6558b7ac112d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -84,6 +84,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
+	} else {
+		write_sequnlock(&jiffies_lock);
+		return;
 	}
 	write_sequnlock(&jiffies_lock);
 	update_wall_time();
@@ -967,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t next;
 
-	if (!tick_nohz_active)
+	if (!tick_nohz_enabled)
 		return;
 
 	local_irq_disable();
diff --git a/kernel/timer.c b/kernel/timer.c
index 87bd529879c2..3bb01a323b2a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 
 	bit = find_last_bit(&mask, BITS_PER_LONG);
 
-	mask = (1 << bit) - 1;
+	mask = (1UL << bit) - 1;
 
 	expires_limit = expires_limit & ~(mask);
 
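The one-character fix above is an integer-width bug: the literal 1 is an int, so for bit >= 32 (possible on 64-bit, since find_last_bit() can return up to BITS_PER_LONG - 1) the shift is undefined behaviour and, on x86-64, the shift count is simply truncated. A small runnable demonstration:

```c
#include <stdio.h>

int main(void)
{
	unsigned int bit = 40;		/* plausible on a 64-bit kernel */
	unsigned long bad, good;

	/*
	 * "1" is an int: shifting it by >= 32 is undefined behaviour.
	 * On x86-64 the hardware truncates the count, so this typically
	 * yields (1 << 8) - 1 rather than a 40-bit mask.
	 */
	bad  = (1 << bit) - 1;
	good = (1UL << bit) - 1;

	printf("1 << bit  : mask = %#lx\n", bad);
	printf("1UL << bit: mask = %#lx\n", good);
	return 0;
}
```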
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1fd4b9479210..4a54a25afa2f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-	struct module *mod = data;
-
-	if (val == MODULE_STATE_COMING)
-		ftrace_init_module(mod, mod->ftrace_callsites,
-				   mod->ftrace_callsites +
-				   mod->num_ftrace_callsites);
-	return 0;
+	ftrace_init_module(mod, mod->ftrace_callsites,
+			   mod->ftrace_callsites +
+			   mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 	return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
-{
-	return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-	.notifier_call = ftrace_module_notify_enter,
-	.priority = INT_MAX,	/* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
 	.notifier_call = ftrace_module_notify_exit,
 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_enter_nb);
-	if (ret)
-		pr_warning("Failed to register trace ftrace module enter notifier\n");
-
 	ret = register_module_notifier(&ftrace_module_exit_nb);
 	if (ret)
 		pr_warning("Failed to register trace ftrace module exit notifier\n");
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 925f537f07d1..4747b476a030 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
 			data->ops->func(data);
 			continue;
 		}
-		filter = rcu_dereference(data->filter);
+		filter = rcu_dereference_sched(data->filter);
 		if (filter && !filter_match_preds(filter, rec))
 			continue;
 		if (data->cmd_ops->post_trigger) {
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 5b781d2be383..ffd56351b521 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -58,12 +58,16 @@ int ftrace_create_function_files(struct trace_array *tr,
 {
 	int ret;
 
-	/* The top level array uses the "global_ops". */
-	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
-		ret = allocate_ftrace_ops(tr);
-		if (ret)
-			return ret;
-	}
+	/*
+	 * The top level array uses the "global_ops", and the files are
+	 * created on boot up.
+	 */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		return 0;
+
+	ret = allocate_ftrace_ops(tr);
+	if (ret)
+		return ret;
 
 	ftrace_create_filter_files(tr->ops, parent);
 
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 930e51462dc8..c082a7441345 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -732,9 +732,15 @@ static int uprobe_buffer_enable(void)
 
 static void uprobe_buffer_disable(void)
 {
+	int cpu;
+
 	BUG_ON(!mutex_is_locked(&event_mutex));
 
 	if (--uprobe_buffer_refcnt == 0) {
+		for_each_possible_cpu(cpu)
+			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
+							     cpu)->buf);
+
 		free_percpu(uprobe_cpu_buffer);
 		uprobe_cpu_buffer = NULL;
 	}
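The added loop fixes a leak: free_percpu() releases the per-CPU structs but not the page each one's buf points at. The shape of the bug is the classic container-versus-contents free, shown here as a runnable userspace analogue (NR_CPUS and the type names are stand-ins):

```c
#include <stdlib.h>

#define NR_CPUS 4

struct uprobe_cpu_buf {
	char *buf;		/* one page allocated per "cpu" */
};

int main(void)
{
	struct uprobe_cpu_buf *bufs = calloc(NR_CPUS, sizeof(*bufs));
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		bufs[cpu].buf = malloc(4096);	/* like the per-cpu page */

	/*
	 * The old code only freed the container - every per-cpu page
	 * leaked.  The fix mirrors the hunk above: free each buf first,
	 * then the container (like free_percpu()).
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		free(bufs[cpu].buf);
	free(bufs);
	return 0;
}
```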
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 0d8f6023fd8d..bf71b4b2d632 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].lower_first;
 		last = first + map->extent[idx].count - 1;
@@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	 * were written before the count of the extents.
 	 *
 	 * To achieve this smp_wmb() is used to guarantee the write
-	 * order and smp_read_barrier_depends() guarantees that we
-	 * don't have crazy architectures returning stale data.
-	 *
+	 * order and smp_rmb() guarantees that we don't have crazy
+	 * architectures returning stale data.
 	 */
 	mutex_lock(&id_map_mutex);
 
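The smp_read_barrier_depends() -> smp_rmb() change strengthens the reader side of a publish/consume pair: the writer fills the extent array, issues smp_wmb(), then stores nr_extents; the reader loads nr_extents, issues smp_rmb(), then walks the array. A runnable C11 sketch of the same pairing, with atomic_thread_fence() standing in for the kernel barriers (a simplification, not the kernel's implementation):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Simplified model of the uid_gid_map publish/consume protocol. */
static unsigned int extent_first[5];
static atomic_uint nr_extents;		/* zero-initialized */

static void *writer(void *arg)
{
	extent_first[0] = 1000;		/* fill the extents first... */
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */
	atomic_store_explicit(&nr_extents, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	unsigned int n =
		atomic_load_explicit(&nr_extents, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* ~smp_rmb() */
	if (n)	/* any extent seen via the count is fully written */
		printf("extent[0].first = %u\n", extent_first[0]);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
```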
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e90089fd78e0..516203e665fc 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -138,7 +138,11 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-	__this_cpu_write(watchdog_touch_ts, 0);
+	/*
+	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
+	 * gets zeroed here, so use the raw_ operation.
+	 */
+	raw_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 