Diffstat (limited to 'kernel')

-rw-r--r--  kernel/locking/mutex-debug.c    | 19
-rw-r--r--  kernel/sched/deadline.c         | 11
-rw-r--r--  kernel/sched/fair.c             | 16
-rw-r--r--  kernel/sched/rt.c               |  7
-rw-r--r--  kernel/sched/sched.h            |  9
-rw-r--r--  kernel/seccomp.c                | 19
-rw-r--r--  kernel/time/tick-common.c       |  2
-rw-r--r--  kernel/time/tick-sched.c        |  5
-rw-r--r--  kernel/trace/trace_functions.c  | 16
-rw-r--r--  kernel/trace/trace_uprobe.c     |  6
-rw-r--r--  kernel/user_namespace.c         | 11
-rw-r--r--  kernel/watchdog.c               |  6

12 files changed, 82 insertions, 45 deletions
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index e1191c996c59..5cf6731b98e9 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -71,18 +71,17 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 void debug_mutex_unlock(struct mutex *lock)
 {
-	if (unlikely(!debug_locks))
-		return;
+	if (likely(debug_locks)) {
+		DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 
-	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+		if (!lock->owner)
+			DEBUG_LOCKS_WARN_ON(!lock->owner);
+		else
+			DEBUG_LOCKS_WARN_ON(lock->owner != current);
 
-	if (!lock->owner)
-		DEBUG_LOCKS_WARN_ON(!lock->owner);
-	else
-		DEBUG_LOCKS_WARN_ON(lock->owner != current);
-
-	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-	mutex_clear_owner(lock);
+		DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
+		mutex_clear_owner(lock);
+	}
 
 	/*
 	 * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 27ef40925525..b08095786cb8 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1021,8 +1021,17 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 
 	dl_rq = &rq->dl;
 
-	if (need_pull_dl_task(rq, prev))
+	if (need_pull_dl_task(rq, prev)) {
 		pull_dl_task(rq);
+		/*
+		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
+		 * means a stop task can slip in, in which case we need to
+		 * re-start task selection.
+		 */
+		if (rq->stop && rq->stop->on_rq)
+			return RETRY_TASK;
+	}
+
 	/*
 	 * When prev is DL, we may throttle it in put_prev_task().
 	 * So, we update time before we check for dl_nr_running.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7e9bd0b1fa9e..7570dd969c28 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1497,7 +1497,7 @@ static void task_numa_placement(struct task_struct *p)
 	/* If the task is part of a group prevent parallel updates to group stats */
 	if (p->numa_group) {
 		group_lock = &p->numa_group->lock;
-		spin_lock(group_lock);
+		spin_lock_irq(group_lock);
 	}
 
 	/* Find the node with the highest number of faults */
@@ -1572,7 +1572,7 @@ static void task_numa_placement(struct task_struct *p)
 			}
 		}
 
-		spin_unlock(group_lock);
+		spin_unlock_irq(group_lock);
 	}
 
 	/* Preferred node as the node with the most faults */
@@ -1677,7 +1677,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	if (!join)
 		return;
 
-	double_lock(&my_grp->lock, &grp->lock);
+	BUG_ON(irqs_disabled());
+	double_lock_irq(&my_grp->lock, &grp->lock);
 
 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
 		my_grp->faults[i] -= p->numa_faults_memory[i];
@@ -1691,7 +1692,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	grp->nr_tasks++;
 
 	spin_unlock(&my_grp->lock);
-	spin_unlock(&grp->lock);
+	spin_unlock_irq(&grp->lock);
 
 	rcu_assign_pointer(p->numa_group, grp);
 
@@ -1710,14 +1711,14 @@ void task_numa_free(struct task_struct *p)
 	void *numa_faults = p->numa_faults_memory;
 
 	if (grp) {
-		spin_lock(&grp->lock);
+		spin_lock_irq(&grp->lock);
 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] -= p->numa_faults_memory[i];
 		grp->total_faults -= p->total_numa_faults;
 
 		list_del(&p->numa_entry);
 		grp->nr_tasks--;
-		spin_unlock(&grp->lock);
+		spin_unlock_irq(&grp->lock);
 		rcu_assign_pointer(p->numa_group, NULL);
 		put_numa_group(grp);
 	}
@@ -6727,7 +6728,8 @@ static int idle_balance(struct rq *this_rq)
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-	    (this_rq->dl.dl_nr_running ||
+	    ((this_rq->stop && this_rq->stop->on_rq) ||
+	     this_rq->dl.dl_nr_running ||
 	    (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
 		pulled_task = -1;
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d8cdf1618551..bd2267ad404f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1362,10 +1362,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
-		 * means a dl task can slip in, in which case we need to
-		 * re-start task selection.
+		 * means a dl or stop task can slip in, in which case we need
+		 * to re-start task selection.
 		 */
-		if (unlikely(rq->dl.dl_nr_running))
+		if (unlikely((rq->stop && rq->stop->on_rq) ||
+			     rq->dl.dl_nr_running))
 			return RETRY_TASK;
 	}
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f28d3a2..456e492a3dca 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1385,6 +1385,15 @@ static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
+static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+{
+	if (l1 > l2)
+		swap(l1, l2);
+
+	spin_lock_irq(l1);
+	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
 {
 	if (l1 > l2)
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d8d046c0726a..b35c21503a36 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -69,18 +69,17 @@ static void populate_seccomp_data(struct seccomp_data *sd)
 {
 	struct task_struct *task = current;
 	struct pt_regs *regs = task_pt_regs(task);
+	unsigned long args[6];
 
 	sd->nr = syscall_get_nr(task, regs);
 	sd->arch = syscall_get_arch();
-
-	/* Unroll syscall_get_args to help gcc on arm. */
-	syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
-	syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]);
-	syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]);
-	syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]);
-	syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]);
-	syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);
-
+	syscall_get_arguments(task, regs, 0, 6, args);
+	sd->args[0] = args[0];
+	sd->args[1] = args[1];
+	sd->args[2] = args[2];
+	sd->args[3] = args[3];
+	sd->args[4] = args[4];
+	sd->args[5] = args[5];
 	sd->instruction_pointer = KSTK_EIP(task);
 }
 
@@ -256,6 +255,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 		goto free_prog;
 
 	/* Allocate a new seccomp_filter */
+	ret = -ENOMEM;
 	filter = kzalloc(sizeof(struct seccomp_filter) +
 			 sizeof(struct sock_filter_int) * new_len,
 			 GFP_KERNEL|__GFP_NOWARN);
@@ -265,6 +265,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 	ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len);
 	if (ret)
 		goto free_filter;
+	kfree(fp);
 
 	atomic_set(&filter->usage, 1);
 	filter->len = new_len;
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 015661279b68..0a0608edeb26 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -276,7 +276,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
 bool tick_check_replacement(struct clock_event_device *curdev,
 			    struct clock_event_device *newdev)
 {
-	if (tick_check_percpu(curdev, newdev, smp_processor_id()))
+	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
 		return false;
 
 	return tick_check_preferred(curdev, newdev);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69c67ec..6558b7ac112d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -84,6 +84,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
+	} else {
+		write_sequnlock(&jiffies_lock);
+		return;
 	}
 	write_sequnlock(&jiffies_lock);
 	update_wall_time();
@@ -967,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t next;
 
-	if (!tick_nohz_active)
+	if (!tick_nohz_enabled)
 		return;
 
 	local_irq_disable();
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 5b781d2be383..ffd56351b521 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -58,12 +58,16 @@ int ftrace_create_function_files(struct trace_array *tr,
 {
 	int ret;
 
-	/* The top level array uses the "global_ops". */
-	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
-		ret = allocate_ftrace_ops(tr);
-		if (ret)
-			return ret;
-	}
+	/*
+	 * The top level array uses the "global_ops", and the files are
+	 * created on boot up.
+	 */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		return 0;
+
+	ret = allocate_ftrace_ops(tr);
+	if (ret)
+		return ret;
 
 	ftrace_create_filter_files(tr->ops, parent);
 
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 930e51462dc8..c082a7441345 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -732,9 +732,15 @@ static int uprobe_buffer_enable(void)
 
 static void uprobe_buffer_disable(void)
 {
+	int cpu;
+
 	BUG_ON(!mutex_is_locked(&event_mutex));
 
 	if (--uprobe_buffer_refcnt == 0) {
+		for_each_possible_cpu(cpu)
+			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
+							     cpu)->buf);
+
 		free_percpu(uprobe_cpu_buffer);
 		uprobe_cpu_buffer = NULL;
 	}
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 0d8f6023fd8d..bf71b4b2d632 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].lower_first;
 		last = first + map->extent[idx].count - 1;
@@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	 * were written before the count of the extents.
 	 *
 	 * To achieve this smp_wmb() is used on guarantee the write
-	 * order and smp_read_barrier_depends() is guaranteed that we
-	 * don't have crazy architectures returning stale data.
-	 *
+	 * order and smp_rmb() is guaranteed that we don't have crazy
+	 * architectures returning stale data.
 	 */
 	mutex_lock(&id_map_mutex);
 
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e90089fd78e0..516203e665fc 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -138,7 +138,11 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-	__this_cpu_write(watchdog_touch_ts, 0);
+	/*
+	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
+	 * gets zeroed here, so use the raw_ operation.
+	 */
+	raw_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 