Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/mutex-debug.c   | 19 +++++++++----------
-rw-r--r--	kernel/seccomp.c               | 17 ++++++++---------
-rw-r--r--	kernel/time/tick-common.c      |  2 +-
-rw-r--r--	kernel/time/tick-sched.c       |  5 ++++-
-rw-r--r--	kernel/trace/trace_functions.c | 16 ++++++++++------
-rw-r--r--	kernel/trace/trace_uprobe.c    |  6 ++++++
-rw-r--r--	kernel/user_namespace.c        | 11 +++++------
7 files changed, 43 insertions(+), 33 deletions(-)
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index e1191c996c59..5cf6731b98e9 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -71,18 +71,17 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 void debug_mutex_unlock(struct mutex *lock)
 {
-	if (unlikely(!debug_locks))
-		return;
+	if (likely(debug_locks)) {
+		DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 
-	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+		if (!lock->owner)
+			DEBUG_LOCKS_WARN_ON(!lock->owner);
+		else
+			DEBUG_LOCKS_WARN_ON(lock->owner != current);
 
-	if (!lock->owner)
-		DEBUG_LOCKS_WARN_ON(!lock->owner);
-	else
-		DEBUG_LOCKS_WARN_ON(lock->owner != current);
-
-	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-	mutex_clear_owner(lock);
+		DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
+		mutex_clear_owner(lock);
+	}
 
 	/*
 	 * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
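Note: the hunk above replaces the early `!debug_locks` return with a single guarded block, so that once lock debugging has been disabled, mutex_clear_owner() is skipped together with the DEBUG_LOCKS_WARN_ON() checks rather than still being run. A minimal userspace sketch of that control-flow change (all names below are hypothetical stand-ins, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	static bool debug_locks = true;		/* stand-in for the kernel global */

	static void run_checks(void)  { puts("consistency checks"); }
	static void clear_owner(void) { puts("owner cleared"); }

	static void debug_unlock(void)
	{
		/*
		 * Old shape: if (!debug_locks) return; checks; clear_owner();
		 * New shape: once debugging is off, the owner clearing is
		 * skipped along with the checks, and execution still falls
		 * through to whatever follows the block.
		 */
		if (debug_locks) {
			run_checks();
			clear_owner();
		}
	}

	int main(void)
	{
		debug_unlock();
		debug_locks = false;	/* e.g. after a warning has fired */
		debug_unlock();		/* now does nothing */
		return 0;
	}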
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d8d046c0726a..590c37925084 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -69,18 +69,17 @@ static void populate_seccomp_data(struct seccomp_data *sd)
 {
 	struct task_struct *task = current;
 	struct pt_regs *regs = task_pt_regs(task);
+	unsigned long args[6];
 
 	sd->nr = syscall_get_nr(task, regs);
 	sd->arch = syscall_get_arch();
-
-	/* Unroll syscall_get_args to help gcc on arm. */
-	syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
-	syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]);
-	syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]);
-	syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]);
-	syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]);
-	syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);
-
+	syscall_get_arguments(task, regs, 0, 6, args);
+	sd->args[0] = args[0];
+	sd->args[1] = args[1];
+	sd->args[2] = args[2];
+	sd->args[3] = args[3];
+	sd->args[4] = args[4];
+	sd->args[5] = args[5];
 	sd->instruction_pointer = KSTK_EIP(task);
 }
 
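Note: instead of unrolled per-argument calls that cast `&sd->args[i]` (a u64 slot) to `unsigned long *`, the new code fetches all six arguments into a properly typed local array and assigns them across, letting the compiler do the integer widening. On targets where `unsigned long` is narrower than u64, the old cast stored only part of each slot. A hedged userspace sketch of the difference (get_args() is a hypothetical stand-in for syscall_get_arguments()):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct data { uint64_t args[6]; };

	/* Hypothetical stand-in: fills n native-width longs. */
	static void get_args(unsigned long *dst, int n)
	{
		for (int i = 0; i < n; i++)
			dst[i] = 0x11111111UL * (unsigned long)(i + 1);
	}

	int main(void)
	{
		struct data sd;
		unsigned long args[6];

		/*
		 * Buggy shape: on an ILP32 target this writes 4 bytes into
		 * an 8-byte slot, leaving the other half of sd.args[0] stale.
		 */
		memset(&sd, 0xff, sizeof(sd));
		get_args((unsigned long *)&sd.args[0], 1);
		printf("cast: %#llx\n", (unsigned long long)sd.args[0]);

		/* Fixed shape: fetch into a matching type, then widen. */
		get_args(args, 6);
		for (int i = 0; i < 6; i++)
			sd.args[i] = args[i];
		printf("copy: %#llx\n", (unsigned long long)sd.args[0]);
		return 0;
	}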
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 015661279b68..0a0608edeb26 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -276,7 +276,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
 bool tick_check_replacement(struct clock_event_device *curdev,
 			    struct clock_event_device *newdev)
 {
-	if (tick_check_percpu(curdev, newdev, smp_processor_id()))
+	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
 		return false;
 
 	return tick_check_preferred(curdev, newdev);
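Note: tick_check_percpu() answers "is this a viable per-CPU replacement?", so the unnegated test rejected exactly the viable candidates and passed unsuitable ones on to tick_check_preferred(). The one-character fix restores the usual validate-or-bail guard, sketched here with hypothetical stand-in predicates:

	#include <stdbool.h>
	#include <stdio.h>

	static bool check_percpu(int dev)    { return dev != 0; }
	static bool check_preferred(int dev) { return dev > 0; }

	static bool check_replacement(int dev)
	{
		if (!check_percpu(dev))	/* was: if (check_percpu(dev)) */
			return false;	/* bail only when the check fails */
		return check_preferred(dev);
	}

	int main(void)
	{
		printf("dev 1: %d, dev 0: %d\n",
		       check_replacement(1), check_replacement(0));
		return 0;
	}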
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69c67ec..6558b7ac112d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -84,6 +84,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
+	} else {
+		write_sequnlock(&jiffies_lock);
+		return;
 	}
 	write_sequnlock(&jiffies_lock);
 	update_wall_time();
@@ -967,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t next;
 
-	if (!tick_nohz_active)
+	if (!tick_nohz_enabled)
 		return;
 
 	local_irq_disable();
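Note: the first hunk gives the no-advance path its own unlock-and-return, so update_wall_time() runs only when jiffies actually moved forward; the invariant to preserve is that every exit from the locked region still drops jiffies_lock. The second hunk makes the nohz switch-over bail on tick_nohz_enabled rather than tick_nohz_active, which is plausible since this is the path that activates nohz in the first place. A userspace sketch of the unlock-on-every-path shape (a pthread mutex standing in for the kernel's seqlock):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static long fake_jiffies;

	static void do_update(long delta)
	{
		pthread_mutex_lock(&lock);
		if (delta > 0) {
			fake_jiffies += delta;		/* the real update */
		} else {
			pthread_mutex_unlock(&lock);	/* unlock before... */
			return;				/* ...the early return */
		}
		pthread_mutex_unlock(&lock);
		/*
		 * Follow-up work (update_wall_time() in the kernel) now
		 * runs only when the count actually advanced.
		 */
		printf("jiffies advanced to %ld\n", fake_jiffies);
	}

	int main(void)
	{
		do_update(0);	/* early path: unlocks, skips follow-up */
		do_update(2);
		return 0;
	}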
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 5b781d2be383..ffd56351b521 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -58,12 +58,16 @@ int ftrace_create_function_files(struct trace_array *tr,
 {
 	int ret;
 
-	/* The top level array uses the "global_ops". */
-	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
-		ret = allocate_ftrace_ops(tr);
-		if (ret)
-			return ret;
-	}
+	/*
+	 * The top level array uses the "global_ops", and the files are
+	 * created on boot up.
+	 */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		return 0;
+
+	ret = allocate_ftrace_ops(tr);
+	if (ret)
+		return ret;
 
 	ftrace_create_filter_files(tr->ops, parent);
 
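Note: besides flattening the nesting into guard clauses, this changes behavior: for the top-level (GLOBAL) array the function now returns before ftrace_create_filter_files(), since, per the expanded comment, that array's files are created on boot up. A sketch of the early-return shape (the helpers are hypothetical stand-ins):

	#include <stdio.h>

	static int  allocate_ops(void)        { return 0; }
	static void create_filter_files(void) { puts("filter files created"); }

	static int create_function_files(int is_global)
	{
		int ret;

		/*
		 * Special case first: the top-level array's files already
		 * exist, so return before touching the filter files.
		 */
		if (is_global)
			return 0;

		ret = allocate_ops();
		if (ret)
			return ret;

		create_filter_files();
		return 0;
	}

	int main(void)
	{
		return create_function_files(0);
	}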
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 930e51462dc8..c082a7441345 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -732,9 +732,15 @@ static int uprobe_buffer_enable(void)
 
 static void uprobe_buffer_disable(void)
 {
+	int cpu;
+
 	BUG_ON(!mutex_is_locked(&event_mutex));
 
 	if (--uprobe_buffer_refcnt == 0) {
+		for_each_possible_cpu(cpu)
+			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
+							     cpu)->buf);
+
 		free_percpu(uprobe_cpu_buffer);
 		uprobe_cpu_buffer = NULL;
 	}
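Note: free_percpu() releases the per-CPU structures themselves, not the pages their buf members point to, so the added loop frees each CPU's page first and closes a leak on the disable path. A userspace analogue of the same teardown order:

	#include <stdlib.h>

	struct cpu_buf { char *buf; };

	enum { NCPU = 4 };

	int main(void)
	{
		struct cpu_buf *bufs = calloc(NCPU, sizeof(*bufs));
		if (!bufs)
			return 1;
		for (int cpu = 0; cpu < NCPU; cpu++)
			bufs[cpu].buf = malloc(4096);	/* per-CPU page analogue */

		/*
		 * Teardown: free what the members point to first, then the
		 * container; dropping only the container would leak each buf.
		 */
		for (int cpu = 0; cpu < NCPU; cpu++)
			free(bufs[cpu].buf);
		free(bufs);	/* analogue of free_percpu() */
		return 0;
	}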
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 0d8f6023fd8d..bf71b4b2d632 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].lower_first;
 		last = first + map->extent[idx].count - 1;
@@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	 * were written before the count of the extents.
 	 *
 	 * To achieve this smp_wmb() is used on guarantee the write
-	 * order and smp_read_barrier_depends() is guaranteed that we
-	 * don't have crazy architectures returning stale data.
-	 *
+	 * order and smp_rmb() is guaranteed that we don't have crazy
+	 * architectures returning stale data.
 	 */
 	mutex_lock(&id_map_mutex);
 
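Note: the loads of map->nr_extents and of the extent entries are related only by control flow, not by an address dependency, and smp_read_barrier_depends() orders nothing in that case; smp_rmb() is the barrier that actually pairs with the smp_wmb() in map_write(). A C11 sketch of the same publish/consume pairing, where release/acquire on the count plays the role of the paired barriers:

	#include <stdatomic.h>
	#include <stdio.h>

	struct extent { unsigned first, count; };

	static struct extent extents[5];
	static atomic_uint nr_extents;

	/* Writer: fill the extent slots, then publish the count. */
	static void publish(struct extent e)
	{
		extents[0] = e;
		atomic_store_explicit(&nr_extents, 1,
				      memory_order_release);	/* ~ smp_wmb() */
	}

	/* Reader: load the count, then scan only the slots it covers. */
	static int lookup(unsigned id)
	{
		unsigned n = atomic_load_explicit(&nr_extents,
						  memory_order_acquire); /* ~ smp_rmb() */
		for (unsigned i = 0; i < n; i++)
			if (id >= extents[i].first &&
			    id - extents[i].first < extents[i].count)
				return (int)i;
		return -1;
	}

	int main(void)
	{
		publish((struct extent){ .first = 1000, .count = 10 });
		printf("extent for id 1004: %d\n", lookup(1004));
		return 0;
	}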