diff options
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c             |  7
-rw-r--r--  kernel/cred.c               |  2
-rw-r--r--  kernel/futex.c              | 30
-rw-r--r--  kernel/hw_breakpoint.c      |  2
-rw-r--r--  kernel/kfifo.c              |  3
-rw-r--r--  kernel/kgdb.c               |  6
-rw-r--r--  kernel/perf_event.c         |  2
-rw-r--r--  kernel/softirq.c            | 15
-rw-r--r--  kernel/softlockup.c         | 15
-rw-r--r--  kernel/time/timekeeping.c   |  2
-rw-r--r--  kernel/trace/trace_kprobe.c |  2
-rw-r--r--  kernel/trace/trace_stack.c  | 24
12 files changed, 87 insertions, 23 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1fbcc748044a..aa3bee566446 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2936,14 +2936,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
 	for_each_subsys(root, ss) {
 		struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+
 		if (IS_ERR(css)) {
 			err = PTR_ERR(css);
 			goto err_destroy;
 		}
 		init_cgroup_css(css, ss, cgrp);
-		if (ss->use_id)
-			if (alloc_css_id(ss, parent, cgrp))
+		if (ss->use_id) {
+			err = alloc_css_id(ss, parent, cgrp);
+			if (err)
 				goto err_destroy;
+		}
 		/* At error, ->destroy() callback has to free assigned ID. */
 	}
 
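
The cgroup change replaces a silently swallowed failure with proper errno propagation: previously a failing alloc_css_id() jumped to err_destroy without setting err, so the caller could see a stale error code. A minimal userspace-style sketch of the same pattern, with hypothetical names:

/*
 * Sketch (userspace, hypothetical names) of the error-propagation
 * pattern the cgroup fix applies: capture the callee's errno instead
 * of losing it on the cleanup path.
 */
#include <stdio.h>
#include <errno.h>

static int alloc_id(void)
{
        return -ENOSPC;                 /* simulate a failing allocator */
}

static int create(void)
{
        int err;

        err = alloc_id();               /* keep the failure code ...   */
        if (err)
                goto err_destroy;       /* ... instead of discarding it */
        return 0;

err_destroy:
        /* cleanup of partially created state would go here */
        return err;                     /* caller sees -ENOSPC, not 0   */
}

int main(void)
{
        printf("create() = %d\n", create());
        return 0;
}
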
diff --git a/kernel/cred.c b/kernel/cred.c
index dd76cfe5f5b0..1ed8ca18790c 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void)
 #ifdef CONFIG_KEYS
 	new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
 	if (!new->tgcred) {
-		kfree(new);
+		kmem_cache_free(cred_jar, new);
 		return NULL;
 	}
 	atomic_set(&new->tgcred->usage, 1);
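
The cred object is allocated from the cred_jar slab cache, so freeing it with plain kfree() on the error path mismatches allocator and deallocator; objects must go back to the cache they came from. A hedged sketch of the pairing rule — the slab calls are the real kernel API, while "demo_jar" and struct demo are illustrative:

/*
 * Sketch of the alloc/free pairing rule behind the cred.c fix.
 * kmem_cache_create/alloc/free are the real slab API; the names
 * here are made up for illustration.
 */
#include <linux/slab.h>
#include <linux/errno.h>

struct demo { int x; };

static struct kmem_cache *demo_jar;

static int demo_cache_init(void)
{
        demo_jar = kmem_cache_create("demo_jar", sizeof(struct demo),
                                     0, 0, NULL);
        return demo_jar ? 0 : -ENOMEM;
}

static struct demo *demo_alloc(void)
{
        /* object obtained from the cache ... */
        return kmem_cache_alloc(demo_jar, GFP_KERNEL);
}

static void demo_free(struct demo *d)
{
        /* ... must be returned to the same cache, never kfree()d */
        kmem_cache_free(demo_jar, d);
}
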
diff --git a/kernel/futex.c b/kernel/futex.c
index d9b3a2228f9d..e7a35f1039e7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -530,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 			return -EINVAL;
 
 		WARN_ON(!atomic_read(&pi_state->refcount));
-		WARN_ON(pid && pi_state->owner &&
-				pi_state->owner->pid != pid);
+
+		/*
+		 * When pi_state->owner is NULL then the owner died
+		 * and another waiter is on the fly. pi_state->owner
+		 * is fixed up by the task which acquires
+		 * pi_state->rt_mutex.
+		 *
+		 * We do not check for pid == 0 which can happen when
+		 * the owner died and robust_list_exit() cleared the
+		 * TID.
+		 */
+		if (pid && pi_state->owner) {
+			/*
+			 * Bail out if user space manipulated the
+			 * futex value.
+			 */
+			if (pid != task_pid_vnr(pi_state->owner))
+				return -EINVAL;
+		}
 
 		atomic_inc(&pi_state->refcount);
 		*ps = pi_state;
@@ -758,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	if (!pi_state)
 		return -EINVAL;
 
+	/*
+	 * If current does not own the pi_state then the futex is
+	 * inconsistent and user space fiddled with the futex value.
+	 */
+	if (pi_state->owner != current)
+		return -EINVAL;
+
 	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
@@ -1971,7 +1995,7 @@ retry_private:
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
 
-	goto out;
+	goto out_put_key;
 
 out_unlock_put_key:
 	queue_unlock(&q, hb);
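
The new checks hinge on the PI futex word layout: the low bits hold the owner's TID, and robust_list_exit() clears the TID (leaving FUTEX_OWNER_DIED set) when a robust owner dies, which is why pid == 0 is tolerated while a mismatching non-zero TID means user space corrupted the value. A small userspace sketch decoding the word — the flag constants are the real ABI values from <linux/futex.h>, the decode function is illustrative:

/* Sketch: decoding a PI futex word. */
#include <stdio.h>
#include <stdint.h>

#define FUTEX_WAITERS		0x80000000
#define FUTEX_OWNER_DIED	0x40000000
#define FUTEX_TID_MASK		0x3fffffff

static void decode(uint32_t uval)
{
        uint32_t tid = uval & FUTEX_TID_MASK;

        printf("tid=%u waiters=%d owner_died=%d\n", (unsigned)tid,
               !!(uval & FUTEX_WAITERS), !!(uval & FUTEX_OWNER_DIED));
        if (tid == 0)
                /* robust-exit cleared the TID; a waiter fixes ownership */
                printf("  -> owner died, no TID check possible\n");
}

int main(void)
{
        decode(1234 | FUTEX_WAITERS);             /* healthy owned futex    */
        decode(FUTEX_OWNER_DIED | FUTEX_WAITERS); /* owner died, TID cleared */
        return 0;
}
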
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 8a5c7d55ac9f..967e66143e11 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -360,8 +360,8 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
 	u64 old_addr = bp->attr.bp_addr;
+	u64 old_len = bp->attr.bp_len;
 	int old_type = bp->attr.bp_type;
-	int old_len = bp->attr.bp_len;
 	int err = 0;
 
 	perf_event_disable(bp);
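
bp_len is a u64 in struct perf_event_attr, so snapshotting it into an int silently drops the upper 32 bits before the old value is compared or restored. A two-line userspace demonstration of the truncation (behavior for out-of-range conversion is what typical ABIs do):

/* Sketch: why saving a u64 field in an int is lossy. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t bp_len = 0x100000001ULL;  /* any value with high bits set */
        int old_len = bp_len;              /* typically truncates to 1     */
        uint64_t old_len64 = bp_len;       /* preserves the full value     */

        printf("int: %d  u64: %llu\n", old_len,
               (unsigned long long)old_len64);
        return 0;
}
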
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 32c5c15d750d..498cabba225e 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -349,6 +349,7 @@ EXPORT_SYMBOL(__kfifo_from_user_n);
  * @fifo: the fifo to be used.
  * @from: pointer to the data to be added.
  * @len: the length of the data to be added.
+ * @total: the actual returned data length.
  *
  * This function copies at most @len bytes from the @from into the
  * FIFO depending and returns -EFAULT/0.
@@ -399,7 +400,7 @@ EXPORT_SYMBOL(__kfifo_to_user_n);
  * @fifo: the fifo to be used.
  * @to: where the data must be copied.
  * @len: the size of the destination buffer.
- @ @lenout: pointer to output variable with copied data
+ * @lenout: pointer to output variable with copied data
  *
  * This function copies at most @len bytes from the FIFO into the
  * @to buffer and 0 or -EFAULT.
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index c7ade62e4ef0..761fdd2b3034 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -599,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs)
 
 	/* Signal the primary CPU that we are done: */
 	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 }
@@ -1453,7 +1453,7 @@ acquirelock:
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sync();
 		clocksource_touch_watchdog();
 		local_irq_restore(flags);
 
@@ -1553,7 +1553,7 @@ kgdb_restore:
 	}
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index d27746bd3a06..2b19297742cb 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4580,7 +4580,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->type >= PERF_TYPE_MAX)
 		return -EINVAL;
 
-	if (attr->__reserved_1 || attr->__reserved_2)
+	if (attr->__reserved_1)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
diff --git a/kernel/softirq.c b/kernel/softirq.c
index a09502e2ef75..7c1a67ef0274 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -500,22 +500,17 @@ EXPORT_SYMBOL(tasklet_kill);
  */
 
 /*
- * The trampoline is called when the hrtimer expires. If this is
- * called from the hrtimer interrupt then we schedule the tasklet as
- * the timer callback function expects to run in softirq context. If
- * it's called in softirq context anyway (i.e. high resolution timers
- * disabled) then the hrtimer callback is called right away.
+ * The trampoline is called when the hrtimer expires. It schedules a tasklet
+ * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
+ * hrtimer callback, but from softirq context.
  */
 static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
 {
 	struct tasklet_hrtimer *ttimer =
 		container_of(timer, struct tasklet_hrtimer, timer);
 
-	if (hrtimer_is_hres_active(timer)) {
-		tasklet_hi_schedule(&ttimer->tasklet);
-		return HRTIMER_NORESTART;
-	}
-	return ttimer->function(timer);
+	tasklet_hi_schedule(&ttimer->tasklet);
+	return HRTIMER_NORESTART;
 }
 
 /*
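
With the hres check gone, the trampoline now unconditionally defers the user's callback to HI-softirq context, which is the whole point of the tasklet_hrtimer API. A minimal usage sketch of that API — tasklet_hrtimer_init() and hrtimer_start() are the real calls, while my_timer_fn and the 10 ms interval are illustrative:

/* Sketch: deferring an hrtimer callback to softirq via tasklet_hrtimer. */
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer my_ttimer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
        /* Runs in softirq context via the trampoline's tasklet, so it
         * may take softirq-safe locks, unlike a raw hrtimer callback
         * running in hardirq context. */
        return HRTIMER_NORESTART;
}

static void my_setup(void)
{
        tasklet_hrtimer_init(&my_ttimer, my_timer_fn,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer_start(&my_ttimer.timer, ktime_set(0, 10 * NSEC_PER_MSEC),
                      HRTIMER_MODE_REL);
}
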
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d22579087e27..0d4c7898ab80 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
 static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
 static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
+void touch_softlockup_watchdog_sync(void)
+{
+	__raw_get_cpu_var(softlock_touch_sync) = true;
+	__raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
 void touch_all_softlockup_watchdogs(void)
 {
 	int cpu;
@@ -118,6 +125,14 @@ void softlockup_tick(void)
 	}
 
 	if (touch_ts == 0) {
+		if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+			/*
+			 * If the time stamp was touched atomically
+			 * make sure the scheduler tick is up to date.
+			 */
+			per_cpu(softlock_touch_sync, this_cpu) = false;
+			sched_clock_tick();
+		}
 		__touch_softlockup_watchdog();
 		return;
 	}
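
The _sync variant exists for callers, such as kgdb above, that stop a CPU long enough for its scheduler clock to go stale: it sets a per-CPU flag so that softlockup_tick() runs sched_clock_tick() to resynchronize the clock before taking a fresh timestamp. A sketch of the call-site pattern — touch_softlockup_watchdog_sync() is the real function added by this patch, while dbg_stop_here() stands in for any long interrupts-off stop:

/* Sketch: when to prefer touch_softlockup_watchdog_sync(). */
#include <linux/sched.h>

static void dbg_stop_here(void)
{
        /* ... CPU parked (e.g. in a debugger), clocks drift ... */
}

static void dbg_resume(void)
{
        dbg_stop_here();
        /*
         * Plain touch_softlockup_watchdog() would let the next tick
         * stamp a stale clock; the _sync variant forces a
         * sched_clock_tick() first, so the new timestamp is sane.
         */
        touch_softlockup_watchdog_sync();
}
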
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7faaa32fbf4f..e2ab064c6d41 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
 }
+EXPORT_SYMBOL_GPL(getboottime);
 
 /**
  * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
 {
 	*ts = timespec_add_safe(*ts, total_sleep_time);
 }
+EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
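
The EXPORT_SYMBOL_GPL lines make the two helpers callable from modules; without them a modular user fails at load time with an unresolved symbol. A sketch of a GPL module consuming them — the timekeeping calls are real, the module itself is illustrative and assumes ktime_get_ts() for the monotonic timespec:

/* Sketch: a GPL module using the newly exported symbols. */
#include <linux/module.h>
#include <linux/time.h>
#include <linux/ktime.h>

static int __init boottime_demo_init(void)
{
        struct timespec boot, mono;

        getboottime(&boot);              /* needs EXPORT_SYMBOL_GPL */
        ktime_get_ts(&mono);             /* monotonic time          */
        monotonic_to_bootbased(&mono);   /* likewise exported now   */
        pr_info("booted at %ld.%09ld\n", boot.tv_sec, boot.tv_nsec);
        return 0;
}

static void __exit boottime_demo_exit(void) { }

module_init(boottime_demo_init);
module_exit(boottime_demo_exit);
MODULE_LICENSE("GPL");                   /* _GPL exports require this */
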
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 6ea90c0e2c96..50b1b8239806 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -689,7 +689,7 @@ static int create_trace_probe(int argc, char **argv)
 		return -EINVAL;
 	}
 	/* an address specified */
-	ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr);
+	ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
 	if (ret) {
 		pr_info("Failed to parse address.\n");
 		return ret;
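
create_trace_probe() parses a kprobe_events command such as "p:myevent 0xc0123456" split into argv: argv[0] holds the probe type (optionally with an event name after ':'), and argv[1] holds the symbol or raw address, so the address parse must start at argv[1][0] rather than two characters into argv[0]. A userspace sketch of the argv layout, with illustrative values and strtoul standing in for the kernel's strict_strtoul:

/* Sketch: argv layout for a kprobe_events command. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* echo 'p:myevent 0xc0123456' > .../kprobe_events splits into: */
        char *argv[] = { "p:myevent", "0xc0123456" };
        unsigned long addr;

        /* &argv[0][2] would point at "yevent" -- the parse fails;
         * &argv[1][0] is the whole address token -- it succeeds.  */
        addr = strtoul(&argv[1][0], NULL, 0);
        printf("addr = %#lx\n", addr);
        return 0;
}
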
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a5120ee30..f4bc9b27de5f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	unsigned long val, flags;
 	char buf[64];
 	int ret;
+	int cpu;
 
 	if (count >= sizeof(buf))
 		return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	local_irq_save(flags);
+
+	/*
+	 * In case we trace inside arch_spin_lock() or after (NMI),
+	 * we will cause circular lock, so we also need to increase
+	 * the percpu trace_active here.
+	 */
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 	*ptr = val;
 	arch_spin_unlock(&max_stack_lock);
+
+	per_cpu(trace_active, cpu)--;
 	local_irq_restore(flags);
 
 	return count;
@@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	int cpu;
+
 	local_irq_disable();
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 
 	if (*pos == 0)
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
+	int cpu;
+
 	arch_spin_unlock(&max_stack_lock);
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)--;
+
 	local_irq_enable();
}
 
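
trace_active acts as the stack tracer's per-CPU recursion flag: the tracer callback presumably bails out while it is non-zero, so bumping it around arch_spin_lock() stops the tracer from tracing its own lock acquisition and deadlocking on max_stack_lock. A distilled sketch of the guard pattern — the per-CPU machinery is real kernel API, while guarded_op and my_hook are illustrative names:

/* Sketch: per-CPU recursion guard, the pattern trace_stack.c applies. */
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irqflags.h>

static DEFINE_PER_CPU(int, my_active);

static void my_hook(void)
{
        /* Called from a tracing hook: refuse to recurse. */
        if (per_cpu(my_active, smp_processor_id()))
                return;
        /* ... tracing work that may take locks ... */
}

static void guarded_op(void)
{
        int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        per_cpu(my_active, cpu)++;      /* hook above now returns early */

        /* take locks the hook might otherwise trace into */

        per_cpu(my_active, cpu)--;
        local_irq_enable();
}
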