author		Ingo Molnar <mingo@kernel.org>	2013-11-01 03:10:58 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-11-01 03:24:41 -0400
commit		fb10d5b7efbcc0aa9e46a9aa5ad86772c7bacb9a (patch)
tree		ea284fe7b9c17a85b8d3c4ba999d6e26d51a12f6 /kernel
parent		f9f9ffc237dd924f048204e8799da74f9ecf40cf (diff)
parent		52469b4fcd4fc433ffc78cec4cf94368e9052890 (diff)
Merge branch 'linus' into sched/core

Resolve cherry-picking conflicts:

  Conflicts:
	mm/huge_memory.c
	mm/memory.c
	mm/mprotect.c

See this upstream merge commit for more details:

  52469b4fcd4f Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c			14
-rw-r--r--	kernel/events/core.c		 4
-rw-r--r--	kernel/mutex.c			32
-rw-r--r--	kernel/power/hibernate.c	 2
-rw-r--r--	kernel/time/clockevents.c	65
5 files changed, 77 insertions(+), 40 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2418b6e71a85..8bd9cfdc70d7 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2039,7 +2039,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
 		/* @tsk either already exited or can't exit until the end */
 		if (tsk->flags & PF_EXITING)
-			continue;
+			goto next;
 
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
@@ -2047,7 +2047,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 		ent.cgrp = task_cgroup_from_root(tsk, root);
 		/* nothing to do if this task is already in the cgroup */
 		if (ent.cgrp == cgrp)
-			continue;
+			goto next;
 		/*
 		 * saying GFP_ATOMIC has no effect here because we did prealloc
 		 * earlier, but it's good form to communicate our expectations.
@@ -2055,7 +2055,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
 		BUG_ON(retval != 0);
 		i++;
-
+	next:
 		if (!threadgroup)
 			break;
 	} while_each_thread(leader, tsk);
@@ -3188,11 +3188,9 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
-	/* if first iteration, visit the leftmost descendant */
-	if (!pos) {
-		next = css_leftmost_descendant(root);
-		return next != root ? next : NULL;
-	}
+	/* if first iteration, visit leftmost descendant which may be @root */
+	if (!pos)
+		return css_leftmost_descendant(root);
 
 	/* if we visited @root, we're done */
 	if (pos == root)
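Both cgroup_attach_task() hunks replace `continue` with `goto next` because the body sits inside `do { ... } while_each_thread(leader, tsk)`: `continue` jumps straight to the loop condition and skips the trailing `if (!threadgroup) break;`, so attaching a single exiting (or already-attached) task could keep walking the thread list instead of stopping. A minimal userspace sketch of the control-flow pitfall (illustrative values only, not kernel code):

#include <stdio.h>

int main(void)
{
	int tasks[] = { 1, 2, 3 };
	int threadgroup = 0;	/* attaching a single task, as in the fixed code */
	int i = 0;

	do {
		if (tasks[i] == 1)	/* e.g. PF_EXITING: nothing to do */
			goto next;	/* a `continue` here would skip the break below */
		printf("attach task %d\n", tasks[i]);
next:
		if (!threadgroup)
			break;		/* must run even for skipped tasks */
	} while (++i < 3);
	return 0;
}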
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d49a9d29334c..953c14348375 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6767,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (ret)
 		return -EFAULT;
 
+	/* disabled for now */
+	if (attr->mmap2)
+		return -EINVAL;
+
 	if (attr->__reserved_1)
 		return -EINVAL;
 
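With this guard in place, perf_event_open() rejects any attr that sets the temporarily disabled mmap2 bit. A minimal userspace sketch, assuming a 3.12-era linux/perf_event.h that already declares the mmap2 bit and PERF_COUNT_SW_DUMMY:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.mmap2 = 1;		/* rejected while the feature is disabled */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");	/* expect EINVAL on this kernel */
	return 0;
}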
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 6d647aedffea..d24105b1b794 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
-		    struct ww_acquire_ctx *ww_ctx)
+		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		struct task_struct *owner;
 		struct mspin_node node;
 
-		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
 
 			ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
-			if (!__builtin_constant_p(ww_ctx == NULL)) {
+			if (use_ww_ctx) {
 				struct ww_mutex *ww;
 				ww = container_of(lock, struct ww_mutex, base);
 
@@ -551,7 +551,7 @@ slowpath:
 			goto err;
 		}
 
-		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			ret = __mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				goto err;
@@ -575,7 +575,7 @@ skip_wait:
 	lock_acquired(&lock->dep_map, ip);
 	mutex_set_owner(lock);
 
-	if (!__builtin_constant_p(ww_ctx == NULL)) {
+	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    subclass, NULL, _RET_IP_, NULL);
+			    subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    0, nest, _RET_IP_, NULL);
+			    0, nest, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_KILLABLE,
-				   subclass, NULL, _RET_IP_, NULL);
+				   subclass, NULL, _RET_IP_, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, NULL, _RET_IP_, NULL);
+				   subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx);
+				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx);
+				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 
 	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-			    NULL, _RET_IP_, NULL);
+			    NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
 	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-				   NULL, _RET_IP_, NULL);
+				   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, NULL);
+				   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx);
+				   NULL, _RET_IP_, ctx, 1);
 }
 
 static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 				       struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx);
+				   NULL, _RET_IP_, ctx, 1);
 }
 
 #endif
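The repeated mutex.c change swaps the fragile `!__builtin_constant_p(ww_ctx == NULL)` test for an explicit `const bool use_ww_ctx` argument. Because __mutex_lock_common() is __always_inline, the flag is a compile-time constant at every call site, so the compiler can still drop the wound/wait code from the ordinary mutex paths without depending on how a given gcc version folds __builtin_constant_p(). A userspace approximation of the pattern (hypothetical names, not kernel code):

#include <stdio.h>
#include <stddef.h>

struct ctx { int acquired; };

static inline __attribute__((always_inline))
void lock_common(struct ctx *ctx, const int use_ctx)
{
	/* When use_ctx is the constant 0, the branch below is provably
	 * dead after inlining; short-circuiting also keeps ctx untouched. */
	if (use_ctx && ctx->acquired > 0)
		printf("ww path: acquired=%d\n", ctx->acquired);
	printf("common lock path\n");
}

int main(void)
{
	struct ctx c = { .acquired = 1 };

	lock_common(NULL, 0);	/* plain mutex: ww branch eliminated */
	lock_common(&c, 1);	/* ww mutex: branch kept */
	return 0;
}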
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c9c759d5a15c..0121dab83f43 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -846,7 +846,7 @@ static int software_resume(void)
 	goto Finish;
 }
 
-late_initcall(software_resume);
+late_initcall_sync(software_resume);
 
 
 static const char * const hibernation_modes[] = {
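late_initcall_sync() places software_resume() at init level 7s, which the boot sequence processes only after every plain late_initcall() (level 7) has run, so the resume check is now ordered behind drivers that register late. A minimal built-in sketch of the ordering, with hypothetical function names:

#include <linux/init.h>
#include <linux/printk.h>

static int __init runs_earlier(void)
{
	pr_info("level 7: plain late_initcall\n");
	return 0;
}
late_initcall(runs_earlier);

static int __init runs_last(void)
{
	pr_info("level 7s: after all plain late_initcalls\n");
	return 0;
}
late_initcall_sync(runs_last);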
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 38959c866789..662c5798a685 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -33,29 +33,64 @@ struct ce_unbind {
 	int res;
 };
 
-/**
- * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
- * @latch:	value to convert
- * @evt:	pointer to clock event device descriptor
- *
- * Math helper, returns latch value converted to nanoseconds (bound checked)
- */
-u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+			bool ismax)
 {
 	u64 clc = (u64) latch << evt->shift;
+	u64 rnd;
 
 	if (unlikely(!evt->mult)) {
 		evt->mult = 1;
 		WARN_ON(1);
 	}
+	rnd = (u64) evt->mult - 1;
+
+	/*
+	 * Upper bound sanity check. If the backwards conversion is
+	 * not equal latch, we know that the above shift overflowed.
+	 */
+	if ((clc >> evt->shift) != (u64)latch)
+		clc = ~0ULL;
+
+	/*
+	 * Scaled math oddities:
+	 *
+	 * For mult <= (1 << shift) we can safely add mult - 1 to
+	 * prevent integer rounding loss. So the backwards conversion
+	 * from nsec to device ticks will be correct.
+	 *
+	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
+	 * need to be careful. Adding mult - 1 will result in a value
+	 * which when converted back to device ticks can be larger
+	 * than latch by up to (mult - 1) >> shift. For the min_delta
+	 * calculation we still want to apply this in order to stay
+	 * above the minimum device ticks limit. For the upper limit
+	 * we would end up with a latch value larger than the upper
+	 * limit of the device, so we omit the add to stay below the
+	 * device upper boundary.
+	 *
+	 * Also omit the add if it would overflow the u64 boundary.
+	 */
+	if ((~0ULL - clc > rnd) &&
+	    (!ismax || evt->mult <= (1U << evt->shift)))
+		clc += rnd;
 
 	do_div(clc, evt->mult);
-	if (clc < 1000)
-		clc = 1000;
-	if (clc > KTIME_MAX)
-		clc = KTIME_MAX;
 
-	return clc;
+	/* Deltas less than 1usec are pointless noise */
+	return clc > 1000 ? clc : 1000;
+}
+
+/**
+ * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
+ * @latch:	value to convert
+ * @evt:	pointer to clock event device descriptor
+ *
+ * Math helper, returns latch value converted to nanoseconds (bound checked)
+ */
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+{
+	return cev_delta2ns(latch, evt, false);
 }
 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 
@@ -380,8 +380,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
 		sec = 600;
 
 	clockevents_calc_mult_shift(dev, freq, sec);
-	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
-	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
 }
 
 /**
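The rounding term exists because the kernel programs devices with the inverse conversion, ticks = (ns * mult) >> shift; truncating in cev_delta2ns() could make min_delta_ns convert back to fewer ticks than the device minimum. A worked userspace example with hypothetical scale values (mult = 25166, shift = 23, roughly a 3 MHz device):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mult = 25166, shift = 23;	/* hypothetical device scale */
	uint64_t latch = 3;			/* min_delta_ticks */

	uint64_t ns_trunc = (latch << shift) / mult;		  /* 999 ns */
	uint64_t ns_round = ((latch << shift) + mult - 1) / mult; /* 1000 ns */

	/* back-conversion used when programming the next event: */
	printf("truncated: %llu ns -> %llu ticks (below the 3-tick minimum)\n",
	       (unsigned long long)ns_trunc,
	       (unsigned long long)((ns_trunc * mult) >> shift));
	printf("rounded:   %llu ns -> %llu ticks (honours the minimum)\n",
	       (unsigned long long)ns_round,
	       (unsigned long long)((ns_round * mult) >> shift));
	return 0;
}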