author		David S. Miller <davem@davemloft.net>	2013-11-04 13:48:30 -0500
committer	David S. Miller <davem@davemloft.net>	2013-11-04 13:48:30 -0500
commit		394efd19d5fcae936261bd48e5b33b21897aacf8 (patch)
tree		c48cf3ddbb07fd87309f1abdf31a27c71330e587 /kernel
parent		f421436a591d34fa5279b54a96ac07d70250cc8d (diff)
parent		be408cd3e1fef73e9408b196a79b9934697fe3b1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/netconsole.c
	net/bridge/br_private.h

Three mostly trivial conflicts.

The net/bridge/br_private.h conflict was a function signature
(argument addition) change overlapping with the extern removals
from Joe Perches.

In drivers/net/netconsole.c we had one change adjusting a printk
message whilst another changed "printk(KERN_INFO" into "pr_info(".

Lastly, the emulex change was a new inline function addition
overlapping with Joe Perches's extern removals.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c		  4
-rw-r--r--	kernel/events/ring_buffer.c	 31
-rw-r--r--	kernel/mutex.c			 32
-rw-r--r--	kernel/power/hibernate.c	  2
-rw-r--r--	kernel/time/clockevents.c	 65
5 files changed, 98 insertions, 36 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d49a9d29334c..953c14348375 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6767,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (ret)
 		return -EFAULT;
 
+	/* disabled for now */
+	if (attr->mmap2)
+		return -EINVAL;
+
 	if (attr->__reserved_1)
 		return -EINVAL;
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index cd55144270b5..9c2ddfbf4525 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -87,10 +87,31 @@ again:
 		goto out;
 
 	/*
-	 * Publish the known good head. Rely on the full barrier implied
-	 * by atomic_dec_and_test() order the rb->head read and this
-	 * write.
+	 * Since the mmap() consumer (userspace) can run on a different CPU:
+	 *
+	 *   kernel				user
+	 *
+	 *   READ ->data_tail			READ ->data_head
+	 *   smp_mb()	(A)			smp_rmb()	(C)
+	 *   WRITE $data			READ $data
+	 *   smp_wmb()	(B)			smp_mb()	(D)
+	 *   STORE ->data_head			WRITE ->data_tail
+	 *
+	 * Where A pairs with D, and B pairs with C.
+	 *
+	 * I don't think A needs to be a full barrier because we won't in fact
+	 * write data until we see the store from userspace. So we simply don't
+	 * issue the data WRITE until we observe it. Be conservative for now.
+	 *
+	 * OTOH, D needs to be a full barrier since it separates the data READ
+	 * from the tail WRITE.
+	 *
+	 * For B a WMB is sufficient since it separates two WRITEs, and for C
+	 * an RMB is sufficient since it separates two READs.
+	 *
+	 * See perf_output_begin().
 	 */
+	smp_wmb();
 	rb->user_page->data_head = head;
 
 	/*
@@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output_handle *handle,
 	 * Userspace could choose to issue a mb() before updating the
 	 * tail pointer. So that all reads will be completed before the
 	 * write is issued.
+	 *
+	 * See perf_output_put_handle().
 	 */
 	tail = ACCESS_ONCE(rb->user_page->data_tail);
-	smp_rmb();
+	smp_mb();
 	offset = head = local_read(&rb->head);
 	head += size;
 	if (unlikely(!perf_output_space(rb, tail, offset, head)))
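
The barrier pairing spelled out in the new perf_output_put_handle() comment
can be modelled in plain user-space C11. The sketch below is illustrative
only, not kernel code: demo_page, producer_publish and consumer_drain are
names invented here, and atomic_thread_fence() stands in for the kernel's
smp_wmb()/smp_rmb()/smp_mb().

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct demo_page {
	_Atomic unsigned long data_head;	/* advanced by the producer */
	_Atomic unsigned long data_tail;	/* advanced by the consumer */
	char data[64];
};

/* Producer: READ ->data_tail (A), WRITE $data, smp_wmb() (B), STORE ->data_head */
static void producer_publish(struct demo_page *pg, const char *msg)
{
	unsigned long head = atomic_load_explicit(&pg->data_head,
						  memory_order_relaxed);
	unsigned long tail = atomic_load_explicit(&pg->data_tail,
						  memory_order_relaxed);

	atomic_thread_fence(memory_order_seq_cst);	/* (A), kept full here */
	(void)tail;	/* a real ring buffer would check free space against tail */

	strcpy(pg->data, msg);				/* WRITE $data         */
	atomic_thread_fence(memory_order_release);	/* (B), like smp_wmb() */
	atomic_store_explicit(&pg->data_head, head + strlen(msg) + 1,
			      memory_order_relaxed);	/* STORE ->data_head   */
}

/* Consumer: READ ->data_head, smp_rmb() (C), READ $data, smp_mb() (D), WRITE ->data_tail */
static void consumer_drain(struct demo_page *pg)
{
	unsigned long head = atomic_load_explicit(&pg->data_head,
						  memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* (C), like smp_rmb() */
	printf("consumed %lu bytes: %s\n", head, pg->data);	/* READ $data  */
	atomic_thread_fence(memory_order_seq_cst);	/* (D), like smp_mb()  */
	atomic_store_explicit(&pg->data_tail, head,
			      memory_order_relaxed);	/* WRITE ->data_tail   */
}

int main(void)
{
	struct demo_page pg = { 0 };

	producer_publish(&pg, "hello");
	consumer_drain(&pg);
	return 0;
}

A pairs with D and B pairs with C as in the table; the second hunk above
accordingly upgrades barrier A in perf_output_begin() from smp_rmb() to a
full smp_mb(), staying conservative as the comment notes.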
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 6d647aedffea..d24105b1b794 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
-		    struct ww_acquire_ctx *ww_ctx)
+		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		struct task_struct *owner;
 		struct mspin_node node;
 
-		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
 
 			ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
-			if (!__builtin_constant_p(ww_ctx == NULL)) {
+			if (use_ww_ctx) {
 				struct ww_mutex *ww;
 				ww = container_of(lock, struct ww_mutex, base);
 
@@ -551,7 +551,7 @@ slowpath:
 			goto err;
 		}
 
-		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			ret = __mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				goto err;
@@ -575,7 +575,7 @@ skip_wait:
 	lock_acquired(&lock->dep_map, ip);
 	mutex_set_owner(lock);
 
-	if (!__builtin_constant_p(ww_ctx == NULL)) {
+	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    subclass, NULL, _RET_IP_, NULL);
+			    subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    0, nest, _RET_IP_, NULL);
+			    0, nest, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_KILLABLE,
-				   subclass, NULL, _RET_IP_, NULL);
+				   subclass, NULL, _RET_IP_, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, NULL, _RET_IP_, NULL);
+				   subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx);
+				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx);
+				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 
 	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-			    NULL, _RET_IP_, NULL);
+			    NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
 	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-				   NULL, _RET_IP_, NULL);
+				   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, NULL);
+				   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx);
+				   NULL, _RET_IP_, ctx, 1);
 }
 
 static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 			       struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx);
+				   NULL, _RET_IP_, ctx, 1);
 }
 
 #endif
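
On the mutex.c change: the point of the new const bool use_ww_ctx argument,
compared with the earlier !__builtin_constant_p(ww_ctx == NULL) trick, is
that __mutex_lock_common() is __always_inline and every caller passes a
literal 0 or 1, so the compiler can still drop the dead ww_ctx branches
without relying on how a particular gcc version folds __builtin_constant_p().
A rough user-space sketch of the same pattern, with made-up names:

#include <stdbool.h>
#include <stdio.h>

struct demo_ctx { int acquired; };

/*
 * 'use_ctx' is a literal constant at every call site and the helper is
 * always_inline, so the ctx branch is compiled out of demo_lock() and
 * ctx is never dereferenced there even though it is NULL.
 */
static inline __attribute__((always_inline))
int demo_lock_common(struct demo_ctx *ctx, const bool use_ctx)
{
	if (use_ctx && ctx->acquired > 0)
		return -1;		/* ww-style "back off" path */
	return 0;			/* plain mutex path */
}

static int demo_lock(void)
{
	return demo_lock_common(NULL, 0);	/* ctx code eliminated */
}

static int demo_ww_lock(struct demo_ctx *ctx)
{
	return demo_lock_common(ctx, 1);	/* ctx code retained */
}

int main(void)
{
	struct demo_ctx ctx = { .acquired = 1 };

	printf("plain=%d ww=%d\n", demo_lock(), demo_ww_lock(&ctx));
	return 0;
}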
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c9c759d5a15c..0121dab83f43 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -846,7 +846,7 @@ static int software_resume(void)
 		goto Finish;
 }
 
-late_initcall(software_resume);
+late_initcall_sync(software_resume);
 
 
 static const char * const hibernation_modes[] = {
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 38959c866789..662c5798a685 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -33,29 +33,64 @@ struct ce_unbind {
 	int res;
 };
 
-/**
- * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
- * @latch:	value to convert
- * @evt:	pointer to clock event device descriptor
- *
- * Math helper, returns latch value converted to nanoseconds (bound checked)
- */
-u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+			bool ismax)
 {
 	u64 clc = (u64) latch << evt->shift;
+	u64 rnd;
 
 	if (unlikely(!evt->mult)) {
 		evt->mult = 1;
 		WARN_ON(1);
 	}
+	rnd = (u64) evt->mult - 1;
+
+	/*
+	 * Upper bound sanity check. If the backwards conversion is
+	 * not equal latch, we know that the above shift overflowed.
+	 */
+	if ((clc >> evt->shift) != (u64)latch)
+		clc = ~0ULL;
+
+	/*
+	 * Scaled math oddities:
+	 *
+	 * For mult <= (1 << shift) we can safely add mult - 1 to
+	 * prevent integer rounding loss. So the backwards conversion
+	 * from nsec to device ticks will be correct.
+	 *
+	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
+	 * need to be careful. Adding mult - 1 will result in a value
+	 * which when converted back to device ticks can be larger
+	 * than latch by up to (mult - 1) >> shift. For the min_delta
+	 * calculation we still want to apply this in order to stay
+	 * above the minimum device ticks limit. For the upper limit
+	 * we would end up with a latch value larger than the upper
+	 * limit of the device, so we omit the add to stay below the
+	 * device upper boundary.
+	 *
+	 * Also omit the add if it would overflow the u64 boundary.
+	 */
+	if ((~0ULL - clc > rnd) &&
+	    (!ismax || evt->mult <= (1U << evt->shift)))
+		clc += rnd;
 
 	do_div(clc, evt->mult);
-	if (clc < 1000)
-		clc = 1000;
-	if (clc > KTIME_MAX)
-		clc = KTIME_MAX;
 
-	return clc;
+	/* Deltas less than 1usec are pointless noise */
+	return clc > 1000 ? clc : 1000;
+}
+
+/**
+ * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
+ * @latch:	value to convert
+ * @evt:	pointer to clock event device descriptor
+ *
+ * Math helper, returns latch value converted to nanoseconds (bound checked)
+ */
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+{
+	return cev_delta2ns(latch, evt, false);
 }
 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 
@@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
 		sec = 600;
 
 	clockevents_calc_mult_shift(dev, freq, sec);
-	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
-	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
 }
 
 /**
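
To get a feel for what cev_delta2ns() computes, here is a stand-alone
user-space copy of the same arithmetic. The names and the example mult/shift
values (roughly a 10MHz device) are chosen here for illustration; do_div()
is replaced by a plain 64-bit division.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ns ~= (latch << shift) / mult, rounded up and clamped, as above. */
static uint64_t demo_delta2ns(uint64_t latch, uint32_t mult, uint32_t shift,
			      bool ismax)
{
	uint64_t clc = latch << shift;
	uint64_t rnd = (uint64_t)mult - 1;

	if ((clc >> shift) != latch)	/* the shift overflowed 64 bits */
		clc = ~0ULL;

	/* Round up, except for the max limit of a >1GHz (mult > 1<<shift) device. */
	if ((~0ULL - clc > rnd) && (!ismax || mult <= (1U << shift)))
		clc += rnd;

	clc /= mult;

	return clc > 1000 ? clc : 1000;	/* deltas below 1usec are noise */
}

int main(void)
{
	/*
	 * mult = 167772, shift = 24 corresponds to a ~10MHz device
	 * (one tick ~ 100ns).  25 ticks is ~2.5us; the + (mult - 1)
	 * rounding nudges the result up so that converting the answer
	 * back to ticks never comes out below the requested minimum.
	 */
	printf("min: %llu ns\n",
	       (unsigned long long)demo_delta2ns(25, 167772, 24, false));

	/*
	 * ismax suppresses the rounding only when mult > (1 << shift);
	 * here mult is smaller, so the add still applies.
	 */
	printf("max: %llu ns\n",
	       (unsigned long long)demo_delta2ns(0xffffffff, 167772, 24, true));
	return 0;
}

With these numbers the first call prints 2501 and the second about 4.3e11,
i.e. the 32-bit tick limit of such a device is on the order of 430 seconds.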