author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 15:38:26 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 15:38:26 -0500
commit    8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree      a4ba543649219cbb28d91aab65b785d763f5d069 /tools
parent    6098850e7e6978f95a958f79a645a653228d0002 (diff)
parent    450cbdd0125cfa5d7bbf9e2a6b6961cc48d29730 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Another attempt at enabling cross-release lockdep dependency
     tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
     with better performance and fewer false positives (Byungchul Park)

   - Introduce lockdep_assert_irqs_enabled()/disabled() and convert
     open-coded equivalents to lockdep variants (Frederic Weisbecker)

   - Add down_read_killable() and use it in the VFS's iterate_dir()
     method (Kirill Tkhai)

   - Convert remaining uses of ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE().
     Most of the conversion was Coccinelle driven (Mark Rutland, Paul E.
     McKenney)

   - Get rid of lockless_dereference(), by strengthening Alpha atomics,
     strengthening READ_ONCE() with smp_read_barrier_depends() and thus
     being able to convert users of lockless_dereference() to
     READ_ONCE() (Will Deacon)

   - Various micro-optimizations:

      - better PV qspinlocks (Waiman Long)
      - better x86 barriers (Michael S. Tsirkin)
      - better x86 refcounts (Kees Cook)

   - ... plus other fixes and enhancements (Borislav Petkov, Juergen
     Gross, Miguel Bernal Marin)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
  rcu: Use lockdep to assert IRQs are disabled/enabled
  netpoll: Use lockdep to assert IRQs are disabled/enabled
  timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
  sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
  irq_work: Use lockdep to assert IRQs are disabled/enabled
  irq/timings: Use lockdep to assert IRQs are disabled/enabled
  perf/core: Use lockdep to assert IRQs are disabled/enabled
  x86: Use lockdep to assert IRQs are disabled/enabled
  smp/core: Use lockdep to assert IRQs are disabled/enabled
  timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
  timers/nohz: Use lockdep to assert IRQs are disabled/enabled
  workqueue: Use lockdep to assert IRQs are disabled/enabled
  irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
  locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
  locking/pvqspinlock: Implement hybrid PV queued/unfair locks
  locking/rwlocks: Fix comments
  x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
  block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
  workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
  ...
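Of the items above, the one visible throughout the tools/ diff below is the ACCESS_ONCE() -> READ_ONCE() conversion; the new lockdep assertion API is the other recurring theme in the commit list. As a rough sketch of how a converted call site reads (the function and per-CPU counter here are hypothetical, for illustration only; the real API is lockdep_assert_irqs_enabled()/disabled() from <linux/lockdep.h>):

#include <linux/lockdep.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_counter);	/* hypothetical */

/*
 * Under CONFIG_PROVE_LOCKING the assertion checks lockdep's tracked
 * hardirq state and warns once if IRQs are still enabled; without
 * lockdep it compiles away, unlike an open-coded WARN_ON(!irqs_disabled()).
 */
static void demo_update(void)
{
	lockdep_assert_irqs_disabled();	/* caller must have IRQs off */
	__this_cpu_inc(demo_counter);
}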
Diffstat (limited to 'tools')
-rw-r--r--  tools/arch/x86/include/asm/atomic.h                                 | 2 +-
-rw-r--r--  tools/include/asm-generic/atomic-gcc.h                              | 2 +-
-rw-r--r--  tools/perf/util/auxtrace.h                                          | 4 ++--
-rw-r--r--  tools/perf/util/session.h                                           | 2 +-
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr.h                         | 2 +-
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr_default_test.c            | 2 +-
-rw-r--r--  tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h  | 5 ++---
-rw-r--r--  tools/virtio/ringtest/main.h                                        | 4 ++++
8 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h
index 7d8c3261a50d..1f5e26aae9fc 100644
--- a/tools/arch/x86/include/asm/atomic.h
+++ b/tools/arch/x86/include/asm/atomic.h
@@ -25,7 +25,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return ACCESS_ONCE((v)->counter);
+	return READ_ONCE((v)->counter);
 }
 
 /**
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h
index 40b231fb95bd..4c1966f7c77a 100644
--- a/tools/include/asm-generic/atomic-gcc.h
+++ b/tools/include/asm-generic/atomic-gcc.h
@@ -22,7 +22,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return ACCESS_ONCE((v)->counter);
+	return READ_ONCE((v)->counter);
 }
 
 /**
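Both atomic_read() hunks are mechanical: the tools/ copies of the kernel headers get the same rename as the kernel proper. As a hedged illustration of what the macro buys, here is a minimal user-space sketch in the style of these headers (GNU C typeof; not the kernel's richer implementation in include/linux/compiler.h, which also handles non-scalar types that ACCESS_ONCE() silently broke on):

#include <stdio.h>

/* Minimal stand-ins, for illustration only. */
#define READ_ONCE(x)	   (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

static int shared;

int main(void)
{
	/*
	 * The volatile cast forces a single access that the optimizer
	 * may not hoist, repeat, or cache; a plain read of "shared"
	 * has no such guarantee when another thread writes it.
	 */
	WRITE_ONCE(shared, 42);
	printf("%d\n", READ_ONCE(shared));
	return 0;
}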
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 33b5e6cdf38c..d19e11b68de7 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -378,7 +378,7 @@ struct addr_filters {
 static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
 {
 	struct perf_event_mmap_page *pc = mm->userpg;
-	u64 head = ACCESS_ONCE(pc->aux_head);
+	u64 head = READ_ONCE(pc->aux_head);
 
 	/* Ensure all reads are done after we read the head */
 	rmb();
@@ -389,7 +389,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
 {
 	struct perf_event_mmap_page *pc = mm->userpg;
 #if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
-	u64 head = ACCESS_ONCE(pc->aux_head);
+	u64 head = READ_ONCE(pc->aux_head);
 #else
 	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
 #endif
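Both auxtrace hunks preserve an acquire-style pairing: load the producer's head exactly once, then fence with rmb() before reading the data it covers. A condensed sketch of that reader pattern, with hypothetical names (the real code reads pc->aux_head from the perf mmap page; READ_ONCE()/rmb() as in the tools headers):

struct ring {
	unsigned long	head;		/* advanced by the producer */
	unsigned char	data[4096];	/* valid up to head */
};

static unsigned long ring_read_head(struct ring *r)
{
	unsigned long head = READ_ONCE(r->head); /* one load, not cached */

	/* Order all reads of r->data below after the head load. */
	rmb();
	return head;
}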
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 41caa098ed15..3f63ee12471d 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -114,7 +114,7 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
 
 extern volatile int session_done;
 
-#define session_done()	ACCESS_ONCE(session_done)
+#define session_done()	READ_ONCE(session_done)
 
 int perf_session__deliver_synth_event(struct perf_session *session,
 				      union perf_event *event,
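session_done is perf's interrupt latch: a signal handler sets it and the event loop polls it, so the read must not be cached across iterations. A condensed sketch of that use (process_one_event() is hypothetical; READ_ONCE() as above):

static volatile int session_done;

static void sig_handler(int sig)
{
	session_done = 1;	/* async-signal-safe plain store */
}

static void event_loop(void)
{
	/* READ_ONCE() keeps the optimizer from hoisting the flag test. */
	while (!READ_ONCE(session_done))
		process_one_event();	/* hypothetical work item */
}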
diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h
index 18ea223bd398..cdb840bc54f2 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr.h
+++ b/tools/testing/selftests/powerpc/dscr/dscr.h
@@ -39,7 +39,7 @@
 #define rmb()	asm volatile("lwsync":::"memory")
 #define wmb()	asm volatile("lwsync":::"memory")
 
-#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))
+#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))
 
 /* Prilvilege state DSCR access */
 inline unsigned long get_dscr(void)
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
index df17c3bab0a7..9e1a37e93b63 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
@@ -27,7 +27,7 @@ static void *do_test(void *in)
 	unsigned long d, cur_dscr, cur_dscr_usr;
 	unsigned long s1, s2;
 
-	s1 = ACCESS_ONCE(sequence);
+	s1 = READ_ONCE(sequence);
 	if (s1 & 1)
 		continue;
 	rmb();
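The hunk above is the reader half of a seqcount-style protocol: an odd sequence number means the writer is mid-update, and a changed number means the snapshot raced with one. A condensed sketch of the full retry loop (sequence and shared_value are hypothetical globals; READ_ONCE()/rmb() as defined in dscr.h above):

static unsigned long sequence;		/* even = stable, odd = writer active */
static unsigned long shared_value;	/* datum the writer publishes */

static unsigned long read_stable(void)
{
	unsigned long s1, s2, val;

	do {
		s1 = READ_ONCE(sequence);
		rmb();			/* read data only after sampling s1 */
		val = shared_value;
		rmb();			/* re-sample sequence after the data */
		s2 = READ_ONCE(sequence);
	} while ((s1 & 1) || s1 != s2);	/* retry on odd or changed count */

	return val;
}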
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
index be3fdd351937..3f95a768a03b 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
@@ -35,8 +35,7 @@
 #define rs_smp_mb() do {} while (0)
 #endif
 
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *) &(x))
-#define READ_ONCE(x) ACCESS_ONCE(x)
-#define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
+#define READ_ONCE(x) (*(volatile typeof(x) *) &(x))
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
 
 #endif
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 90b0133004e1..5706e075adf2 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -110,11 +110,15 @@ static inline void busy_wait(void)
 	barrier();
 }
 
+#if defined(__x86_64__) || defined(__i386__)
+#define smp_mb()     asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
+#else
 /*
  * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
  * with other __ATOMIC_SEQ_CST calls.
  */
 #define smp_mb()	__sync_synchronize()
+#endif
 
 /*
  * This abuses the atomic builtins for thread fences, and