about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorMichal Hocko <mhocko@suse.com>2016-04-07 11:12:31 -0400
committerIngo Molnar <mingo@kernel.org>2016-04-22 02:58:33 -0400
commit916633a403702549d37ea353e63a68e5b0dc27ad (patch)
tree2e36d9a73b0b8781aac44e2b99fa749b7122d2ce
parent664b4e24c6145830885e854195376351b0eb3eee (diff)
locking/rwsem: Provide down_write_killable()
Now that all the architectures implement the necessary glue code we can introduce down_write_killable(). The only difference wrt. regular down_write() is that the slow path waits in TASK_KILLABLE state and the interruption by the fatal signal is reported as -EINTR to the caller. Signed-off-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Signed-off-by: Jason Low <jason.low2@hp.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Chris Zankel <chris@zankel.net> Cc: David S. Miller <davem@davemloft.net> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: linux-xtensa@linux-xtensa.org Cc: sparclinux@vger.kernel.org Link: http://lkml.kernel.org/r/1460041951-22347-12-git-send-email-mhocko@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/include/asm/rwsem.h6
-rw-r--r--include/linux/lockdep.h15
-rw-r--r--include/linux/rwsem.h1
-rw-r--r--kernel/locking/rwsem.c19
4 files changed, 38 insertions, 3 deletions
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index d759c5f70f49..453744c1d347 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -102,9 +102,9 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 #define ____down_write(sem, slow_path)			\
 ({							\
 	long tmp;					\
-	struct rw_semaphore* ret = sem;			\
+	struct rw_semaphore* ret;			\
 	asm volatile("# beginning down_write\n\t"	\
-		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"	\
+		     LOCK_PREFIX "  xadd      %1,(%3)\n\t"	\
		     /* adds 0xffff0001, returns the old value */ \
		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
		     /* was the active mask 0 before? */\
@@ -112,7 +112,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
		     " call " slow_path "\n"		\
		     "1:\n"				\
		     "# ending down_write"		\
-		     : "+m" (sem->count), "=d" (tmp), "+a" (ret)	\
+		     : "+m" (sem->count), "=d" (tmp), "=a" (ret)	\
		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
		     : "memory", "cc");			\
	ret;						\
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..accfe56d8c51 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -444,6 +444,18 @@ do {								\
 	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 } while (0)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
+({								\
+	int ____err = 0;					\
+	if (!try(_lock)) {					\
+		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
+		____err = lock(_lock);				\
+	}							\
+	if (!____err)						\
+		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
+	____err;						\
+})
+
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
@@ -452,6 +464,9 @@ do {								\
 #define LOCK_CONTENDED(_lock, try, lock) \
 	lock(_lock)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+	lock(_lock)
+
 #endif /* CONFIG_LOCK_STAT */
 
 #ifdef CONFIG_LOCKDEP
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 7d7ae029dac5..d1c12d160ace 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -118,6 +118,7 @@ extern int down_read_trylock(struct rw_semaphore *sem);
  * lock for writing
  */
 extern void down_write(struct rw_semaphore *sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem);
 
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 205be0ce34de..c817216c1615 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -55,6 +55,25 @@ void __sched down_write(struct rw_semaphore *sem)
 EXPORT_SYMBOL(down_write);
 
 /*
+ * lock for writing
+ */
+int __sched down_write_killable(struct rw_semaphore *sem)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
+	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+		rwsem_release(&sem->dep_map, 1, _RET_IP_);
+		return -EINTR;
+	}
+
+	rwsem_set_owner(sem);
+	return 0;
+}
+
+EXPORT_SYMBOL(down_write_killable);
+
+/*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
 int down_write_trylock(struct rw_semaphore *sem)