aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--	lib/rwsem-spinlock.c	20
-rw-r--r--	lib/rwsem.c	20
2 files changed, 37 insertions(+), 3 deletions(-)
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 03b6097eb04e..db4fed74b940 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -20,8 +20,16 @@ struct rwsem_waiter {
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
@@ -183,7 +191,7 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
@@ -223,6 +231,11 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	;
 }
 
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
@@ -292,9 +305,10 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
diff --git a/lib/rwsem.c b/lib/rwsem.c
index bae597284889..b322421c2969 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -8,6 +8,26 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
+/*
+ * Initialize an rwsem:
+ */
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+}
+
+EXPORT_SYMBOL(__init_rwsem);
+
 struct rwsem_waiter {
 	struct list_head list;
 	struct task_struct *task;