author    Jeff Mahoney <jeffm@suse.com>  2013-08-08 17:34:46 -0400
committer Jeff Mahoney <jeffm@suse.de>  2013-08-08 17:34:46 -0400
commit    278f6679f454bf185a07d9a4ca355b153482d17a (patch)
tree      ffead073e67cfdc1ddfc3949ebc93c06dcaaab8f /fs/reiserfs/lock.c
parent    4c05141df57f4ffc1a9a28f1925434924179bfe4 (diff)
reiserfs: locking, handle nested locks properly
The reiserfs write lock replaced the BKL and uses similar semantics.

Frederic's locking code makes a distinction between when the lock is nested
and when it's being acquired/released, but I don't think that's the right
distinction to make.

The right distinction is between the lock being released at end-of-use and
the lock being released for a schedule. The unlock should return the depth
and the lock should restore it, rather than the other way around as it is now.

This patch implements that and adds a number of places where the lock
should be dropped.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
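As a minimal sketch (not part of this patch), the caller pattern the new pair implies looks roughly like the following; the helper name example_do_blocking_work() is hypothetical, and the includes assume the fs/reiserfs-internal reiserfs.h header:

#include <linux/sched.h>
#include "reiserfs.h"

/* Hypothetical caller: drop the write lock across a blocking call,
 * then restore the exact nesting depth held before sleeping. */
static void example_do_blocking_work(struct super_block *s)
{
	int depth;

	/* returns the depth we held, or -1 if we didn't own the lock */
	depth = reiserfs_write_unlock_nested(s);

	schedule();	/* or any other call that may sleep */

	/* no-op when depth == -1, otherwise re-acquires and restores depth */
	reiserfs_write_lock_nested(s, depth);
}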
Diffstat (limited to 'fs/reiserfs/lock.c')
-rw-r--r--  fs/reiserfs/lock.c | 43
1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
index d735bc8470e3..045b83ef9fd9 100644
--- a/fs/reiserfs/lock.c
+++ b/fs/reiserfs/lock.c
@@ -48,30 +48,35 @@ void reiserfs_write_unlock(struct super_block *s)
 	}
 }
 
-/*
- * If we already own the lock, just exit and don't increase the depth.
- * Useful when we don't want to lock more than once.
- *
- * We always return the lock_depth we had before calling
- * this function.
- */
-int reiserfs_write_lock_once(struct super_block *s)
+int __must_check reiserfs_write_unlock_nested(struct super_block *s)
 {
 	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+	int depth;
 
-	if (sb_i->lock_owner != current) {
-		mutex_lock(&sb_i->lock);
-		sb_i->lock_owner = current;
-		return sb_i->lock_depth++;
-	}
+	/* this can happen when the lock isn't always held */
+	if (sb_i->lock_owner != current)
+		return -1;
+
+	depth = sb_i->lock_depth;
+
+	sb_i->lock_depth = -1;
+	sb_i->lock_owner = NULL;
+	mutex_unlock(&sb_i->lock);
 
-	return sb_i->lock_depth;
+	return depth;
 }
 
-void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
+void reiserfs_write_lock_nested(struct super_block *s, int depth)
 {
-	if (lock_depth == -1)
-		reiserfs_write_unlock(s);
+	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+	/* this can happen when the lock isn't always held */
+	if (depth == -1)
+		return;
+
+	mutex_lock(&sb_i->lock);
+	sb_i->lock_owner = current;
+	sb_i->lock_depth = depth;
 }
 
 /*
@@ -82,9 +87,7 @@ void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
 {
 	struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
 
-	if (sb_i->lock_depth < 0)
-		reiserfs_panic(sb, "%s called without kernel lock held %d",
-			       caller);
+	WARN_ON(sb_i->lock_depth < 0);
 }
 
 #ifdef CONFIG_REISERFS_CHECK
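A second hedged sketch of the depth == -1 path that the in-diff comments ("this can happen when the lock isn't always held") refer to; example_maybe_locked_path() is a hypothetical helper, not a function in this patch:

/* Hypothetical helper reachable both with and without the write lock.
 * If the caller doesn't own the lock, reiserfs_write_unlock_nested()
 * returns -1 and reiserfs_write_lock_nested(s, -1) returns immediately,
 * so the same drop/restore pair is safe on either path. */
static void example_maybe_locked_path(struct super_block *s)
{
	int depth = reiserfs_write_unlock_nested(s);

	/* ... blocking work that must not run under the write lock ... */

	reiserfs_write_lock_nested(s, depth);
}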