diff options
author | David Howells <dhowells@redhat.com> | 2007-07-31 03:38:49 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-31 18:39:40 -0400 |
commit | ff8e210a9550ad760a62e9803938cd04f9fb0851 (patch) | |
tree | 5fa45dae4ca7a180db2e83e4bc731c2a567f1f0d /fs | |
parent | b34bd06e485abf5b24fc13a9a988ebf4d2915dd6 (diff) |
AFS: fix file locking
Fix file locking for AFS:
(*) Start the lock manager thread under a mutex to avoid a race.
(*) Make the locking non-fair: New readlocks will jump pending writelocks if
there's a readlock currently granted on a file. This makes the behaviour
similar to Linux's VFS locking.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/afs/flock.c | 126 |
1 file changed, 79 insertions, 47 deletions
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 4f77f3caee97..af6952e39a18 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -19,6 +19,7 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl); | |||
19 | static void afs_fl_release_private(struct file_lock *fl); | 19 | static void afs_fl_release_private(struct file_lock *fl); |
20 | 20 | ||
21 | static struct workqueue_struct *afs_lock_manager; | 21 | static struct workqueue_struct *afs_lock_manager; |
22 | static DEFINE_MUTEX(afs_lock_manager_mutex); | ||
22 | 23 | ||
23 | static struct file_lock_operations afs_lock_ops = { | 24 | static struct file_lock_operations afs_lock_ops = { |
24 | .fl_copy_lock = afs_fl_copy_lock, | 25 | .fl_copy_lock = afs_fl_copy_lock, |
@@ -30,12 +31,20 @@ static struct file_lock_operations afs_lock_ops = { | |||
30 | */ | 31 | */ |
31 | static int afs_init_lock_manager(void) | 32 | static int afs_init_lock_manager(void) |
32 | { | 33 | { |
34 | int ret; | ||
35 | |||
36 | ret = 0; | ||
33 | if (!afs_lock_manager) { | 37 | if (!afs_lock_manager) { |
34 | afs_lock_manager = create_singlethread_workqueue("kafs_lockd"); | 38 | mutex_lock(&afs_lock_manager_mutex); |
35 | if (!afs_lock_manager) | 39 | if (!afs_lock_manager) { |
36 | return -ENOMEM; | 40 | afs_lock_manager = |
41 | create_singlethread_workqueue("kafs_lockd"); | ||
42 | if (!afs_lock_manager) | ||
43 | ret = -ENOMEM; | ||
44 | } | ||
45 | mutex_unlock(&afs_lock_manager_mutex); | ||
37 | } | 46 | } |
38 | return 0; | 47 | return ret; |
39 | } | 48 | } |
40 | 49 | ||
41 | /* | 50 | /* |
@@ -68,6 +77,29 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode) | |||
68 | } | 77 | } |
69 | 78 | ||
70 | /* | 79 | /* |
80 | * grant one or more locks (readlocks are allowed to jump the queue if the | ||
81 | * first lock in the queue is itself a readlock) | ||
82 | * - the caller must hold the vnode lock | ||
83 | */ | ||
84 | static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl) | ||
85 | { | ||
86 | struct file_lock *p, *_p; | ||
87 | |||
88 | list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); | ||
89 | if (fl->fl_type == F_RDLCK) { | ||
90 | list_for_each_entry_safe(p, _p, &vnode->pending_locks, | ||
91 | fl_u.afs.link) { | ||
92 | if (p->fl_type == F_RDLCK) { | ||
93 | p->fl_u.afs.state = AFS_LOCK_GRANTED; | ||
94 | list_move_tail(&p->fl_u.afs.link, | ||
95 | &vnode->granted_locks); | ||
96 | wake_up(&p->fl_wait); | ||
97 | } | ||
98 | } | ||
99 | } | ||
100 | } | ||
101 | |||
102 | /* | ||
71 | * do work for a lock, including: | 103 | * do work for a lock, including: |
72 | * - probing for a lock we're waiting on but didn't get immediately | 104 | * - probing for a lock we're waiting on but didn't get immediately |
73 | * - extending a lock that's close to timing out | 105 | * - extending a lock that's close to timing out |
@@ -172,8 +204,7 @@ void afs_lock_work(struct work_struct *work) | |||
172 | struct file_lock, fl_u.afs.link) == fl) { | 204 | struct file_lock, fl_u.afs.link) == fl) { |
173 | fl->fl_u.afs.state = ret; | 205 | fl->fl_u.afs.state = ret; |
174 | if (ret == AFS_LOCK_GRANTED) | 206 | if (ret == AFS_LOCK_GRANTED) |
175 | list_move_tail(&fl->fl_u.afs.link, | 207 | afs_grant_locks(vnode, fl); |
176 | &vnode->granted_locks); | ||
177 | else | 208 | else |
178 | list_del_init(&fl->fl_u.afs.link); | 209 | list_del_init(&fl->fl_u.afs.link); |
179 | wake_up(&fl->fl_wait); | 210 | wake_up(&fl->fl_wait); |
@@ -258,49 +289,50 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl) | |||
258 | 289 | ||
259 | spin_lock(&vnode->lock); | 290 | spin_lock(&vnode->lock); |
260 | 291 | ||
261 | if (list_empty(&vnode->pending_locks)) { | 292 | /* if we've already got a readlock on the server then we can instantly |
262 | /* if there's no-one else with a lock on this vnode, then we | 293 | * grant another readlock, irrespective of whether there are any |
263 | * need to ask the server for a lock */ | 294 | * pending writelocks */ |
264 | if (list_empty(&vnode->granted_locks)) { | 295 | if (type == AFS_LOCK_READ && |
265 | _debug("not locked"); | 296 | vnode->flags & (1 << AFS_VNODE_READLOCKED)) { |
266 | ASSERTCMP(vnode->flags & | 297 | _debug("instant readlock"); |
267 | ((1 << AFS_VNODE_LOCKING) | | 298 | ASSERTCMP(vnode->flags & |
268 | (1 << AFS_VNODE_READLOCKED) | | 299 | ((1 << AFS_VNODE_LOCKING) | |
269 | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); | 300 | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); |
270 | list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); | 301 | ASSERT(!list_empty(&vnode->granted_locks)); |
271 | set_bit(AFS_VNODE_LOCKING, &vnode->flags); | 302 | goto sharing_existing_lock; |
272 | spin_unlock(&vnode->lock); | 303 | } |
273 | 304 | ||
274 | ret = afs_vnode_set_lock(vnode, key, type); | 305 | /* if there's no-one else with a lock on this vnode, then we need to |
275 | clear_bit(AFS_VNODE_LOCKING, &vnode->flags); | 306 | * ask the server for a lock */ |
276 | switch (ret) { | 307 | if (list_empty(&vnode->pending_locks) && |
277 | case 0: | 308 | list_empty(&vnode->granted_locks)) { |
278 | goto acquired_server_lock; | 309 | _debug("not locked"); |
279 | case -EWOULDBLOCK: | 310 | ASSERTCMP(vnode->flags & |
280 | spin_lock(&vnode->lock); | 311 | ((1 << AFS_VNODE_LOCKING) | |
281 | ASSERT(list_empty(&vnode->granted_locks)); | 312 | (1 << AFS_VNODE_READLOCKED) | |
282 | ASSERTCMP(vnode->pending_locks.next, ==, | 313 | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); |
283 | &fl->fl_u.afs.link); | 314 | list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); |
284 | goto wait; | 315 | set_bit(AFS_VNODE_LOCKING, &vnode->flags); |
285 | default: | 316 | spin_unlock(&vnode->lock); |
286 | spin_lock(&vnode->lock); | ||
287 | list_del_init(&fl->fl_u.afs.link); | ||
288 | spin_unlock(&vnode->lock); | ||
289 | goto error; | ||
290 | } | ||
291 | } | ||
292 | 317 | ||
293 | /* if we've already got a readlock on the server and no waiting | 318 | ret = afs_vnode_set_lock(vnode, key, type); |
294 | * writelocks, then we might be able to instantly grant another | 319 | clear_bit(AFS_VNODE_LOCKING, &vnode->flags); |
295 | * readlock */ | 320 | switch (ret) { |
296 | if (type == AFS_LOCK_READ && | 321 | case 0: |
297 | vnode->flags & (1 << AFS_VNODE_READLOCKED)) { | 322 | _debug("acquired"); |
298 | _debug("instant readlock"); | 323 | goto acquired_server_lock; |
299 | ASSERTCMP(vnode->flags & | 324 | case -EWOULDBLOCK: |
300 | ((1 << AFS_VNODE_LOCKING) | | 325 | _debug("would block"); |
301 | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); | 326 | spin_lock(&vnode->lock); |
302 | ASSERT(!list_empty(&vnode->granted_locks)); | 327 | ASSERT(list_empty(&vnode->granted_locks)); |
303 | goto sharing_existing_lock; | 328 | ASSERTCMP(vnode->pending_locks.next, ==, |
329 | &fl->fl_u.afs.link); | ||
330 | goto wait; | ||
331 | default: | ||
332 | spin_lock(&vnode->lock); | ||
333 | list_del_init(&fl->fl_u.afs.link); | ||
334 | spin_unlock(&vnode->lock); | ||
335 | goto error; | ||
304 | } | 336 | } |
305 | } | 337 | } |
306 | 338 | ||