aboutsummaryrefslogtreecommitdiffstats
path: root/fs/gfs2/glock.h
diff options
context:
space:
mode:
authorSteven Whitehouse <swhiteho@redhat.com>2008-02-22 11:07:18 -0500
committerSteven Whitehouse <swhiteho@redhat.com>2008-03-31 05:41:12 -0400
commit7afd88d9166a752b52517648bcbe923e05d393fc (patch)
tree2fb945189e3cb1be7ad007088f8ec86e9f67ece6 /fs/gfs2/glock.h
parent60b779cfc1fa52034a996ee12a23b62d32e86000 (diff)
[GFS2] Fix a page lock / glock deadlock
We've previously been using a "try lock" in readpage on the basis that it would prevent deadlocks due to the inverted lock ordering (our normal lock ordering is glock first and then page lock). Unfortunately tests have shown that this isn't enough. If the glock has a demote request queued such that run_queue() in the glock code tries to do a demote when it's called under readpage, then it will try to write out all the dirty pages, which requires locking them. This then deadlocks with the page locked by readpage. The solution is to always require two calls into readpage. The first unlocks the page, gets the glock and returns AOP_TRUNCATED_PAGE; the second does the actual readpage and unlocks the glock & page as required. Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glock.h')
-rw-r--r--fs/gfs2/glock.h13
1 file changed, 6 insertions, 7 deletions
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index ace5770760ce..cdad3e6f8150 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -32,24 +32,23 @@
 #define GLR_TRYFAILED 13
 #define GLR_CANCELED 14

-static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
+static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;
-	int locked = 0;
 	struct pid *pid;

 	/* Look in glock's list of holders for one with current task as owner */
 	spin_lock(&gl->gl_spin);
 	pid = task_pid(current);
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
-		if (gh->gh_owner_pid == pid) {
-			locked = 1;
-			break;
-		}
+		if (gh->gh_owner_pid == pid)
+			goto out;
 	}
+	gh = NULL;
+out:
 	spin_unlock(&gl->gl_spin);

-	return locked;
+	return gh;
 }

 static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)