diff options
author | Steven Whitehouse <swhiteho@redhat.com> | 2009-03-09 05:03:51 -0400 |
---|---|---|
committer | Steven Whitehouse <steve@dolmen.chygwyn.com> | 2009-03-24 07:21:27 -0400 |
commit | 6bac243f0793499782267342eba852a8a6cc7ac4 (patch) | |
tree | 8cf9bb9fa54767c6207bc7f72d75364c290702d9 /fs/gfs2/glops.c | |
parent | 02ffad08e838997fad3de05c85560a57e5fd92de (diff) |
GFS2: Clean up of glops.c
This cleans up a number of bits of code, mostly located in glops.c.
A couple of simple functions have been merged into the callers
to make it more obvious what is going on, the mysterious raising
of i_writecount around the truncate_inode_pages() call has been
removed. The meta_go_* operations have been renamed rgrp_go_*
since that is the only lock type that they are used with.
The unused argument of gfs2_read_sb has been removed. Also
a bug has been fixed where a check for the rindex inode was
in the wrong callback. More comments are added, and the
debugging code is improved too.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glops.c')
-rw-r--r-- | fs/gfs2/glops.c | 115 |
1 files changed, 52 insertions, 63 deletions
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index f34bc7093dd1..bf23a62aa925 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -76,29 +76,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) | |||
76 | } | 76 | } |
77 | 77 | ||
78 | /** | 78 | /** |
79 | * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock | 79 | * rgrp_go_sync - sync out the metadata for this glock |
80 | * @gl: the glock | ||
81 | * | ||
82 | */ | ||
83 | |||
84 | static void gfs2_pte_inval(struct gfs2_glock *gl) | ||
85 | { | ||
86 | struct gfs2_inode *ip; | ||
87 | struct inode *inode; | ||
88 | |||
89 | ip = gl->gl_object; | ||
90 | inode = &ip->i_inode; | ||
91 | if (!ip || !S_ISREG(inode->i_mode)) | ||
92 | return; | ||
93 | |||
94 | unmap_shared_mapping_range(inode->i_mapping, 0, 0); | ||
95 | if (test_bit(GIF_SW_PAGED, &ip->i_flags)) | ||
96 | set_bit(GLF_DIRTY, &gl->gl_flags); | ||
97 | |||
98 | } | ||
99 | |||
100 | /** | ||
101 | * meta_go_sync - sync out the metadata for this glock | ||
102 | * @gl: the glock | 80 | * @gl: the glock |
103 | * | 81 | * |
104 | * Called when demoting or unlocking an EX glock. We must flush | 82 | * Called when demoting or unlocking an EX glock. We must flush |
@@ -106,36 +84,42 @@ static void gfs2_pte_inval(struct gfs2_glock *gl) | |||
106 | * not return to caller to demote/unlock the glock until I/O is complete. | 84 | * not return to caller to demote/unlock the glock until I/O is complete. |
107 | */ | 85 | */ |
108 | 86 | ||
109 | static void meta_go_sync(struct gfs2_glock *gl) | 87 | static void rgrp_go_sync(struct gfs2_glock *gl) |
110 | { | 88 | { |
111 | if (gl->gl_state != LM_ST_EXCLUSIVE) | 89 | struct address_space *metamapping = gl->gl_aspace->i_mapping; |
90 | int error; | ||
91 | |||
92 | if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) | ||
112 | return; | 93 | return; |
94 | BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE); | ||
113 | 95 | ||
114 | if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) { | 96 | gfs2_log_flush(gl->gl_sbd, gl); |
115 | gfs2_log_flush(gl->gl_sbd, gl); | 97 | filemap_fdatawrite(metamapping); |
116 | gfs2_meta_sync(gl); | 98 | error = filemap_fdatawait(metamapping); |
117 | gfs2_ail_empty_gl(gl); | 99 | mapping_set_error(metamapping, error); |
118 | } | 100 | gfs2_ail_empty_gl(gl); |
119 | } | 101 | } |
120 | 102 | ||
121 | /** | 103 | /** |
122 | * meta_go_inval - invalidate the metadata for this glock | 104 | * rgrp_go_inval - invalidate the metadata for this glock |
123 | * @gl: the glock | 105 | * @gl: the glock |
124 | * @flags: | 106 | * @flags: |
125 | * | 107 | * |
108 | * We never used LM_ST_DEFERRED with resource groups, so that we | ||
109 | * should always see the metadata flag set here. | ||
110 | * | ||
126 | */ | 111 | */ |
127 | 112 | ||
128 | static void meta_go_inval(struct gfs2_glock *gl, int flags) | 113 | static void rgrp_go_inval(struct gfs2_glock *gl, int flags) |
129 | { | 114 | { |
130 | if (!(flags & DIO_METADATA)) | 115 | struct address_space *mapping = gl->gl_aspace->i_mapping; |
131 | return; | ||
132 | 116 | ||
133 | gfs2_meta_inval(gl); | 117 | BUG_ON(!(flags & DIO_METADATA)); |
134 | if (gl->gl_object == GFS2_I(gl->gl_sbd->sd_rindex)) | 118 | gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); |
135 | gl->gl_sbd->sd_rindex_uptodate = 0; | 119 | truncate_inode_pages(mapping, 0); |
136 | else if (gl->gl_ops == &gfs2_rgrp_glops && gl->gl_object) { | ||
137 | struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object; | ||
138 | 120 | ||
121 | if (gl->gl_object) { | ||
122 | struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object; | ||
139 | rgd->rd_flags &= ~GFS2_RDF_UPTODATE; | 123 | rgd->rd_flags &= ~GFS2_RDF_UPTODATE; |
140 | } | 124 | } |
141 | } | 125 | } |
@@ -152,48 +136,54 @@ static void inode_go_sync(struct gfs2_glock *gl) | |||
152 | struct address_space *metamapping = gl->gl_aspace->i_mapping; | 136 | struct address_space *metamapping = gl->gl_aspace->i_mapping; |
153 | int error; | 137 | int error; |
154 | 138 | ||
155 | if (gl->gl_state != LM_ST_UNLOCKED) | ||
156 | gfs2_pte_inval(gl); | ||
157 | if (gl->gl_state != LM_ST_EXCLUSIVE) | ||
158 | return; | ||
159 | |||
160 | if (ip && !S_ISREG(ip->i_inode.i_mode)) | 139 | if (ip && !S_ISREG(ip->i_inode.i_mode)) |
161 | ip = NULL; | 140 | ip = NULL; |
141 | if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) | ||
142 | unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); | ||
143 | if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) | ||
144 | return; | ||
162 | 145 | ||
163 | if (test_bit(GLF_DIRTY, &gl->gl_flags)) { | 146 | BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE); |
164 | gfs2_log_flush(gl->gl_sbd, gl); | 147 | |
165 | filemap_fdatawrite(metamapping); | 148 | gfs2_log_flush(gl->gl_sbd, gl); |
166 | if (ip) { | 149 | filemap_fdatawrite(metamapping); |
167 | struct address_space *mapping = ip->i_inode.i_mapping; | 150 | if (ip) { |
168 | filemap_fdatawrite(mapping); | 151 | struct address_space *mapping = ip->i_inode.i_mapping; |
169 | error = filemap_fdatawait(mapping); | 152 | filemap_fdatawrite(mapping); |
170 | mapping_set_error(mapping, error); | 153 | error = filemap_fdatawait(mapping); |
171 | } | 154 | mapping_set_error(mapping, error); |
172 | error = filemap_fdatawait(metamapping); | ||
173 | mapping_set_error(metamapping, error); | ||
174 | clear_bit(GLF_DIRTY, &gl->gl_flags); | ||
175 | gfs2_ail_empty_gl(gl); | ||
176 | } | 155 | } |
156 | error = filemap_fdatawait(metamapping); | ||
157 | mapping_set_error(metamapping, error); | ||
158 | gfs2_ail_empty_gl(gl); | ||
177 | } | 159 | } |
178 | 160 | ||
179 | /** | 161 | /** |
180 | * inode_go_inval - prepare a inode glock to be released | 162 | * inode_go_inval - prepare a inode glock to be released |
181 | * @gl: the glock | 163 | * @gl: the glock |
182 | * @flags: | 164 | * @flags: |
165 | * | ||
166 | * Normally we invalidate everything, but if we are moving into | ||
167 | * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we | ||
168 | * can keep hold of the metadata, since it won't have changed. | ||
183 | * | 169 | * |
184 | */ | 170 | */ |
185 | 171 | ||
186 | static void inode_go_inval(struct gfs2_glock *gl, int flags) | 172 | static void inode_go_inval(struct gfs2_glock *gl, int flags) |
187 | { | 173 | { |
188 | struct gfs2_inode *ip = gl->gl_object; | 174 | struct gfs2_inode *ip = gl->gl_object; |
189 | int meta = (flags & DIO_METADATA); | ||
190 | 175 | ||
191 | if (meta) { | 176 | gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); |
192 | gfs2_meta_inval(gl); | 177 | |
178 | if (flags & DIO_METADATA) { | ||
179 | struct address_space *mapping = gl->gl_aspace->i_mapping; | ||
180 | truncate_inode_pages(mapping, 0); | ||
193 | if (ip) | 181 | if (ip) |
194 | set_bit(GIF_INVALID, &ip->i_flags); | 182 | set_bit(GIF_INVALID, &ip->i_flags); |
195 | } | 183 | } |
196 | 184 | ||
185 | if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) | ||
186 | gl->gl_sbd->sd_rindex_uptodate = 0; | ||
197 | if (ip && S_ISREG(ip->i_inode.i_mode)) | 187 | if (ip && S_ISREG(ip->i_inode.i_mode)) |
198 | truncate_inode_pages(ip->i_inode.i_mapping, 0); | 188 | truncate_inode_pages(ip->i_inode.i_mapping, 0); |
199 | } | 189 | } |
@@ -395,7 +385,6 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl) | |||
395 | } | 385 | } |
396 | 386 | ||
397 | const struct gfs2_glock_operations gfs2_meta_glops = { | 387 | const struct gfs2_glock_operations gfs2_meta_glops = { |
398 | .go_xmote_th = meta_go_sync, | ||
399 | .go_type = LM_TYPE_META, | 388 | .go_type = LM_TYPE_META, |
400 | }; | 389 | }; |
401 | 390 | ||
@@ -410,8 +399,8 @@ const struct gfs2_glock_operations gfs2_inode_glops = { | |||
410 | }; | 399 | }; |
411 | 400 | ||
412 | const struct gfs2_glock_operations gfs2_rgrp_glops = { | 401 | const struct gfs2_glock_operations gfs2_rgrp_glops = { |
413 | .go_xmote_th = meta_go_sync, | 402 | .go_xmote_th = rgrp_go_sync, |
414 | .go_inval = meta_go_inval, | 403 | .go_inval = rgrp_go_inval, |
415 | .go_demote_ok = rgrp_go_demote_ok, | 404 | .go_demote_ok = rgrp_go_demote_ok, |
416 | .go_lock = rgrp_go_lock, | 405 | .go_lock = rgrp_go_lock, |
417 | .go_unlock = rgrp_go_unlock, | 406 | .go_unlock = rgrp_go_unlock, |