author     Steven Whitehouse <swhiteho@redhat.com>       2009-03-09 05:03:51 -0400
committer  Steven Whitehouse <steve@dolmen.chygwyn.com>  2009-03-24 07:21:27 -0400
commit     6bac243f0793499782267342eba852a8a6cc7ac4
tree       8cf9bb9fa54767c6207bc7f72d75364c290702d9
parent     02ffad08e838997fad3de05c85560a57e5fd92de
GFS2: Clean up of glops.c
This cleans up a number of bits of code, mostly based in glops.c. A couple
of simple functions have been merged into their callers to make it more
obvious what is going on, and the mysterious raising of i_writecount around
the truncate_inode_pages() call has been removed. The meta_go_* operations
have been renamed rgrp_go_*, since that is the only lock type they are used
with, and the unused argument of gfs2_read_sb has been removed. Also, a bug
has been fixed where a check for the rindex inode was in the wrong callback.
More comments have been added, and the debugging code has been improved too.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
-rw-r--r--   fs/gfs2/glops.c      | 115
-rw-r--r--   fs/gfs2/meta_io.c    |  21
-rw-r--r--   fs/gfs2/meta_io.h    |   1
-rw-r--r--   fs/gfs2/ops_file.c   |   3
-rw-r--r--   fs/gfs2/ops_fstype.c |   6
5 files changed, 57 insertions(+), 89 deletions(-)
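
Note for readers skimming the diff: with gfs2_meta_sync() and gfs2_meta_inval()
folded into their callers, both sync callbacks now write back and wait on the
glock's metadata mapping directly and record any I/O error on the mapping. A
minimal sketch of that pattern follows; it is not code from this patch, and
gfs2_sync_metadata() is a hypothetical helper name used purely for illustration.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hedged sketch only: write back a mapping, wait for completion and keep
 * any I/O error on the mapping, as rgrp_go_sync()/inode_go_sync() now do. */
static void gfs2_sync_metadata(struct address_space *mapping)
{
        int error;

        filemap_fdatawrite(mapping);            /* start write-back of dirty pages */
        error = filemap_fdatawait(mapping);     /* wait for the I/O to complete */
        mapping_set_error(mapping, error);      /* record the error for a later sync/fsync */
}
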
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index f34bc7093dd1..bf23a62aa925 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -76,29 +76,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
 }
 
 /**
- * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
- * @gl: the glock
- *
- */
-
-static void gfs2_pte_inval(struct gfs2_glock *gl)
-{
-        struct gfs2_inode *ip;
-        struct inode *inode;
-
-        ip = gl->gl_object;
-        inode = &ip->i_inode;
-        if (!ip || !S_ISREG(inode->i_mode))
-                return;
-
-        unmap_shared_mapping_range(inode->i_mapping, 0, 0);
-        if (test_bit(GIF_SW_PAGED, &ip->i_flags))
-                set_bit(GLF_DIRTY, &gl->gl_flags);
-
-}
-
-/**
- * meta_go_sync - sync out the metadata for this glock
+ * rgrp_go_sync - sync out the metadata for this glock
  * @gl: the glock
  *
  * Called when demoting or unlocking an EX glock. We must flush
@@ -106,36 +84,42 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
  * not return to caller to demote/unlock the glock until I/O is complete.
  */
 
-static void meta_go_sync(struct gfs2_glock *gl)
+static void rgrp_go_sync(struct gfs2_glock *gl)
 {
-        if (gl->gl_state != LM_ST_EXCLUSIVE)
+        struct address_space *metamapping = gl->gl_aspace->i_mapping;
+        int error;
+
+        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                 return;
+        BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
 
-        if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
-                gfs2_log_flush(gl->gl_sbd, gl);
-                gfs2_meta_sync(gl);
-                gfs2_ail_empty_gl(gl);
-        }
+        gfs2_log_flush(gl->gl_sbd, gl);
+        filemap_fdatawrite(metamapping);
+        error = filemap_fdatawait(metamapping);
+        mapping_set_error(metamapping, error);
+        gfs2_ail_empty_gl(gl);
 }
 
 /**
- * meta_go_inval - invalidate the metadata for this glock
+ * rgrp_go_inval - invalidate the metadata for this glock
  * @gl: the glock
  * @flags:
  *
+ * We never used LM_ST_DEFERRED with resource groups, so that we
+ * should always see the metadata flag set here.
+ *
  */
 
-static void meta_go_inval(struct gfs2_glock *gl, int flags)
+static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
-        if (!(flags & DIO_METADATA))
-                return;
+        struct address_space *mapping = gl->gl_aspace->i_mapping;
 
-        gfs2_meta_inval(gl);
-        if (gl->gl_object == GFS2_I(gl->gl_sbd->sd_rindex))
-                gl->gl_sbd->sd_rindex_uptodate = 0;
-        else if (gl->gl_ops == &gfs2_rgrp_glops && gl->gl_object) {
-                struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
+        BUG_ON(!(flags & DIO_METADATA));
+        gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
+        truncate_inode_pages(mapping, 0);
 
+        if (gl->gl_object) {
+                struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
                 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
         }
 }
@@ -152,48 +136,54 @@ static void inode_go_sync(struct gfs2_glock *gl)
         struct address_space *metamapping = gl->gl_aspace->i_mapping;
         int error;
 
-        if (gl->gl_state != LM_ST_UNLOCKED)
-                gfs2_pte_inval(gl);
-        if (gl->gl_state != LM_ST_EXCLUSIVE)
-                return;
-
         if (ip && !S_ISREG(ip->i_inode.i_mode))
                 ip = NULL;
+        if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+                unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
+                return;
 
-        if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
-                gfs2_log_flush(gl->gl_sbd, gl);
-                filemap_fdatawrite(metamapping);
-                if (ip) {
-                        struct address_space *mapping = ip->i_inode.i_mapping;
-                        filemap_fdatawrite(mapping);
-                        error = filemap_fdatawait(mapping);
-                        mapping_set_error(mapping, error);
-                }
-                error = filemap_fdatawait(metamapping);
-                mapping_set_error(metamapping, error);
-                clear_bit(GLF_DIRTY, &gl->gl_flags);
-                gfs2_ail_empty_gl(gl);
+        BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
+
+        gfs2_log_flush(gl->gl_sbd, gl);
+        filemap_fdatawrite(metamapping);
+        if (ip) {
+                struct address_space *mapping = ip->i_inode.i_mapping;
+                filemap_fdatawrite(mapping);
+                error = filemap_fdatawait(mapping);
+                mapping_set_error(mapping, error);
         }
+        error = filemap_fdatawait(metamapping);
+        mapping_set_error(metamapping, error);
+        gfs2_ail_empty_gl(gl);
 }
 
 /**
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
+ *
+ * Normally we invlidate everything, but if we are moving into
+ * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
+ * can keep hold of the metadata, since it won't have changed.
  *
  */
 
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
         struct gfs2_inode *ip = gl->gl_object;
-        int meta = (flags & DIO_METADATA);
 
-        if (meta) {
-                gfs2_meta_inval(gl);
+        gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
+
+        if (flags & DIO_METADATA) {
+                struct address_space *mapping = gl->gl_aspace->i_mapping;
+                truncate_inode_pages(mapping, 0);
                 if (ip)
                         set_bit(GIF_INVALID, &ip->i_flags);
         }
 
+        if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+                gl->gl_sbd->sd_rindex_uptodate = 0;
         if (ip && S_ISREG(ip->i_inode.i_mode))
                 truncate_inode_pages(ip->i_inode.i_mapping, 0);
 }
@@ -395,7 +385,6 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
 }
 
 const struct gfs2_glock_operations gfs2_meta_glops = {
-        .go_xmote_th = meta_go_sync,
         .go_type = LM_TYPE_META,
 };
 
@@ -410,8 +399,8 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
 };
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
-        .go_xmote_th = meta_go_sync,
-        .go_inval = meta_go_inval,
+        .go_xmote_th = rgrp_go_sync,
+        .go_inval = rgrp_go_inval,
         .go_demote_ok = rgrp_go_demote_ok,
         .go_lock = rgrp_go_lock,
         .go_unlock = rgrp_go_unlock,
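
For convenience, here is the inode invalidate callback as it reads once the
hunks above are applied, reassembled from the diff with two editorial comments
added; the copy in fs/gfs2/glops.c is authoritative. Note that the rindex check
mentioned in the commit message now lives here rather than in the (renamed)
resource group callback.

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_inode *ip = gl->gl_object;

        /* No AIL buffers may still be attached to this glock */
        gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

        if (flags & DIO_METADATA) {
                struct address_space *mapping = gl->gl_aspace->i_mapping;
                truncate_inode_pages(mapping, 0);
                if (ip)
                        set_bit(GIF_INVALID, &ip->i_flags);
        }

        /* This check moved here from the old meta_go_inval() */
        if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
                gl->gl_sbd->sd_rindex_uptodate = 0;
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);
}
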
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 870d65ae7ae2..8d6f13256b26 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -89,27 +89,6 @@ void gfs2_aspace_put(struct inode *aspace)
 }
 
 /**
- * gfs2_meta_inval - Invalidate all buffers associated with a glock
- * @gl: the glock
- *
- */
-
-void gfs2_meta_inval(struct gfs2_glock *gl)
-{
-        struct gfs2_sbd *sdp = gl->gl_sbd;
-        struct inode *aspace = gl->gl_aspace;
-        struct address_space *mapping = gl->gl_aspace->i_mapping;
-
-        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
-
-        atomic_inc(&aspace->i_writecount);
-        truncate_inode_pages(mapping, 0);
-        atomic_dec(&aspace->i_writecount);
-
-        gfs2_assert_withdraw(sdp, !mapping->nrpages);
-}
-
-/**
  * gfs2_meta_sync - Sync all buffers associated with a glock
  * @gl: The glock
  *
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index b1a5f3674d43..de270c2f9b63 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -40,7 +40,6 @@ static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh,
 struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp);
 void gfs2_aspace_put(struct inode *aspace);
 
-void gfs2_meta_inval(struct gfs2_glock *gl);
 void gfs2_meta_sync(struct gfs2_glock *gl);
 
 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 99d726f1c7a6..48ec3d5e29eb 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -355,7 +355,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
         if (ret)
                 goto out;
 
-        set_bit(GIF_SW_PAGED, &ip->i_flags);
         ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required);
         if (ret || !alloc_required)
                 goto out_unlock;
@@ -396,6 +395,8 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
                 goto out_unlock_page;
         }
         ret = gfs2_allocate_page_backing(page);
+        if (!ret)
+                set_bit(GIF_SW_PAGED, &ip->i_flags);
 
 out_unlock_page:
         unlock_page(page);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 804ca7273a49..51883b3ad89c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -296,15 +296,15 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
         __free_page(page);
         return 0;
 }
+
 /**
  * gfs2_read_sb - Read super block
  * @sdp: The GFS2 superblock
- * @gl: the glock for the superblock (assumed to be held)
  * @silent: Don't print message if mount fails
  *
  */
 
-static int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
+static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
 {
         u32 hash_blocks, ind_blocks, leaf_blocks;
         u32 tmp_blocks;
@@ -524,7 +524,7 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
                 return ret;
         }
 
-        ret = gfs2_read_sb(sdp, sb_gh.gh_gl, silent);
+        ret = gfs2_read_sb(sdp, silent);
         if (ret) {
                 fs_err(sdp, "can't read superblock: %d\n", ret);
                 goto out;