author     Steven Whitehouse <swhiteho@redhat.com>  2013-12-06 11:19:54 -0500
committer  Steven Whitehouse <swhiteho@redhat.com>  2014-01-03 05:01:50 -0500
commit     70d4ee94b370c5ef54d0870600f16bd92d18013c (patch)
tree       ae741f94eba92bdb6e2882145de03be662557f66  /fs/gfs2/glops.c
parent     7005c3e4ae42858dbb695b2d03d340af799b1f1b (diff)
GFS2: Use only a single address space for rgrps
Prior to this patch, GFS2 had one address space for each rgrp, stored in the glock. This patch changes the rgrp glocks to share a single address space stored in the super block. This therefore saves (sizeof(struct address_space) * nr_of_rgrps) bytes of memory and, for large filesystems, that can be significant.

It would be nice to be able to do something similar and merge the inode metadata address space into the same global address space. However, that is rather more complicated, as the on-disk location doesn't in general have a 1:1 mapping with the inodes. So while it could be done, it would be a more complicated operation, as it requires changing a lot more code paths.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
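For a rough sense of scale, the sketch below (ordinary user-space C, not kernel code) applies the (sizeof(struct address_space) * nr_of_rgrps) formula from the commit message to an illustrative configuration. The structure size and the filesystem/rgrp sizes are assumptions for illustration only; the real sizeof(struct address_space) depends on the kernel version and configuration, and the number of rgrps depends on how the filesystem was made.

/*
 * Back-of-the-envelope estimate of the memory saved by sharing one
 * address space among all rgrps.  ASSUMED_ASPACE_SIZE and the
 * filesystem/rgrp sizes below are illustrative placeholders, not
 * values taken from this patch.
 */
#include <stdio.h>

#define ASSUMED_ASPACE_SIZE	200UL	/* assumed sizeof(struct address_space), in bytes */

int main(void)
{
	unsigned long fs_size_gib   = 100UL * 1024;	/* hypothetical 100 TiB filesystem */
	unsigned long rgrp_size_gib = 2;		/* hypothetical 2 GiB resource groups */
	unsigned long nr_of_rgrps   = fs_size_gib / rgrp_size_gib;

	printf("%lu rgrps -> roughly %lu KiB no longer spent on per-rgrp address spaces\n",
	       nr_of_rgrps, nr_of_rgrps * ASSUMED_ASPACE_SIZE / 1024);
	return 0;
}

With these made-up numbers the saving is on the order of 10 MiB per mount, and it grows linearly with the number of resource groups, which is why the commit message singles out large filesystems.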
Diffstat (limited to 'fs/gfs2/glops.c')
-rw-r--r--  fs/gfs2/glops.c | 18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 1b192c8d404d..b792dbcc83f6 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -133,7 +133,8 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 
 static void rgrp_go_sync(struct gfs2_glock *gl)
 {
-	struct address_space *metamapping = gfs2_glock2aspace(gl);
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct address_space *mapping = &sdp->sd_aspace;
 	struct gfs2_rgrpd *rgd;
 	int error;
 
@@ -141,10 +142,10 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
 		return;
 	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
-	gfs2_log_flush(gl->gl_sbd, gl);
-	filemap_fdatawrite_range(metamapping, gl->gl_vm.start, gl->gl_vm.end);
-	error = filemap_fdatawait_range(metamapping, gl->gl_vm.start, gl->gl_vm.end);
-	mapping_set_error(metamapping, error);
+	gfs2_log_flush(sdp, gl);
+	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
+	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
+	mapping_set_error(mapping, error);
 	gfs2_ail_empty_gl(gl);
 
 	spin_lock(&gl->gl_spin);
@@ -166,10 +167,11 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
 
 static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
-	struct address_space *mapping = gfs2_glock2aspace(gl);
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct address_space *mapping = &sdp->sd_aspace;
 
 	WARN_ON_ONCE(!(flags & DIO_METADATA));
-	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
+	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
 	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
 
 	if (gl->gl_object) {
@@ -558,7 +560,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_unlock = gfs2_rgrp_go_unlock,
 	.go_dump = gfs2_rgrp_dump,
 	.go_type = LM_TYPE_RGRP,
-	.go_flags = GLOF_ASPACE | GLOF_LVB,
+	.go_flags = GLOF_LVB,
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {