aboutsummaryrefslogtreecommitdiffstats
path: root/fs/gfs2/glock.c
diff options
context:
space:
mode:
authorSteven Whitehouse <swhiteho@redhat.com>2009-12-08 07:12:13 -0500
committerSteven Whitehouse <swhiteho@redhat.com>2010-03-01 09:07:37 -0500
commit009d851837ab26cab18adda6169a813f70b0b21b (patch)
tree073bc05e3a8c527bf9ce3332e2c2f6694484984d /fs/gfs2/glock.c
parent30ff056c42c665b9ea535d8515890857ae382540 (diff)
GFS2: Metadata address space clean up
Since the start of GFS2, an "extra" inode has been used to store the metadata belonging to each inode. The only reason for using this inode was to have an extra address space; the other fields were unused. This means that the memory usage was rather inefficient. The reason for keeping each inode's metadata in a separate address space is that when glocks are requested on remote nodes, we need to be able to efficiently locate the data and metadata relating to that glock (inode) in order to sync or sync and invalidate it (depending on the remotely requested lock mode). This patch adds a new type of glock, which has, in addition to its normal fields, an address space. This applies to all inode and rgrp glocks (but to no other glock types, which remain as before). As a result, we no longer need to have the second inode. This results in three major improvements: 1. A saving of approx 25% of memory used in caching inodes 2. A removal of the circular dependency between inodes and glocks 3. No confusion between "normal" and "metadata" inodes in super.c Although the first of these is the more immediately apparent, the second is just as important as it now enables a number of clean ups at umount time. Those will be the subject of future patches. Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--fs/gfs2/glock.c40
1 files changed, 21 insertions, 19 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f42663325931..dfb10a4d467e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -154,12 +154,14 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
154static void glock_free(struct gfs2_glock *gl) 154static void glock_free(struct gfs2_glock *gl)
155{ 155{
156 struct gfs2_sbd *sdp = gl->gl_sbd; 156 struct gfs2_sbd *sdp = gl->gl_sbd;
157 struct inode *aspace = gl->gl_aspace; 157 struct address_space *mapping = gfs2_glock2aspace(gl);
158 struct kmem_cache *cachep = gfs2_glock_cachep;
158 159
159 if (aspace) 160 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
160 gfs2_aspace_put(aspace);
161 trace_gfs2_glock_put(gl); 161 trace_gfs2_glock_put(gl);
162 sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl); 162 if (mapping)
163 cachep = gfs2_glock_aspace_cachep;
164 sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
163} 165}
164 166
165/** 167/**
@@ -750,10 +752,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
750 const struct gfs2_glock_operations *glops, int create, 752 const struct gfs2_glock_operations *glops, int create,
751 struct gfs2_glock **glp) 753 struct gfs2_glock **glp)
752{ 754{
755 struct super_block *s = sdp->sd_vfs;
753 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type }; 756 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
754 struct gfs2_glock *gl, *tmp; 757 struct gfs2_glock *gl, *tmp;
755 unsigned int hash = gl_hash(sdp, &name); 758 unsigned int hash = gl_hash(sdp, &name);
756 int error; 759 struct address_space *mapping;
757 760
758 read_lock(gl_lock_addr(hash)); 761 read_lock(gl_lock_addr(hash));
759 gl = search_bucket(hash, sdp, &name); 762 gl = search_bucket(hash, sdp, &name);
@@ -765,7 +768,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
765 if (!create) 768 if (!create)
766 return -ENOENT; 769 return -ENOENT;
767 770
768 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL); 771 if (glops->go_flags & GLOF_ASPACE)
772 gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
773 else
774 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
769 if (!gl) 775 if (!gl)
770 return -ENOMEM; 776 return -ENOMEM;
771 777
@@ -784,18 +790,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
784 gl->gl_tchange = jiffies; 790 gl->gl_tchange = jiffies;
785 gl->gl_object = NULL; 791 gl->gl_object = NULL;
786 gl->gl_sbd = sdp; 792 gl->gl_sbd = sdp;
787 gl->gl_aspace = NULL;
788 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 793 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
789 INIT_WORK(&gl->gl_delete, delete_work_func); 794 INIT_WORK(&gl->gl_delete, delete_work_func);
790 795
791 /* If this glock protects actual on-disk data or metadata blocks, 796 mapping = gfs2_glock2aspace(gl);
792 create a VFS inode to manage the pages/buffers holding them. */ 797 if (mapping) {
793 if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) { 798 mapping->a_ops = &gfs2_meta_aops;
794 gl->gl_aspace = gfs2_aspace_get(sdp); 799 mapping->host = s->s_bdev->bd_inode;
795 if (!gl->gl_aspace) { 800 mapping->flags = 0;
796 error = -ENOMEM; 801 mapping_set_gfp_mask(mapping, GFP_NOFS);
797 goto fail; 802 mapping->assoc_mapping = NULL;
798 } 803 mapping->backing_dev_info = s->s_bdi;
804 mapping->writeback_index = 0;
799 } 805 }
800 806
801 write_lock(gl_lock_addr(hash)); 807 write_lock(gl_lock_addr(hash));
@@ -812,10 +818,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
812 *glp = gl; 818 *glp = gl;
813 819
814 return 0; 820 return 0;
815
816fail:
817 kmem_cache_free(gfs2_glock_cachep, gl);
818 return error;
819} 821}
820 822
821/** 823/**