author     David Chinner <david@fromorbit.com>                 2008-10-30 02:32:23 -0400
committer  Lachlan McIlroy <lachlan@redback.melbourne.sgi.com> 2008-10-30 02:32:23 -0400
commit     2cb1599f9b2ecdd7a9e59feeee647eb258966839
tree       a54cb8c09d209e529d399874eb0be7bc365c5212 /fs
parent     94b97e39b0c983e86f0028c456dcf213abc722a0
Inode: Allow external initialisers
To allow XFS to combine the XFS and Linux inodes into a single structure, we need to drive inode lookup from the XFS inode cache, not the generic inode cache. This means that we need to initialise a struct inode from a context outside alloc_inode(), as alloc_inode() is no longer used by XFS.

Factor and export the struct inode initialisation code from alloc_inode() to inode_init_always() as a counterpart to inode_init_once(); i.e. we have to call this init function for each inode instantiation (always), as opposed to inode_init_once(), which is only called on slab object instantiation (once).

Signed-off-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/inode.c  140
1 file changed, 78 insertions(+), 62 deletions(-)
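For orientation, here is a hedged sketch of the call pattern this export enables: a filesystem that embeds the VFS inode inside its own inode structure can allocate that structure from its own slab cache, outside alloc_inode(), and then run the generic per-allocation initialisation on the embedded struct inode via inode_init_always(). Everything named "foofs" below (the structure, the cache, and the functions) is hypothetical and not part of this commit; the only interface taken from the patch is inode_init_always() itself, including its behaviour of cleaning up through ->destroy_inode() and returning NULL on failure.

/*
 * Hypothetical consumer of inode_init_always() (not part of this commit).
 * "foofs" embeds the VFS inode in its own inode and instantiates inodes
 * from its own cache rather than through alloc_inode().
 */
#include <linux/fs.h>
#include <linux/slab.h>

struct foofs_inode {
	unsigned long	foo_state;	/* filesystem-private state */
	struct inode	vfs_inode;	/* embedded VFS inode */
};

static struct kmem_cache *foofs_inode_cachep;	/* assumed created at init */

/* ->destroy_inode frees the containing structure from the right cache */
static void foofs_destroy_inode(struct inode *inode)
{
	struct foofs_inode *fi = container_of(inode, struct foofs_inode,
					      vfs_inode);

	kmem_cache_free(foofs_inode_cachep, fi);
}

/* assumed to be assigned to sb->s_op at mount time (not shown) */
static const struct super_operations foofs_super_ops = {
	.destroy_inode	= foofs_destroy_inode,
	/* ... other methods elided ... */
};

/*
 * Instantiate an inode from the filesystem's own cache.  Note that on
 * failure inode_init_always() has already released the inode through
 * sb->s_op->destroy_inode(), so the caller must not free it again.
 */
static struct inode *foofs_new_inode(struct super_block *sb)
{
	struct foofs_inode *fi;

	fi = kmem_cache_alloc(foofs_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;
	fi->foo_state = 0;

	if (!inode_init_always(sb, &fi->vfs_inode))
		return NULL;		/* freed via ->destroy_inode() */

	return &fi->vfs_inode;
}

The pairing matters: because the version of inode_init_always() introduced here disposes of the inode itself when security_inode_alloc() fails, calling it from outside alloc_inode() is only safe if ->destroy_inode() knows how to free the combined structure, as foofs_destroy_inode() does above.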
diff --git a/fs/inode.c b/fs/inode.c
index 0487ddba1397..e7ee99907d60 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -108,84 +108,100 @@ static void wake_up_inode(struct inode *inode)
 	wake_up_bit(&inode->i_state, __I_LOCK);
 }
 
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * inode_init_always - perform inode structure intialisation
+ * @sb - superblock inode belongs to.
+ * @inode - inode to initialise
+ *
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 {
 	static const struct address_space_operations empty_aops;
 	static struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
-	struct inode *inode;
-
-	if (sb->s_op->alloc_inode)
-		inode = sb->s_op->alloc_inode(sb);
-	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
-	if (inode) {
-		struct address_space * const mapping = &inode->i_data;
-
-		inode->i_sb = sb;
-		inode->i_blkbits = sb->s_blocksize_bits;
-		inode->i_flags = 0;
-		atomic_set(&inode->i_count, 1);
-		inode->i_op = &empty_iops;
-		inode->i_fop = &empty_fops;
-		inode->i_nlink = 1;
-		atomic_set(&inode->i_writecount, 0);
-		inode->i_size = 0;
-		inode->i_blocks = 0;
-		inode->i_bytes = 0;
-		inode->i_generation = 0;
+	struct address_space * const mapping = &inode->i_data;
+
+	inode->i_sb = sb;
+	inode->i_blkbits = sb->s_blocksize_bits;
+	inode->i_flags = 0;
+	atomic_set(&inode->i_count, 1);
+	inode->i_op = &empty_iops;
+	inode->i_fop = &empty_fops;
+	inode->i_nlink = 1;
+	atomic_set(&inode->i_writecount, 0);
+	inode->i_size = 0;
+	inode->i_blocks = 0;
+	inode->i_bytes = 0;
+	inode->i_generation = 0;
 #ifdef CONFIG_QUOTA
-		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
+	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
 #endif
-		inode->i_pipe = NULL;
-		inode->i_bdev = NULL;
-		inode->i_cdev = NULL;
-		inode->i_rdev = 0;
-		inode->dirtied_when = 0;
-		if (security_inode_alloc(inode)) {
-			if (inode->i_sb->s_op->destroy_inode)
-				inode->i_sb->s_op->destroy_inode(inode);
-			else
-				kmem_cache_free(inode_cachep, (inode));
-			return NULL;
-		}
+	inode->i_pipe = NULL;
+	inode->i_bdev = NULL;
+	inode->i_cdev = NULL;
+	inode->i_rdev = 0;
+	inode->dirtied_when = 0;
+	if (security_inode_alloc(inode)) {
+		if (inode->i_sb->s_op->destroy_inode)
+			inode->i_sb->s_op->destroy_inode(inode);
+		else
+			kmem_cache_free(inode_cachep, (inode));
+		return NULL;
+	}
 
-		spin_lock_init(&inode->i_lock);
-		lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
+	spin_lock_init(&inode->i_lock);
+	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 
-		mutex_init(&inode->i_mutex);
-		lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
+	mutex_init(&inode->i_mutex);
+	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
 
-		init_rwsem(&inode->i_alloc_sem);
-		lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
+	init_rwsem(&inode->i_alloc_sem);
+	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
 
-		mapping->a_ops = &empty_aops;
-		mapping->host = inode;
-		mapping->flags = 0;
-		mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
-		mapping->assoc_mapping = NULL;
-		mapping->backing_dev_info = &default_backing_dev_info;
-		mapping->writeback_index = 0;
+	mapping->a_ops = &empty_aops;
+	mapping->host = inode;
+	mapping->flags = 0;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
+	mapping->assoc_mapping = NULL;
+	mapping->backing_dev_info = &default_backing_dev_info;
+	mapping->writeback_index = 0;
 
-		/*
-		 * If the block_device provides a backing_dev_info for client
-		 * inodes then use that. Otherwise the inode share the bdev's
-		 * backing_dev_info.
-		 */
-		if (sb->s_bdev) {
-			struct backing_dev_info *bdi;
+	/*
+	 * If the block_device provides a backing_dev_info for client
+	 * inodes then use that. Otherwise the inode share the bdev's
+	 * backing_dev_info.
+	 */
+	if (sb->s_bdev) {
+		struct backing_dev_info *bdi;
 
-			bdi = sb->s_bdev->bd_inode_backing_dev_info;
-			if (!bdi)
-				bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-			mapping->backing_dev_info = bdi;
-		}
-		inode->i_private = NULL;
-		inode->i_mapping = mapping;
+		bdi = sb->s_bdev->bd_inode_backing_dev_info;
+		if (!bdi)
+			bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+		mapping->backing_dev_info = bdi;
 	}
+	inode->i_private = NULL;
+	inode->i_mapping = mapping;
+
 	return inode;
 }
+EXPORT_SYMBOL(inode_init_always);
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct inode *inode;
+
+	if (sb->s_op->alloc_inode)
+		inode = sb->s_op->alloc_inode(sb);
+	else
+		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+	if (inode)
+		return inode_init_always(sb, inode);
+	return NULL;
+}
 
 void destroy_inode(struct inode *inode)
 {