Diffstat (limited to 'fs/inode.c')
 fs/inode.c | 270 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 187 insertions(+), 83 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 0487ddba1397..bd48e5e6d3e8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -108,84 +108,102 @@ static void wake_up_inode(struct inode *inode)
 	wake_up_bit(&inode->i_state, __I_LOCK);
 }
 
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * inode_init_always - perform inode structure initialisation
+ * @sb: superblock the inode belongs to
+ * @inode: inode to initialise
+ *
+ * These are the initialisations that need to be done on every inode
+ * allocation, as the fields are not initialised by slab allocation.
+ */
+struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 {
 	static const struct address_space_operations empty_aops;
 	static struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
-	struct inode *inode;
 
-	if (sb->s_op->alloc_inode)
-		inode = sb->s_op->alloc_inode(sb);
-	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
-
-	if (inode) {
-		struct address_space * const mapping = &inode->i_data;
-
-		inode->i_sb = sb;
-		inode->i_blkbits = sb->s_blocksize_bits;
-		inode->i_flags = 0;
-		atomic_set(&inode->i_count, 1);
-		inode->i_op = &empty_iops;
-		inode->i_fop = &empty_fops;
-		inode->i_nlink = 1;
-		atomic_set(&inode->i_writecount, 0);
-		inode->i_size = 0;
-		inode->i_blocks = 0;
-		inode->i_bytes = 0;
-		inode->i_generation = 0;
+	struct address_space * const mapping = &inode->i_data;
+
+	inode->i_sb = sb;
+	inode->i_blkbits = sb->s_blocksize_bits;
+	inode->i_flags = 0;
+	atomic_set(&inode->i_count, 1);
+	inode->i_op = &empty_iops;
+	inode->i_fop = &empty_fops;
+	inode->i_nlink = 1;
+	inode->i_uid = 0;
+	inode->i_gid = 0;
+	atomic_set(&inode->i_writecount, 0);
+	inode->i_size = 0;
+	inode->i_blocks = 0;
+	inode->i_bytes = 0;
+	inode->i_generation = 0;
 #ifdef CONFIG_QUOTA
-		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
+	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
 #endif
-		inode->i_pipe = NULL;
-		inode->i_bdev = NULL;
-		inode->i_cdev = NULL;
-		inode->i_rdev = 0;
-		inode->dirtied_when = 0;
-		if (security_inode_alloc(inode)) {
-			if (inode->i_sb->s_op->destroy_inode)
-				inode->i_sb->s_op->destroy_inode(inode);
-			else
-				kmem_cache_free(inode_cachep, (inode));
-			return NULL;
-		}
+	inode->i_pipe = NULL;
+	inode->i_bdev = NULL;
+	inode->i_cdev = NULL;
+	inode->i_rdev = 0;
+	inode->dirtied_when = 0;
+	if (security_inode_alloc(inode)) {
+		if (inode->i_sb->s_op->destroy_inode)
+			inode->i_sb->s_op->destroy_inode(inode);
+		else
+			kmem_cache_free(inode_cachep, (inode));
+		return NULL;
+	}
 
-		spin_lock_init(&inode->i_lock);
-		lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
+	spin_lock_init(&inode->i_lock);
+	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 
-		mutex_init(&inode->i_mutex);
-		lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
+	mutex_init(&inode->i_mutex);
+	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
 
-		init_rwsem(&inode->i_alloc_sem);
-		lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
+	init_rwsem(&inode->i_alloc_sem);
+	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
 
-		mapping->a_ops = &empty_aops;
-		mapping->host = inode;
-		mapping->flags = 0;
-		mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
-		mapping->assoc_mapping = NULL;
-		mapping->backing_dev_info = &default_backing_dev_info;
-		mapping->writeback_index = 0;
+	mapping->a_ops = &empty_aops;
+	mapping->host = inode;
+	mapping->flags = 0;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
+	mapping->assoc_mapping = NULL;
+	mapping->backing_dev_info = &default_backing_dev_info;
+	mapping->writeback_index = 0;
 
-		/*
-		 * If the block_device provides a backing_dev_info for client
-		 * inodes then use that.  Otherwise the inode shares the bdev's
-		 * backing_dev_info.
-		 */
-		if (sb->s_bdev) {
-			struct backing_dev_info *bdi;
+	/*
+	 * If the block_device provides a backing_dev_info for client
+	 * inodes then use that.  Otherwise the inode shares the bdev's
+	 * backing_dev_info.
+	 */
+	if (sb->s_bdev) {
+		struct backing_dev_info *bdi;
 
-			bdi = sb->s_bdev->bd_inode_backing_dev_info;
-			if (!bdi)
-				bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-			mapping->backing_dev_info = bdi;
-		}
-		inode->i_private = NULL;
-		inode->i_mapping = mapping;
+		bdi = sb->s_bdev->bd_inode_backing_dev_info;
+		if (!bdi)
+			bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+		mapping->backing_dev_info = bdi;
 	}
+	inode->i_private = NULL;
+	inode->i_mapping = mapping;
+
 	return inode;
 }
+EXPORT_SYMBOL(inode_init_always);
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct inode *inode;
+
+	if (sb->s_op->alloc_inode)
+		inode = sb->s_op->alloc_inode(sb);
+	else
+		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+	if (inode)
+		return inode_init_always(sb, inode);
+	return NULL;
+}
 
 void destroy_inode(struct inode *inode)
 {
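
The point of this split is to let a filesystem that manages inode allocation
itself (the XFS inode-reuse work is the motivating user) apply the generic VFS
field initialisation outside of alloc_inode(), for example when recycling an
in-core inode from its own cache. A minimal sketch of such a caller, assuming
the superblock's ->destroy_inode handles the containing structure (since
inode_init_always() disposes of the inode through it if security_inode_alloc()
fails); all myfs_* identifiers are hypothetical:

	struct myfs_inode {
		unsigned long	private_flags;	/* fs-private state */
		struct inode	vfs_inode;	/* embedded VFS inode */
	};

	/* Re-initialise a recycled in-core inode before reusing it. */
	static struct inode *myfs_reuse_inode(struct super_block *sb,
					      struct myfs_inode *mi)
	{
		if (!inode_init_always(sb, &mi->vfs_inode))
			return NULL;		/* inode already torn down */
		mi->private_flags = 0;		/* fs-private reset */
		return &mi->vfs_inode;
	}
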
@@ -196,6 +214,7 @@ void destroy_inode(struct inode *inode)
 	else
 		kmem_cache_free(inode_cachep, (inode));
 }
+EXPORT_SYMBOL(destroy_inode);
 
 
 /*
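
destroy_inode() gains an export for the same reason: code that allocates and
initialises an inode outside the normal iget path needs a matching way to
unwind before the inode has been hashed or put on any list. A hedged sketch of
such an error path; both myfs_* helpers are hypothetical:

	static struct inode *myfs_iget_uncached(struct super_block *sb,
						unsigned long ino)
	{
		struct inode *inode = myfs_alloc_and_init(sb);	/* hypothetical */

		if (!inode)
			return ERR_PTR(-ENOMEM);
		inode->i_ino = ino;
		if (myfs_read_disk_inode(inode)) {		/* hypothetical */
			/* not hashed, not on any list: plain teardown */
			destroy_inode(inode);
			return ERR_PTR(-EIO);
		}
		return inode;
	}
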
@@ -534,6 +553,49 @@ repeat:
 	return node ? inode : NULL;
 }
 
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+	unsigned long tmp;
+
+	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+			L1_CACHE_BYTES;
+	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+	return tmp & I_HASHMASK;
+}
+
+static inline void
+__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
+			struct inode *inode)
+{
+	inodes_stat.nr_inodes++;
+	list_add(&inode->i_list, &inode_in_use);
+	list_add(&inode->i_sb_list, &sb->s_inodes);
+	if (head)
+		hlist_add_head(&inode->i_hash, head);
+}
+
+/**
+ * inode_add_to_lists - add a new inode to relevant lists
+ * @sb: superblock the inode belongs to
+ * @inode: inode to mark in use
+ *
+ * When an inode is allocated it needs to be accounted for, added to the
+ * in-use list, the owning superblock and the inode hash. All of this
+ * needs to be done under the inode_lock, so export a function that does
+ * it rather than exporting the lock itself. The hash chain to add to is
+ * calculated internally, which requires the caller to have already set
+ * up the inode number in the inode being added.
+ */
+void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+
+	spin_lock(&inode_lock);
+	__inode_add_to_lists(sb, head, inode);
+	spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_add_to_lists);
+
 /**
  * new_inode - obtain an inode
  * @sb: superblock
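
The contract is worth restating with a caller: i_ino must be set before the
call, because the hash chain is derived from it inside inode_add_to_lists().
A sketch of the intended pattern; the wrapper name is hypothetical:

	/* Publish a self-allocated inode once its number is known. */
	static void myfs_publish_inode(struct super_block *sb,
				       struct inode *inode, unsigned long ino)
	{
		inode->i_ino = ino;		/* must precede the call */
		inode_add_to_lists(sb, inode);
	}

Note that __inode_add_to_lists() tolerates a NULL head; the new_inode() hunk
below uses that to account an inode without hashing it.
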
@@ -561,9 +623,7 @@ struct inode *new_inode(struct super_block *sb)
 	inode = alloc_inode(sb);
 	if (inode) {
 		spin_lock(&inode_lock);
-		inodes_stat.nr_inodes++;
-		list_add(&inode->i_list, &inode_in_use);
-		list_add(&inode->i_sb_list, &sb->s_inodes);
+		__inode_add_to_lists(sb, NULL, inode);
 		inode->i_ino = ++last_ino;
 		inode->i_state = 0;
 		spin_unlock(&inode_lock);
@@ -622,10 +682,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h
 			if (set(inode, data))
 				goto set_failed;
 
-			inodes_stat.nr_inodes++;
-			list_add(&inode->i_list, &inode_in_use);
-			list_add(&inode->i_sb_list, &sb->s_inodes);
-			hlist_add_head(&inode->i_hash, head);
+			__inode_add_to_lists(sb, head, inode);
 			inode->i_state = I_LOCK|I_NEW;
 			spin_unlock(&inode_lock);
 
@@ -671,10 +728,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
 			inode->i_ino = ino;
-			inodes_stat.nr_inodes++;
-			list_add(&inode->i_list, &inode_in_use);
-			list_add(&inode->i_sb_list, &sb->s_inodes);
-			hlist_add_head(&inode->i_hash, head);
+			__inode_add_to_lists(sb, head, inode);
 			inode->i_state = I_LOCK|I_NEW;
 			spin_unlock(&inode_lock);
 
@@ -698,16 +752,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
 	return inode;
 }
 
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
-	unsigned long tmp;
-
-	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
-			L1_CACHE_BYTES;
-	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-	return tmp & I_HASHMASK;
-}
-
 /**
  * iunique - get a unique inode number
  * @sb: superblock
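
The hash function itself is unchanged, only moved above its new callers. As a
user-space illustration of the mixing it performs, here is the same arithmetic
with the kernel constants replaced by assumed values: GOLDEN_RATIO_PRIME is the
64-bit constant from linux/hash.h of this era, while I_HASHBITS/I_HASHMASK are
really derived from the boot-time table size and are stood in here by a
2^14-bucket table.

	#include <stdio.h>

	#define GOLDEN_RATIO_PRIME	0x9e37fffffffc0001UL	/* linux/hash.h, 64-bit */
	#define L1_CACHE_BYTES		64UL			/* assumed cache line */
	#define I_HASHBITS		14UL			/* assumed: 2^14 buckets */
	#define I_HASHMASK		((1UL << I_HASHBITS) - 1)

	static unsigned long hash(unsigned long sb, unsigned long hashval)
	{
		unsigned long tmp;

		/* multiply-and-fold, exactly as in fs/inode.c above */
		tmp = (hashval * sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
				L1_CACHE_BYTES;
		tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
		return tmp & I_HASHMASK;
	}

	int main(void)
	{
		unsigned long sb = 0xffff810023456000UL;	/* fake sb address */

		/* adjacent inode numbers scatter across buckets */
		printf("%lu %lu %lu\n",
		       hash(sb, 100), hash(sb, 101), hash(sb, 102));
		return 0;
	}
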
@@ -990,6 +1034,65 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 
 EXPORT_SYMBOL(iget_locked);
 
+int insert_inode_locked(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	ino_t ino = inode->i_ino;
+	struct hlist_head *head = inode_hashtable + hash(sb, ino);
+	struct inode *old;
+
+	inode->i_state |= I_LOCK|I_NEW;
+	while (1) {
+		spin_lock(&inode_lock);
+		old = find_inode_fast(sb, head, ino);
+		if (likely(!old)) {
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode_lock);
+			return 0;
+		}
+		__iget(old);
+		spin_unlock(&inode_lock);
+		wait_on_inode(old);
+		if (unlikely(!hlist_unhashed(&old->i_hash))) {
+			iput(old);
+			return -EBUSY;
+		}
+		iput(old);
+	}
+}
+
+EXPORT_SYMBOL(insert_inode_locked);
+
+int insert_inode_locked4(struct inode *inode, unsigned long hashval,
+		int (*test)(struct inode *, void *), void *data)
+{
+	struct super_block *sb = inode->i_sb;
+	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+	struct inode *old;
+
+	inode->i_state |= I_LOCK|I_NEW;
+
+	while (1) {
+		spin_lock(&inode_lock);
+		old = find_inode(sb, head, test, data);
+		if (likely(!old)) {
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode_lock);
+			return 0;
+		}
+		__iget(old);
+		spin_unlock(&inode_lock);
+		wait_on_inode(old);
+		if (unlikely(!hlist_unhashed(&old->i_hash))) {
+			iput(old);
+			return -EBUSY;
+		}
+		iput(old);
+	}
+}
+
+EXPORT_SYMBOL(insert_inode_locked4);
+
 /**
  * __insert_inode_hash - hash an inode
  * @inode: unhashed inode
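
These helpers let a filesystem hash a freshly created inode while it is held
locked (I_LOCK|I_NEW), instead of creating it unhashed and racing with
concurrent lookups; -EBUSY means a live inode with the same identity is
already hashed. A hedged sketch of a create path built on them;
myfs_create_inode() and the on-disk step are hypothetical:

	static struct inode *myfs_create_inode(struct super_block *sb,
					       unsigned long ino)
	{
		struct inode *inode = new_inode(sb);	/* allocated, unhashed */

		if (!inode)
			return ERR_PTR(-ENOMEM);
		inode->i_ino = ino;
		if (insert_inode_locked(inode) < 0) {
			/* an inode with this number is already hashed */
			iput(inode);
			return ERR_PTR(-EBUSY);
		}
		/* ... initialise on-disk state here (hypothetical) ... */
		unlock_new_inode(inode);	/* clear I_LOCK|I_NEW, wake waiters */
		return inode;
	}

insert_inode_locked4() is the same pattern for filesystems that identify
inodes with a test()/data callback rather than by i_ino alone, mirroring the
iget5_locked() flavour of lookup.
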
@@ -1292,6 +1395,7 @@ int inode_wait(void *word)
 	schedule();
 	return 0;
 }
+EXPORT_SYMBOL(inode_wait);
 
 /*
  * If we try to find an inode in the inode hash while it is being
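
inode_wait() is the wait_on_bit() action function used when sleeping on
i_state bits, and exporting it lets filesystem code use the same idiom
directly. The sketch below matches how wait_on_inode() invokes it internally;
only the wrapper name is hypothetical:

	/* Sleep until I_LOCK clears, as wait_on_inode() does internally. */
	static inline void myfs_wait_inode_unlocked(struct inode *inode)
	{
		wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
			    TASK_UNINTERRUPTIBLE);
	}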