Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c | 208
1 file changed, 125 insertions(+), 83 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 0487ddba1397..f84ba338fafd 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -108,84 +108,100 @@ static void wake_up_inode(struct inode *inode)
         wake_up_bit(&inode->i_state, __I_LOCK);
 }
 
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * inode_init_always - perform inode structure initialisation
+ * @sb: superblock inode belongs to
+ * @inode: inode to initialise
+ *
+ * These are initialisations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 {
         static const struct address_space_operations empty_aops;
         static struct inode_operations empty_iops;
         static const struct file_operations empty_fops;
-        struct inode *inode;
-
-        if (sb->s_op->alloc_inode)
-                inode = sb->s_op->alloc_inode(sb);
-        else
-                inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
-        if (inode) {
-                struct address_space * const mapping = &inode->i_data;
-
-                inode->i_sb = sb;
-                inode->i_blkbits = sb->s_blocksize_bits;
-                inode->i_flags = 0;
-                atomic_set(&inode->i_count, 1);
-                inode->i_op = &empty_iops;
-                inode->i_fop = &empty_fops;
-                inode->i_nlink = 1;
-                atomic_set(&inode->i_writecount, 0);
-                inode->i_size = 0;
-                inode->i_blocks = 0;
-                inode->i_bytes = 0;
-                inode->i_generation = 0;
+        struct address_space * const mapping = &inode->i_data;
+
+        inode->i_sb = sb;
+        inode->i_blkbits = sb->s_blocksize_bits;
+        inode->i_flags = 0;
+        atomic_set(&inode->i_count, 1);
+        inode->i_op = &empty_iops;
+        inode->i_fop = &empty_fops;
+        inode->i_nlink = 1;
+        atomic_set(&inode->i_writecount, 0);
+        inode->i_size = 0;
+        inode->i_blocks = 0;
+        inode->i_bytes = 0;
+        inode->i_generation = 0;
 #ifdef CONFIG_QUOTA
-                memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
+        memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
 #endif
-                inode->i_pipe = NULL;
-                inode->i_bdev = NULL;
-                inode->i_cdev = NULL;
-                inode->i_rdev = 0;
-                inode->dirtied_when = 0;
-                if (security_inode_alloc(inode)) {
-                        if (inode->i_sb->s_op->destroy_inode)
-                                inode->i_sb->s_op->destroy_inode(inode);
-                        else
-                                kmem_cache_free(inode_cachep, (inode));
-                        return NULL;
-                }
+        inode->i_pipe = NULL;
+        inode->i_bdev = NULL;
+        inode->i_cdev = NULL;
+        inode->i_rdev = 0;
+        inode->dirtied_when = 0;
+        if (security_inode_alloc(inode)) {
+                if (inode->i_sb->s_op->destroy_inode)
+                        inode->i_sb->s_op->destroy_inode(inode);
+                else
+                        kmem_cache_free(inode_cachep, (inode));
+                return NULL;
+        }
 
-                spin_lock_init(&inode->i_lock);
-                lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
+        spin_lock_init(&inode->i_lock);
+        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 
-                mutex_init(&inode->i_mutex);
-                lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
+        mutex_init(&inode->i_mutex);
+        lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
 
-                init_rwsem(&inode->i_alloc_sem);
-                lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
+        init_rwsem(&inode->i_alloc_sem);
+        lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
 
-                mapping->a_ops = &empty_aops;
-                mapping->host = inode;
-                mapping->flags = 0;
-                mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
-                mapping->assoc_mapping = NULL;
-                mapping->backing_dev_info = &default_backing_dev_info;
-                mapping->writeback_index = 0;
+        mapping->a_ops = &empty_aops;
+        mapping->host = inode;
+        mapping->flags = 0;
+        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
+        mapping->assoc_mapping = NULL;
+        mapping->backing_dev_info = &default_backing_dev_info;
+        mapping->writeback_index = 0;
 
-                /*
-                 * If the block_device provides a backing_dev_info for client
-                 * inodes then use that. Otherwise the inode share the bdev's
-                 * backing_dev_info.
-                 */
-                if (sb->s_bdev) {
-                        struct backing_dev_info *bdi;
-
-                        bdi = sb->s_bdev->bd_inode_backing_dev_info;
-                        if (!bdi)
-                                bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-                        mapping->backing_dev_info = bdi;
-                }
-                inode->i_private = NULL;
-                inode->i_mapping = mapping;
+        /*
+         * If the block_device provides a backing_dev_info for client
+         * inodes then use that. Otherwise the inode shares the bdev's
+         * backing_dev_info.
+         */
+        if (sb->s_bdev) {
+                struct backing_dev_info *bdi;
+
+                bdi = sb->s_bdev->bd_inode_backing_dev_info;
+                if (!bdi)
+                        bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+                mapping->backing_dev_info = bdi;
         }
+        inode->i_private = NULL;
+        inode->i_mapping = mapping;
+
         return inode;
 }
+EXPORT_SYMBOL(inode_init_always);
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+        struct inode *inode;
+
+        if (sb->s_op->alloc_inode)
+                inode = sb->s_op->alloc_inode(sb);
+        else
+                inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+        if (inode)
+                return inode_init_always(sb, inode);
+        return NULL;
+}
 
 void destroy_inode(struct inode *inode)
 {
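
The factoring above lets a filesystem that allocates inodes on its own, outside the VFS's alloc_inode()/new_inode() path, reuse the generic field initialisation instead of duplicating it (XFS is the motivating user). Note that ->alloc_inode implementations do not call inode_init_always() themselves; alloc_inode() already applies it to whatever ->alloc_inode returns. A minimal sketch of such an external allocation path follows; every myfs_* name is hypothetical and not part of this patch:

#include <linux/fs.h>
#include <linux/slab.h>

/* A filesystem inode with private state wrapped around the VFS inode. */
struct myfs_inode {
        unsigned long   flags;          /* fs-private state */
        struct inode    vfs_inode;      /* embedded VFS inode */
};

static struct kmem_cache *myfs_inode_cachep;    /* assumed set up at init */

static struct inode *myfs_inode_alloc(struct super_block *sb)
{
        struct myfs_inode *mi;

        mi = kmem_cache_alloc(myfs_inode_cachep, GFP_KERNEL);
        if (!mi)
                return NULL;
        mi->flags = 0;

        /*
         * inode_init_always() fills in every per-allocation field.  On
         * failure it disposes of the inode through sb->s_op->destroy_inode()
         * and returns NULL, so the filesystem must provide ->destroy_inode
         * to free through the right cache (see the teardown sketch after
         * the next hunk) and there is nothing to unwind here.
         */
        return inode_init_always(sb, &mi->vfs_inode);
}
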
@@ -196,6 +212,7 @@ void destroy_inode(struct inode *inode)
         else
                 kmem_cache_free(inode_cachep, (inode));
 }
+EXPORT_SYMBOL(destroy_inode);
 
 
 /*
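
Exporting destroy_inode() provides the matching teardown for an inode that was initialised this way but never hashed or added to any list. Continuing the hypothetical sketch above: the filesystem's ->destroy_inode is what both destroy_inode() and the failure path of inode_init_always() end up calling, so it must free through the private cache:

static inline struct myfs_inode *MYFS_I(struct inode *inode)
{
        return container_of(inode, struct myfs_inode, vfs_inode);
}

static void myfs_destroy_inode(struct inode *inode)
{
        /* security state was already released by destroy_inode() */
        kmem_cache_free(myfs_inode_cachep, MYFS_I(inode));
}

static const struct super_operations myfs_super_ops = {
        .destroy_inode  = myfs_destroy_inode,
        /* other methods omitted */
};
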
@@ -534,6 +551,49 @@ repeat:
         return node ? inode : NULL;
 }
 
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+        unsigned long tmp;
+
+        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+                        L1_CACHE_BYTES;
+        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+        return tmp & I_HASHMASK;
+}
+
+static inline void
+__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
+                        struct inode *inode)
+{
+        inodes_stat.nr_inodes++;
+        list_add(&inode->i_list, &inode_in_use);
+        list_add(&inode->i_sb_list, &sb->s_inodes);
+        if (head)
+                hlist_add_head(&inode->i_hash, head);
+}
+
+/**
+ * inode_add_to_lists - add a new inode to relevant lists
+ * @sb: superblock inode belongs to
+ * @inode: inode to mark in use
+ *
+ * When an inode is allocated it needs to be accounted for, added to the
+ * in use list, the owning superblock and the inode hash. This needs to
+ * be done under the inode_lock, so export a function to do this rather
+ * than exporting inode_lock itself. We calculate the hash list to add
+ * to here so it is all internal, which requires the caller to have
+ * already set up the inode number in the inode being added.
+ */
+void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+{
+        struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+
+        spin_lock(&inode_lock);
+        __inode_add_to_lists(sb, head, inode);
+        spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_add_to_lists);
+
 /**
  * new_inode - obtain an inode
  * @sb: superblock
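
Together, the two exports let a filesystem-private iget path make a freshly initialised inode visible to the VFS. As the kernel doc above notes, i_ino must be set before the call, because the hash bucket is computed from it. A sketch in the same hypothetical myfs terms (myfs_read_disk_inode() is an assumed helper):

static struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;

        inode = myfs_inode_alloc(sb);           /* allocation sketch above */
        if (!inode)
                return ERR_PTR(-ENOMEM);

        /* set the inode number first: the hash bucket is derived from it */
        inode->i_ino = ino;
        if (myfs_read_disk_inode(inode)) {      /* assumed helper */
                /* never hashed or listed, so plain teardown is safe */
                destroy_inode(inode);
                return ERR_PTR(-EIO);
        }

        /* accounts the inode and adds it to the in-use, sb and hash lists */
        inode_add_to_lists(sb, inode);
        /* real code would also manage inode->i_state (e.g. I_NEW) here */
        return inode;
}
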
@@ -561,9 +621,7 @@ struct inode *new_inode(struct super_block *sb)
         inode = alloc_inode(sb);
         if (inode) {
                 spin_lock(&inode_lock);
-                inodes_stat.nr_inodes++;
-                list_add(&inode->i_list, &inode_in_use);
-                list_add(&inode->i_sb_list, &sb->s_inodes);
+                __inode_add_to_lists(sb, NULL, inode);
                 inode->i_ino = ++last_ino;
                 inode->i_state = 0;
                 spin_unlock(&inode_lock);
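
Callers that go through new_inode() are unaffected by the substitution; the usual creation pattern still applies. A generic sketch for reference, with the mode and timestamps chosen arbitrarily:

static struct inode *myfs_new_reg_inode(struct super_block *sb)
{
        struct inode *inode = new_inode(sb);

        if (!inode)
                return NULL;
        /* new_inode() has already accounted the inode and assigned i_ino */
        inode->i_mode = S_IFREG | 0644;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        return inode;
}
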
@@ -622,10 +680,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h
                         if (set(inode, data))
                                 goto set_failed;
 
-                        inodes_stat.nr_inodes++;
-                        list_add(&inode->i_list, &inode_in_use);
-                        list_add(&inode->i_sb_list, &sb->s_inodes);
-                        hlist_add_head(&inode->i_hash, head);
+                        __inode_add_to_lists(sb, head, inode);
                         inode->i_state = I_LOCK|I_NEW;
                         spin_unlock(&inode_lock);
 
@@ -671,10 +726,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
                 old = find_inode_fast(sb, head, ino);
                 if (!old) {
                         inode->i_ino = ino;
-                        inodes_stat.nr_inodes++;
-                        list_add(&inode->i_list, &inode_in_use);
-                        list_add(&inode->i_sb_list, &sb->s_inodes);
-                        hlist_add_head(&inode->i_hash, head);
+                        __inode_add_to_lists(sb, head, inode);
                         inode->i_state = I_LOCK|I_NEW;
                         spin_unlock(&inode_lock);
 
@@ -698,16 +750,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
         return inode;
 }
 
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
-        unsigned long tmp;
-
-        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
-                        L1_CACHE_BYTES;
-        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-        return tmp & I_HASHMASK;
-}
-
 /**
  * iunique - get a unique inode number
  * @sb: superblock
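
The last hunk only removes the old definition of hash(), which moved up next to its new callers. Its mixing scheme is easy to try in isolation: fold the superblock address and inode number together, then fold the high bits down into the table-index range. A standalone userspace sketch assuming a 64-bit unsigned long; GOLDEN_RATIO_PRIME is the 64-bit constant from <linux/hash.h> of this era, while I_HASHBITS and I_HASHMASK are derived from boot-time hash-table sizing in the kernel and are fixed stand-ins here:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's configuration-derived values. */
#define GOLDEN_RATIO_PRIME      0x9e37fffffffc0001UL    /* 64-bit variant */
#define I_HASHBITS              14                      /* assumed table order */
#define I_HASHMASK              ((1UL << I_HASHBITS) - 1)
#define L1_CACHE_BYTES          64                      /* assumed line size */

static unsigned long hash(unsigned long sb, unsigned long hashval)
{
        unsigned long tmp;

        /* fold the superblock address and the inode number together */
        tmp = (hashval * sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES;
        /* mix the high bits back down into the bucket-index range */
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
        return tmp & I_HASHMASK;
}

int main(void)
{
        /* bucket indices for two adjacent inode numbers on one superblock */
        unsigned long sb = 0xffff880012345678UL;        /* fake pointer value */

        printf("%lu %lu\n", hash(sb, 42), hash(sb, 43));
        return 0;
}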