author	Ingo Molnar <mingo@elte.hu>	2009-01-12 05:32:03 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-12 05:32:03 -0500
commit	e3ee1e123183ca9847e74b7b8e2694c9e3b817a6 (patch)
tree	652a84674ed05eaa46a813de2223af0bd0168a5a /fs/inode.c
parent	5762ba1873b0bb9faa631aaa02f533c2b9837f82 (diff)
parent	c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into timers/hrtimers
Conflicts:
	kernel/time/tick-common.c
Diffstat (limited to 'fs/inode.c')
-rw-r--r--	fs/inode.c	273
1 file changed, 189 insertions(+), 84 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 0487ddba1397..913ab2d9a5d1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -22,6 +22,7 @@
 #include <linux/bootmem.h>
 #include <linux/inotify.h>
 #include <linux/mount.h>
+#include <linux/async.h>
 
 /*
  * This is needed for the following functions:
@@ -108,84 +109,102 @@ static void wake_up_inode(struct inode *inode)
 	wake_up_bit(&inode->i_state, __I_LOCK);
 }
 
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * inode_init_always - perform inode structure initialisation
+ * @sb: superblock inode belongs to
+ * @inode: inode to initialise
+ *
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 {
 	static const struct address_space_operations empty_aops;
 	static struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
-	struct inode *inode;
 
-	if (sb->s_op->alloc_inode)
-		inode = sb->s_op->alloc_inode(sb);
-	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
-
-	if (inode) {
-		struct address_space * const mapping = &inode->i_data;
-
-		inode->i_sb = sb;
-		inode->i_blkbits = sb->s_blocksize_bits;
-		inode->i_flags = 0;
-		atomic_set(&inode->i_count, 1);
-		inode->i_op = &empty_iops;
-		inode->i_fop = &empty_fops;
-		inode->i_nlink = 1;
-		atomic_set(&inode->i_writecount, 0);
-		inode->i_size = 0;
-		inode->i_blocks = 0;
-		inode->i_bytes = 0;
-		inode->i_generation = 0;
+	struct address_space * const mapping = &inode->i_data;
+
+	inode->i_sb = sb;
+	inode->i_blkbits = sb->s_blocksize_bits;
+	inode->i_flags = 0;
+	atomic_set(&inode->i_count, 1);
+	inode->i_op = &empty_iops;
+	inode->i_fop = &empty_fops;
+	inode->i_nlink = 1;
+	inode->i_uid = 0;
+	inode->i_gid = 0;
+	atomic_set(&inode->i_writecount, 0);
+	inode->i_size = 0;
+	inode->i_blocks = 0;
+	inode->i_bytes = 0;
+	inode->i_generation = 0;
 #ifdef CONFIG_QUOTA
 	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
 #endif
 	inode->i_pipe = NULL;
 	inode->i_bdev = NULL;
 	inode->i_cdev = NULL;
 	inode->i_rdev = 0;
 	inode->dirtied_when = 0;
 	if (security_inode_alloc(inode)) {
 		if (inode->i_sb->s_op->destroy_inode)
 			inode->i_sb->s_op->destroy_inode(inode);
 		else
 			kmem_cache_free(inode_cachep, (inode));
 		return NULL;
 	}
 
 	spin_lock_init(&inode->i_lock);
 	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 
 	mutex_init(&inode->i_mutex);
 	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
 
 	init_rwsem(&inode->i_alloc_sem);
 	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
 
 	mapping->a_ops = &empty_aops;
 	mapping->host = inode;
 	mapping->flags = 0;
-	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 	mapping->assoc_mapping = NULL;
 	mapping->backing_dev_info = &default_backing_dev_info;
 	mapping->writeback_index = 0;
 
 	/*
 	 * If the block_device provides a backing_dev_info for client
 	 * inodes then use that. Otherwise the inode shares the bdev's
 	 * backing_dev_info.
 	 */
 	if (sb->s_bdev) {
 		struct backing_dev_info *bdi;
 
 		bdi = sb->s_bdev->bd_inode_backing_dev_info;
 		if (!bdi)
 			bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
 		mapping->backing_dev_info = bdi;
-		}
-		inode->i_private = NULL;
-		inode->i_mapping = mapping;
 	}
+	inode->i_private = NULL;
+	inode->i_mapping = mapping;
+
 	return inode;
 }
+EXPORT_SYMBOL(inode_init_always);
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct inode *inode;
+
+	if (sb->s_op->alloc_inode)
+		inode = sb->s_op->alloc_inode(sb);
+	else
+		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+	if (inode)
+		return inode_init_always(sb, inode);
+	return NULL;
+}
 
 void destroy_inode(struct inode *inode)
 {
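
With this split, alloc_inode() only obtains the object (from ->alloc_inode() or the inode slab) while the exported inode_init_always() resets every per-allocation field, so code outside fs/inode.c can re-initialise an inode it already owns, such as one recycled from a filesystem-private cache. A minimal sketch of such a caller (the myfs name is hypothetical, not part of this patch):

	static struct inode *myfs_recycle_inode(struct super_block *sb,
						struct inode *inode)
	{
		/*
		 * Slab constructors run only on first allocation, so all
		 * per-allocation state (i_count, i_size, the mapping, the
		 * locks, ...) must be reset before reuse; the exported
		 * helper does exactly that, and returns NULL (after
		 * disposing of the inode) if security_inode_alloc() fails.
		 */
		return inode_init_always(sb, inode);
	}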
@@ -196,6 +215,7 @@ void destroy_inode(struct inode *inode)
 	else
 		kmem_cache_free(inode_cachep, (inode));
 }
+EXPORT_SYMBOL(destroy_inode);
 
 
 /*
@@ -534,12 +554,55 @@ repeat:
 	return node ? inode : NULL;
 }
 
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+	unsigned long tmp;
+
+	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+			L1_CACHE_BYTES;
+	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+	return tmp & I_HASHMASK;
+}
+
+static inline void
+__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
+			struct inode *inode)
+{
+	inodes_stat.nr_inodes++;
+	list_add(&inode->i_list, &inode_in_use);
+	list_add(&inode->i_sb_list, &sb->s_inodes);
+	if (head)
+		hlist_add_head(&inode->i_hash, head);
+}
+
+/**
+ * inode_add_to_lists - add a new inode to relevant lists
+ * @sb: superblock inode belongs to
+ * @inode: inode to mark in use
+ *
+ * When an inode is allocated it needs to be accounted for, added to the in use
+ * list, the owning superblock and the inode hash. This needs to be done under
+ * the inode_lock, so export a function to do this rather than the inode lock
+ * itself. We calculate the hash list to add to here so it is all internal
+ * which requires the caller to have already set up the inode number in the
+ * inode to add.
+ */
+void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+
+	spin_lock(&inode_lock);
+	__inode_add_to_lists(sb, head, inode);
+	spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_add_to_lists);
+
 /**
  * new_inode - obtain an inode
  * @sb: superblock
  *
  * Allocates a new inode for given superblock. The default gfp_mask
- * for allocations related to inode->i_mapping is GFP_HIGHUSER_PAGECACHE.
+ * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
  * If HIGHMEM pages are unsuitable or it is known that pages allocated
  * for the page cache are not reclaimable or migratable,
  * mapping_set_gfp_mask() must be called with suitable flags on the
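
Note the kernel-doc for inode_add_to_lists(): the hash bucket is computed internally from sb and inode->i_ino, so the caller must have set the inode number before calling. The expected pattern looks roughly like this (the myfs_* helpers are hypothetical):

	struct inode *inode = myfs_build_inode(sb);	/* fs-private setup */

	inode->i_ino = myfs_pick_ino(sb);	/* must precede the call: the
						 * bucket is derived from i_ino */
	inode_add_to_lists(sb, inode);		/* takes inode_lock itself */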
@@ -561,9 +624,7 @@ struct inode *new_inode(struct super_block *sb)
 	inode = alloc_inode(sb);
 	if (inode) {
 		spin_lock(&inode_lock);
-		inodes_stat.nr_inodes++;
-		list_add(&inode->i_list, &inode_in_use);
-		list_add(&inode->i_sb_list, &sb->s_inodes);
+		__inode_add_to_lists(sb, NULL, inode);
 		inode->i_ino = ++last_ino;
 		inode->i_state = 0;
 		spin_unlock(&inode_lock);
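
Since GFP_HIGHUSER_MOVABLE allows the new inode's page-cache pages to come from HIGHMEM and to be migrated, a filesystem for which that is unsuitable overrides the mask right after allocation, as the kernel-doc above prescribes. A sketch, assuming GFP_USER (lowmem, non-movable) is the mask the caller wants:

	struct inode *inode = new_inode(sb);

	if (inode)
		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);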
@@ -622,10 +683,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h
 			if (set(inode, data))
 				goto set_failed;
 
-			inodes_stat.nr_inodes++;
-			list_add(&inode->i_list, &inode_in_use);
-			list_add(&inode->i_sb_list, &sb->s_inodes);
-			hlist_add_head(&inode->i_hash, head);
+			__inode_add_to_lists(sb, head, inode);
 			inode->i_state = I_LOCK|I_NEW;
 			spin_unlock(&inode_lock);
 
@@ -671,10 +729,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
 			inode->i_ino = ino;
-			inodes_stat.nr_inodes++;
-			list_add(&inode->i_list, &inode_in_use);
-			list_add(&inode->i_sb_list, &sb->s_inodes);
-			hlist_add_head(&inode->i_hash, head);
+			__inode_add_to_lists(sb, head, inode);
 			inode->i_state = I_LOCK|I_NEW;
 			spin_unlock(&inode_lock);
 
@@ -698,16 +753,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
 	return inode;
 }
 
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
-	unsigned long tmp;
-
-	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
-			L1_CACHE_BYTES;
-	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-	return tmp & I_HASHMASK;
-}
-
 /**
  * iunique - get a unique inode number
  * @sb: superblock
@@ -990,6 +1035,65 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 
 EXPORT_SYMBOL(iget_locked);
 
+int insert_inode_locked(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	ino_t ino = inode->i_ino;
+	struct hlist_head *head = inode_hashtable + hash(sb, ino);
+	struct inode *old;
+
+	inode->i_state |= I_LOCK|I_NEW;
+	while (1) {
+		spin_lock(&inode_lock);
+		old = find_inode_fast(sb, head, ino);
+		if (likely(!old)) {
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode_lock);
+			return 0;
+		}
+		__iget(old);
+		spin_unlock(&inode_lock);
+		wait_on_inode(old);
+		if (unlikely(!hlist_unhashed(&old->i_hash))) {
+			iput(old);
+			return -EBUSY;
+		}
+		iput(old);
+	}
+}
+
+EXPORT_SYMBOL(insert_inode_locked);
+
+int insert_inode_locked4(struct inode *inode, unsigned long hashval,
+		int (*test)(struct inode *, void *), void *data)
+{
+	struct super_block *sb = inode->i_sb;
+	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+	struct inode *old;
+
+	inode->i_state |= I_LOCK|I_NEW;
+
+	while (1) {
+		spin_lock(&inode_lock);
+		old = find_inode(sb, head, test, data);
+		if (likely(!old)) {
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode_lock);
+			return 0;
+		}
+		__iget(old);
+		spin_unlock(&inode_lock);
+		wait_on_inode(old);
+		if (unlikely(!hlist_unhashed(&old->i_hash))) {
+			iput(old);
+			return -EBUSY;
+		}
+		iput(old);
+	}
+}
+
+EXPORT_SYMBOL(insert_inode_locked4);
+
 /**
  * __insert_inode_hash - hash an inode
  * @inode: unhashed inode
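
insert_inode_locked() either hashes the inode with I_LOCK|I_NEW set and returns 0, or returns -EBUSY if an inode with the same number is already present and stays hashed after the wait; insert_inode_locked4() follows the same protocol keyed by a test() callback, mirroring iget5_locked(). A sketch of how a filesystem create path might use it (variable names and error handling are illustrative only):

	inode->i_ino = ino;		/* inode number chosen by the fs */
	err = insert_inode_locked(inode);
	if (err) {			/* -EBUSY: ino is already live */
		iput(inode);
		return err;
	}
	/* ... fill in the inode and instantiate the dentry ... */
	unlock_new_inode(inode);	/* clears I_LOCK|I_NEW, wakes waiters */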
@@ -1292,6 +1396,7 @@ int inode_wait(void *word)
 	schedule();
 	return 0;
 }
+EXPORT_SYMBOL(inode_wait);
 
 /*
  * If we try to find an inode in the inode hash while it is being