path: root/fs/inode.c
author    Andrea Bastoni <bastoni@cs.unc.edu>  2011-08-27 09:43:54 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2011-08-27 10:06:11 -0400
commit    7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree      5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /fs/inode.c
parent    7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent    02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:

  * Litmus^RT scheduling class is the topmost scheduling class
    (above stop_sched_class).
  * scheduler_ipi() function (e.g., in smp_reschedule_interrupt())
    may increase IPI latencies.
  * Added path into schedule() to quickly re-evaluate scheduling
    decision without becoming preemptive again. This used to be a
    standard path before the removal of BKL.

Conflicts:
	Makefile
	arch/arm/kernel/calls.S
	arch/arm/kernel/smp.c
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/smp.c
	arch/x86/kernel/syscall_table_32.S
	include/linux/hrtimer.h
	kernel/printk.c
	kernel/sched.c
	kernel/sched_fair.c
Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c  1172
1 file changed, 653 insertions(+), 519 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index d4fe9c031864..dbf0e760c0a9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1,9 +1,7 @@
 /*
- * linux/fs/inode.c
- *
  * (C) 1997 Linus Torvalds
+ * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
  */
-
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/dcache.h>
@@ -24,95 +22,114 @@
 #include <linux/mount.h>
 #include <linux/async.h>
 #include <linux/posix_acl.h>
+#include <linux/prefetch.h>
+#include <linux/ima.h>
+#include <linux/cred.h>
+#include <linux/buffer_head.h> /* for inode_has_buffers */
+#include "internal.h"
 
 /*
- * This is needed for the following functions:
- *  - inode_has_buffers
- *  - invalidate_inode_buffers
- *  - invalidate_bdev
+ * Inode locking rules:
  *
- * FIXME: remove all knowledge of the buffer layer from this file
- */
-#include <linux/buffer_head.h>
-
-/*
- * New inode.c implementation.
+ * inode->i_lock protects:
+ *   inode->i_state, inode->i_hash, __iget()
+ * inode_lru_lock protects:
+ *   inode_lru, inode->i_lru
+ * inode_sb_list_lock protects:
+ *   sb->s_inodes, inode->i_sb_list
+ * inode_wb_list_lock protects:
+ *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
+ * inode_hash_lock protects:
+ *   inode_hashtable, inode->i_hash
  *
- * This implementation has the basic premise of trying
- * to be extremely low-overhead and SMP-safe, yet be
- * simple enough to be "obviously correct".
+ * Lock ordering:
  *
- * Famous last words.
- */
-
-/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */
-
-/* #define INODE_PARANOIA 1 */
-/* #define INODE_DEBUG 1 */
-
-/*
- * Inode lookup is no longer as critical as it used to be:
- * most of the lookups are going to be through the dcache.
+ * inode_sb_list_lock
+ *   inode->i_lock
+ *     inode_lru_lock
+ *
+ * inode_wb_list_lock
+ *   inode->i_lock
+ *
+ * inode_hash_lock
+ *   inode_sb_list_lock
+ *   inode->i_lock
+ *
+ * iunique_lock
+ *   inode_hash_lock
  */
-#define I_HASHBITS	i_hash_shift
-#define I_HASHMASK	i_hash_mask
 
 static unsigned int i_hash_mask __read_mostly;
 static unsigned int i_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-/*
- * Each inode can be on two separate lists. One is
- * the hash list of the inode, used for lookups. The
- * other linked list is the "type" list:
- *  "in_use" - valid inode, i_count > 0, i_nlink > 0
- *  "dirty"  - as "in_use" but also dirty
- *  "unused" - valid inode, i_count = 0
- *
- * A "dirty" list is maintained for each super block,
- * allowing for low-overhead inode sync() operations.
- */
+static LIST_HEAD(inode_lru);
+static DEFINE_SPINLOCK(inode_lru_lock);
 
-LIST_HEAD(inode_in_use);
-LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable __read_mostly;
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 
 /*
- * A simple spinlock to protect the list manipulations.
+ * iprune_sem provides exclusion between the icache shrinking and the
+ * umount path.
  *
- * NOTE! You also have to own the lock if you change
- * the i_state of an inode while it is in use..
+ * We don't actually need it to protect anything in the umount path,
+ * but only need to cycle through it to make sure any inode that
+ * prune_icache took off the LRU list has been fully torn down by the
+ * time we are past evict_inodes.
  */
-DEFINE_SPINLOCK(inode_lock);
+static DECLARE_RWSEM(iprune_sem);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
- * icache shrinking path, and the umount path.  Without this exclusion,
- * by the time prune_icache calls iput for the inode whose pages it has
- * been invalidating, or by the time it calls clear_inode & destroy_inode
- * from its final dispose_list, the struct super_block they refer to
- * (for inode->i_sb->s_op) may already have been freed and reused.
- *
- * We make this an rwsem because the fastpath is icache shrinking. In
- * some cases a filesystem may be doing a significant amount of work in
- * its inode reclaim code, so this should improve parallelism.
+ * Empty aops. Can be used for the cases where the user does not
+ * define any of the address_space operations.
  */
-static DECLARE_RWSEM(iprune_sem);
+const struct address_space_operations empty_aops = {
+};
+EXPORT_SYMBOL(empty_aops);
 
 /*
  * Statistics gathering..
  */
 struct inodes_stat_t inodes_stat;
 
+static DEFINE_PER_CPU(unsigned int, nr_inodes);
+
 static struct kmem_cache *inode_cachep __read_mostly;
 
-static void wake_up_inode(struct inode *inode)
+static int get_nr_inodes(void)
 {
-	/*
-	 * Prevent speculative execution through spin_unlock(&inode_lock);
-	 */
-	smp_mb();
-	wake_up_bit(&inode->i_state, __I_NEW);
+	int i;
+	int sum = 0;
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_inodes, i);
+	return sum < 0 ? 0 : sum;
+}
+
+static inline int get_nr_inodes_unused(void)
+{
+	return inodes_stat.nr_unused;
+}
+
+int get_nr_dirty_inodes(void)
+{
+	/* not actually dirty inodes, but a wild approximation */
+	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
+	return nr_dirty > 0 ? nr_dirty : 0;
+}
+
+/*
+ * Handle nr_inode sysctl
+ */
+#ifdef CONFIG_SYSCTL
+int proc_nr_inodes(ctl_table *table, int write,
+		   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	inodes_stat.nr_inodes = get_nr_inodes();
+	return proc_dointvec(table, write, buffer, lenp, ppos);
 }
+#endif
 
 /**
  * inode_init_always - perform inode structure intialisation
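
[Editor's note] The get_nr_inodes()/nr_inodes additions in the hunk above are an instance of the per-CPU counter pattern: writers only touch their own CPU's slot, and readers sum all slots, accepting a slightly stale total in exchange for no shared hot cacheline. A minimal, self-contained C sketch of the same idea (illustration only, not kernel code; the shard count and all names are invented):

#include <stdio.h>
#include <pthread.h>

#define NSHARDS 4                      /* stands in for NR_CPUS */

static long shard[NSHARDS];            /* one counter per "CPU" */

static void shard_inc(int cpu)
{
	__atomic_fetch_add(&shard[cpu], 1, __ATOMIC_RELAXED);
}

static long shard_sum(void)            /* analogous to get_nr_inodes() */
{
	long sum = 0;
	for (int i = 0; i < NSHARDS; i++)
		sum += __atomic_load_n(&shard[i], __ATOMIC_RELAXED);
	return sum < 0 ? 0 : sum;      /* clamp, as the kernel code does */
}

static void *worker(void *arg)
{
	for (int i = 0; i < 100000; i++)
		shard_inc((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[NSHARDS];
	for (long i = 0; i < NSHARDS; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < NSHARDS; i++)
		pthread_join(t[i], NULL);
	printf("total = %ld\n", shard_sum());   /* prints 400000 */
	return 0;
}

Build with: gcc -O2 -pthread sketch.c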
@@ -124,7 +141,6 @@ static void wake_up_inode(struct inode *inode)
  */
 int inode_init_always(struct super_block *sb, struct inode *inode)
 {
-	static const struct address_space_operations empty_aops;
 	static const struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
 	struct address_space *const mapping = &inode->i_data;
@@ -192,6 +208,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	inode->i_fsnotify_mask = 0;
 #endif
 
+	this_cpu_inc(nr_inodes);
+
 	return 0;
 out:
 	return -ENOMEM;
@@ -221,6 +239,12 @@ static struct inode *alloc_inode(struct super_block *sb)
 	return inode;
 }
 
+void free_inode_nonrcu(struct inode *inode)
+{
+	kmem_cache_free(inode_cachep, inode);
+}
+EXPORT_SYMBOL(free_inode_nonrcu);
+
 void __destroy_inode(struct inode *inode)
 {
 	BUG_ON(inode_has_buffers(inode));
@@ -232,18 +256,40 @@ void __destroy_inode(struct inode *inode)
 	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
 		posix_acl_release(inode->i_default_acl);
 #endif
+	this_cpu_dec(nr_inodes);
 }
 EXPORT_SYMBOL(__destroy_inode);
 
-void destroy_inode(struct inode *inode)
+static void i_callback(struct rcu_head *head)
 {
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(inode_cachep, inode);
+}
+
+static void destroy_inode(struct inode *inode)
+{
+	BUG_ON(!list_empty(&inode->i_lru));
 	__destroy_inode(inode);
 	if (inode->i_sb->s_op->destroy_inode)
 		inode->i_sb->s_op->destroy_inode(inode);
 	else
-		kmem_cache_free(inode_cachep, (inode));
+		call_rcu(&inode->i_rcu, i_callback);
 }
 
+void address_space_init_once(struct address_space *mapping)
+{
+	memset(mapping, 0, sizeof(*mapping));
+	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+	spin_lock_init(&mapping->tree_lock);
+	mutex_init(&mapping->i_mmap_mutex);
+	INIT_LIST_HEAD(&mapping->private_list);
+	spin_lock_init(&mapping->private_lock);
+	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+}
+EXPORT_SYMBOL(address_space_init_once);
+
 /*
  * These are initializations that only need to be done
  * once, because the fields are idempotent across use
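
[Editor's note] The i_callback() added above defers the actual kmem_cache_free() to an RCU grace period via call_rcu(), and recovers the inode from the embedded rcu_head with container_of(). A standalone C sketch of just the container_of mechanics (illustration only; the struct names are invented and the grace-period machinery is replaced by a direct call):

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

struct rcu_head_like {
	void (*func)(struct rcu_head_like *head);
};

struct fake_inode {
	unsigned long i_ino;
	struct rcu_head_like i_rcu;	/* embedded callback handle */
};

/* recover the enclosing struct from a pointer to one of its members */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void i_callback_demo(struct rcu_head_like *head)
{
	struct fake_inode *inode = container_of(head, struct fake_inode, i_rcu);
	printf("freeing inode %lu\n", inode->i_ino);
	free(inode);
}

int main(void)
{
	struct fake_inode *inode = malloc(sizeof(*inode));
	inode->i_ino = 42;
	inode->i_rcu.func = i_callback_demo;
	/* a real RCU implementation would invoke this only after a grace period */
	inode->i_rcu.func(&inode->i_rcu);
	return 0;
}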
@@ -255,13 +301,9 @@ void inode_init_once(struct inode *inode)
 	INIT_HLIST_NODE(&inode->i_hash);
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
-	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	spin_lock_init(&inode->i_data.tree_lock);
-	spin_lock_init(&inode->i_data.i_mmap_lock);
-	INIT_LIST_HEAD(&inode->i_data.private_list);
-	spin_lock_init(&inode->i_data.private_lock);
-	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
-	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
+	INIT_LIST_HEAD(&inode->i_wb_list);
+	INIT_LIST_HEAD(&inode->i_lru);
+	address_space_init_once(&inode->i_data);
 	i_size_ordered_init(inode);
 #ifdef CONFIG_FSNOTIFY
 	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
@@ -279,34 +321,150 @@ static void init_once(void *foo)
 }
 
 /*
- * inode_lock must be held
+ * inode->i_lock must be held
  */
 void __iget(struct inode *inode)
 {
-	if (atomic_inc_return(&inode->i_count) != 1)
-		return;
+	atomic_inc(&inode->i_count);
+}
+
+/*
+ * get additional reference to inode; caller must already hold one.
+ */
+void ihold(struct inode *inode)
+{
+	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
+}
+EXPORT_SYMBOL(ihold);
+
+static void inode_lru_list_add(struct inode *inode)
+{
+	spin_lock(&inode_lru_lock);
+	if (list_empty(&inode->i_lru)) {
+		list_add(&inode->i_lru, &inode_lru);
+		inodes_stat.nr_unused++;
+	}
+	spin_unlock(&inode_lru_lock);
+}
+
+static void inode_lru_list_del(struct inode *inode)
+{
+	spin_lock(&inode_lru_lock);
+	if (!list_empty(&inode->i_lru)) {
+		list_del_init(&inode->i_lru);
+		inodes_stat.nr_unused--;
+	}
+	spin_unlock(&inode_lru_lock);
+}
+
+/**
+ * inode_sb_list_add - add inode to the superblock list of inodes
+ * @inode: inode to add
+ */
+void inode_sb_list_add(struct inode *inode)
+{
+	spin_lock(&inode_sb_list_lock);
+	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+	spin_unlock(&inode_sb_list_lock);
+}
+EXPORT_SYMBOL_GPL(inode_sb_list_add);
+
+static inline void inode_sb_list_del(struct inode *inode)
+{
+	spin_lock(&inode_sb_list_lock);
+	list_del_init(&inode->i_sb_list);
+	spin_unlock(&inode_sb_list_lock);
+}
+
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+	unsigned long tmp;
+
+	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+			L1_CACHE_BYTES;
+	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
+	return tmp & i_hash_mask;
+}
+
+/**
+ * __insert_inode_hash - hash an inode
+ * @inode: unhashed inode
+ * @hashval: unsigned long value used to locate this object in the
+ *		inode_hashtable.
+ *
+ * Add an inode to the inode hash for this superblock.
+ */
+void __insert_inode_hash(struct inode *inode, unsigned long hashval)
+{
+	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
 
-	if (!(inode->i_state & (I_DIRTY|I_SYNC)))
-		list_move(&inode->i_list, &inode_in_use);
-	inodes_stat.nr_unused--;
+	spin_lock(&inode_hash_lock);
+	spin_lock(&inode->i_lock);
+	hlist_add_head(&inode->i_hash, b);
+	spin_unlock(&inode->i_lock);
+	spin_unlock(&inode_hash_lock);
 }
+EXPORT_SYMBOL(__insert_inode_hash);
+
+/**
+ * remove_inode_hash - remove an inode from the hash
+ * @inode: inode to unhash
+ *
+ * Remove an inode from the superblock.
+ */
+void remove_inode_hash(struct inode *inode)
+{
+	spin_lock(&inode_hash_lock);
+	spin_lock(&inode->i_lock);
+	hlist_del_init(&inode->i_hash);
+	spin_unlock(&inode->i_lock);
+	spin_unlock(&inode_hash_lock);
+}
+EXPORT_SYMBOL(remove_inode_hash);
 
 void end_writeback(struct inode *inode)
 {
 	might_sleep();
+	/*
+	 * We have to cycle tree_lock here because reclaim can be still in the
+	 * process of removing the last page (in __delete_from_page_cache())
+	 * and we must not free mapping under it.
+	 */
+	spin_lock_irq(&inode->i_data.tree_lock);
 	BUG_ON(inode->i_data.nrpages);
+	spin_unlock_irq(&inode->i_data.tree_lock);
 	BUG_ON(!list_empty(&inode->i_data.private_list));
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(inode->i_state & I_CLEAR);
 	inode_sync_wait(inode);
+	/* don't need i_lock here, no concurrent mods to i_state */
 	inode->i_state = I_FREEING | I_CLEAR;
 }
 EXPORT_SYMBOL(end_writeback);
 
+/*
+ * Free the inode passed in, removing it from the lists it is still connected
+ * to. We remove any pages still attached to the inode and wait for any IO that
+ * is still in progress before finally destroying the inode.
+ *
+ * An inode must already be marked I_FREEING so that we avoid the inode being
+ * moved back onto lists if we race with other code that manipulates the lists
+ * (e.g. writeback_single_inode). The caller is responsible for setting this.
+ *
+ * An inode must already be removed from the LRU list before being evicted from
+ * the cache. This should occur atomically with setting the I_FREEING state
+ * flag, so no inodes here should ever be on the LRU when being evicted.
+ */
 static void evict(struct inode *inode)
 {
 	const struct super_operations *op = inode->i_sb->s_op;
 
+	BUG_ON(!(inode->i_state & I_FREEING));
+	BUG_ON(!list_empty(&inode->i_lru));
+
+	inode_wb_list_del(inode);
+	inode_sb_list_del(inode);
+
 	if (op->evict_inode) {
 		op->evict_inode(inode);
 	} else {
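
[Editor's note] The hash() function moved into this file above salts the per-filesystem hash with the superblock pointer so that equal inode numbers from different filesystems land in different buckets. A standalone sketch (illustration only, assuming a 64-bit build; the GOLDEN_RATIO_PRIME value, cache-line size, shift and mask below are assumptions standing in for what the kernel computes at boot):

#include <stdio.h>

#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL	/* assumed 64-bit value */
#define L1_CACHE_BYTES 64				/* assumed cache line size */
static unsigned int i_hash_shift = 16;			/* assumed table size 2^16 */
static unsigned long i_hash_mask = (1UL << 16) - 1;

static unsigned long hash(unsigned long sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

int main(void)
{
	/* the superblock pointer acts as a per-filesystem salt: the same
	 * inode number hashes to different buckets on different "superblocks" */
	printf("%lu\n", hash(0xffff880012345678UL, 1001));
	printf("%lu\n", hash(0xffff8800deadbeefUL, 1001));
	return 0;
}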
@@ -318,6 +476,15 @@ static void evict(struct inode *inode)
 		bd_forget(inode);
 	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
 		cd_forget(inode);
+
+	remove_inode_hash(inode);
+
+	spin_lock(&inode->i_lock);
+	wake_up_bit(&inode->i_state, __I_NEW);
+	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
+	spin_unlock(&inode->i_lock);
+
+	destroy_inode(inode);
 }
 
 /*
@@ -329,101 +496,108 @@ static void evict(struct inode *inode)
  */
 static void dispose_list(struct list_head *head)
 {
-	int nr_disposed = 0;
-
 	while (!list_empty(head)) {
 		struct inode *inode;
 
-		inode = list_first_entry(head, struct inode, i_list);
-		list_del(&inode->i_list);
+		inode = list_first_entry(head, struct inode, i_lru);
+		list_del_init(&inode->i_lru);
 
 		evict(inode);
-
-		spin_lock(&inode_lock);
-		hlist_del_init(&inode->i_hash);
-		list_del_init(&inode->i_sb_list);
-		spin_unlock(&inode_lock);
-
-		wake_up_inode(inode);
-		destroy_inode(inode);
-		nr_disposed++;
 	}
-	spin_lock(&inode_lock);
-	inodes_stat.nr_inodes -= nr_disposed;
-	spin_unlock(&inode_lock);
 }
 
-/*
- * Invalidate all inodes for a device.
+/**
+ * evict_inodes	- evict all evictable inodes for a superblock
+ * @sb:		superblock to operate on
+ *
+ * Make sure that no inodes with zero refcount are retained.  This is
+ * called by superblock shutdown after having MS_ACTIVE flag removed,
+ * so any inode reaching zero refcount during or after that call will
+ * be immediately evicted.
  */
-static int invalidate_list(struct list_head *head, struct list_head *dispose)
+void evict_inodes(struct super_block *sb)
 {
-	struct list_head *next;
-	int busy = 0, count = 0;
+	struct inode *inode, *next;
+	LIST_HEAD(dispose);
 
-	next = head->next;
-	for (;;) {
-		struct list_head *tmp = next;
-		struct inode *inode;
-
-		/*
-		 * We can reschedule here without worrying about the list's
-		 * consistency because the per-sb list of inodes must not
-		 * change during umount anymore, and because iprune_sem keeps
-		 * shrink_icache_memory() away.
-		 */
-		cond_resched_lock(&inode_lock);
-
-		next = next->next;
-		if (tmp == head)
-			break;
-		inode = list_entry(tmp, struct inode, i_sb_list);
-		if (inode->i_state & I_NEW)
+	spin_lock(&inode_sb_list_lock);
+	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+		if (atomic_read(&inode->i_count))
 			continue;
-		invalidate_inode_buffers(inode);
-		if (!atomic_read(&inode->i_count)) {
-			list_move(&inode->i_list, dispose);
-			WARN_ON(inode->i_state & I_NEW);
-			inode->i_state |= I_FREEING;
-			count++;
+
+		spin_lock(&inode->i_lock);
+		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+			spin_unlock(&inode->i_lock);
 			continue;
 		}
-		busy = 1;
+
+		inode->i_state |= I_FREEING;
+		inode_lru_list_del(inode);
+		spin_unlock(&inode->i_lock);
+		list_add(&inode->i_lru, &dispose);
 	}
-	/* only unused inodes may be cached with i_count zero */
-	inodes_stat.nr_unused -= count;
-	return busy;
+	spin_unlock(&inode_sb_list_lock);
+
+	dispose_list(&dispose);
+
+	/*
+	 * Cycle through iprune_sem to make sure any inode that prune_icache
+	 * moved off the list before we took the lock has been fully torn
+	 * down.
+	 */
+	down_write(&iprune_sem);
+	up_write(&iprune_sem);
 }
 
 /**
- * invalidate_inodes	- discard the inodes on a device
- * @sb:			superblock
+ * invalidate_inodes	- attempt to free all inodes on a superblock
+ * @sb:		superblock to operate on
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
- * Discard all of the inodes for a given superblock. If the discard
- * fails because there are busy inodes then a non zero value is returned.
- * If the discard is successful all the inodes have been discarded.
+ * Attempts to free all inodes for a given superblock.  If there were any
+ * busy inodes return a non-zero value, else zero.
+ * If @kill_dirty is set, discard dirty inodes too, otherwise treat
+ * them as busy.
  */
-int invalidate_inodes(struct super_block *sb)
+int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 {
-	int busy;
-	LIST_HEAD(throw_away);
+	int busy = 0;
+	struct inode *inode, *next;
+	LIST_HEAD(dispose);
+
+	spin_lock(&inode_sb_list_lock);
+	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+		spin_lock(&inode->i_lock);
+		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+		if (inode->i_state & I_DIRTY && !kill_dirty) {
+			spin_unlock(&inode->i_lock);
+			busy = 1;
+			continue;
+		}
+		if (atomic_read(&inode->i_count)) {
+			spin_unlock(&inode->i_lock);
+			busy = 1;
+			continue;
+		}
 
-	down_write(&iprune_sem);
-	spin_lock(&inode_lock);
-	fsnotify_unmount_inodes(&sb->s_inodes);
-	busy = invalidate_list(&sb->s_inodes, &throw_away);
-	spin_unlock(&inode_lock);
+		inode->i_state |= I_FREEING;
+		inode_lru_list_del(inode);
+		spin_unlock(&inode->i_lock);
+		list_add(&inode->i_lru, &dispose);
+	}
+	spin_unlock(&inode_sb_list_lock);
 
-	dispose_list(&throw_away);
-	up_write(&iprune_sem);
+	dispose_list(&dispose);
 
 	return busy;
 }
-EXPORT_SYMBOL(invalidate_inodes);
 
 static int can_unuse(struct inode *inode)
 {
-	if (inode->i_state)
+	if (inode->i_state & ~I_REFERENCED)
 		return 0;
 	if (inode_has_buffers(inode))
 		return 0;
@@ -435,65 +609,99 @@ static int can_unuse(struct inode *inode)
 }
 
 /*
- * Scan `goal' inodes on the unused list for freeable ones. They are moved to
- * a temporary list and then are freed outside inode_lock by dispose_list().
+ * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
+ * temporary list and then are freed outside inode_lru_lock by dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
- * pagecache removed.  We expect the final iput() on that inode to add it to
- * the front of the inode_unused list.  So look for it there and if the
- * inode is still freeable, proceed.  The right inode is found 99.9% of the
- * time in testing on a 4-way.
+ * pagecache removed.  If the inode has metadata buffers attached to
+ * mapping->private_list then try to remove them.
  *
- * If the inode has metadata buffers attached to mapping->private_list then
- * try to remove them.
+ * If the inode has the I_REFERENCED flag set, then it means that it has been
+ * used recently - the flag is set in iput_final(). When we encounter such an
+ * inode, clear the flag and move it to the back of the LRU so it gets another
+ * pass through the LRU before it gets reclaimed. This is necessary because of
+ * the fact we are doing lazy LRU updates to minimise lock contention so the
+ * LRU does not have strict ordering. Hence we don't want to reclaim inodes
+ * with this flag set because they are the inodes that are out of order.
  */
 static void prune_icache(int nr_to_scan)
 {
 	LIST_HEAD(freeable);
-	int nr_pruned = 0;
 	int nr_scanned;
 	unsigned long reap = 0;
 
 	down_read(&iprune_sem);
-	spin_lock(&inode_lock);
+	spin_lock(&inode_lru_lock);
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
 
-		if (list_empty(&inode_unused))
+		if (list_empty(&inode_lru))
 			break;
 
-		inode = list_entry(inode_unused.prev, struct inode, i_list);
+		inode = list_entry(inode_lru.prev, struct inode, i_lru);
+
+		/*
+		 * we are inverting the inode_lru_lock/inode->i_lock here,
+		 * so use a trylock. If we fail to get the lock, just move the
+		 * inode to the back of the list so we don't spin on it.
+		 */
+		if (!spin_trylock(&inode->i_lock)) {
+			list_move(&inode->i_lru, &inode_lru);
+			continue;
+		}
+
+		/*
+		 * Referenced or dirty inodes are still in use. Give them
+		 * another pass through the LRU as we canot reclaim them now.
+		 */
+		if (atomic_read(&inode->i_count) ||
+		    (inode->i_state & ~I_REFERENCED)) {
+			list_del_init(&inode->i_lru);
+			spin_unlock(&inode->i_lock);
+			inodes_stat.nr_unused--;
+			continue;
+		}
 
-		if (inode->i_state || atomic_read(&inode->i_count)) {
-			list_move(&inode->i_list, &inode_unused);
+		/* recently referenced inodes get one more pass */
+		if (inode->i_state & I_REFERENCED) {
+			inode->i_state &= ~I_REFERENCED;
+			list_move(&inode->i_lru, &inode_lru);
+			spin_unlock(&inode->i_lock);
 			continue;
 		}
 		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 			__iget(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode->i_lock);
+			spin_unlock(&inode_lru_lock);
 			if (remove_inode_buffers(inode))
 				reap += invalidate_mapping_pages(&inode->i_data,
 								0, -1);
 			iput(inode);
-			spin_lock(&inode_lock);
+			spin_lock(&inode_lru_lock);
 
-			if (inode != list_entry(inode_unused.next,
-						struct inode, i_list))
+			if (inode != list_entry(inode_lru.next,
+						struct inode, i_lru))
 				continue;	/* wrong inode or list_empty */
-			if (!can_unuse(inode))
+			/* avoid lock inversions with trylock */
+			if (!spin_trylock(&inode->i_lock))
 				continue;
+			if (!can_unuse(inode)) {
+				spin_unlock(&inode->i_lock);
+				continue;
+			}
 		}
-		list_move(&inode->i_list, &freeable);
 		WARN_ON(inode->i_state & I_NEW);
 		inode->i_state |= I_FREEING;
-		nr_pruned++;
+		spin_unlock(&inode->i_lock);
+
+		list_move(&inode->i_lru, &freeable);
+		inodes_stat.nr_unused--;
 	}
-	inodes_stat.nr_unused -= nr_pruned;
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
 	else
 		__count_vm_events(PGINODESTEAL, reap);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_lru_lock);
 
 	dispose_list(&freeable);
 	up_read(&iprune_sem);
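
[Editor's note] prune_icache() above takes inode->i_lock while already holding inode_lru_lock, which is the reverse of the documented ranking, so it may only *try* the inner lock and must back off on failure. A minimal sketch of that trylock pattern (illustration only; pthreads stand in for spinlocks and all names are invented):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

/* documented order is obj_lock -> lru_lock; we hold lru_lock here,
 * so a blocking lock on obj_lock could deadlock against that order */
static int try_reclaim_one(void)
{
	int reclaimed = 0;

	pthread_mutex_lock(&lru_lock);
	if (pthread_mutex_trylock(&obj_lock) == 0) {
		/* ... inspect and reclaim the object ... */
		reclaimed = 1;
		pthread_mutex_unlock(&obj_lock);
	}
	/* on trylock failure the kernel just rotates the inode to the
	 * back of the LRU and moves on rather than spinning */
	pthread_mutex_unlock(&lru_lock);
	return reclaimed;
}

int main(void)
{
	printf("reclaimed: %d\n", try_reclaim_one());
	return 0;
}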
@@ -508,8 +716,12 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink,
+				struct shrink_control *sc)
 {
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
+
 	if (nr) {
 		/*
 		 * Nasty deadlock avoidance.  We may hold various FS locks,
@@ -520,7 +732,7 @@ static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 			return -1;
 		prune_icache(nr);
 	}
-	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+	return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker icache_shrinker = {
@@ -531,9 +743,6 @@ static struct shrinker icache_shrinker = {
 static void __wait_on_freeing_inode(struct inode *inode);
 /*
  * Called with the inode lock held.
- * NOTE: we are not increasing the inode-refcount, you must call __iget()
- * by hand after calling find_inode now! This simplifies iunique and won't
- * add any additional branch in the common code.
  */
 static struct inode *find_inode(struct super_block *sb,
 				struct hlist_head *head,
@@ -545,17 +754,24 @@ static struct inode *find_inode(struct super_block *sb,
 
 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_sb != sb)
+		spin_lock(&inode->i_lock);
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
 			continue;
-		if (!test(inode, data))
+		}
+		if (!test(inode, data)) {
+			spin_unlock(&inode->i_lock);
 			continue;
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
 		}
-		break;
+		__iget(inode);
+		spin_unlock(&inode->i_lock);
+		return inode;
 	}
-	return node ? inode : NULL;
+	return NULL;
 }
 
 /*
@@ -570,61 +786,63 @@ static struct inode *find_inode_fast(struct super_block *sb,
 
 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_ino != ino)
+		spin_lock(&inode->i_lock);
+		if (inode->i_ino != ino) {
+			spin_unlock(&inode->i_lock);
 			continue;
-		if (inode->i_sb != sb)
+		}
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
 			continue;
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
 		}
-		break;
+		__iget(inode);
+		spin_unlock(&inode->i_lock);
+		return inode;
 	}
-	return node ? inode : NULL;
-}
-
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
-	unsigned long tmp;
-
-	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
-			L1_CACHE_BYTES;
-	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-	return tmp & I_HASHMASK;
-}
-
-static inline void
-__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
-			struct inode *inode)
-{
-	inodes_stat.nr_inodes++;
-	list_add(&inode->i_list, &inode_in_use);
-	list_add(&inode->i_sb_list, &sb->s_inodes);
-	if (head)
-		hlist_add_head(&inode->i_hash, head);
+	return NULL;
 }
 
-/**
- * inode_add_to_lists - add a new inode to relevant lists
- * @sb: superblock inode belongs to
- * @inode: inode to mark in use
+/*
+ * Each cpu owns a range of LAST_INO_BATCH numbers.
+ * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
+ * to renew the exhausted range.
+ *
+ * This does not significantly increase overflow rate because every CPU can
+ * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
+ * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
+ * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
+ * overflow rate by 2x, which does not seem too significant.
  *
- * When an inode is allocated it needs to be accounted for, added to the in use
- * list, the owning superblock and the inode hash. This needs to be done under
- * the inode_lock, so export a function to do this rather than the inode lock
- * itself. We calculate the hash list to add to here so it is all internal
- * which requires the caller to have already set up the inode number in the
- * inode to add.
+ * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
+ * error if st_ino won't fit in target struct field. Use 32bit counter
+ * here to attempt to avoid that.
  */
-void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+#define LAST_INO_BATCH 1024
+static DEFINE_PER_CPU(unsigned int, last_ino);
+
+unsigned int get_next_ino(void)
 {
-	struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+	unsigned int *p = &get_cpu_var(last_ino);
+	unsigned int res = *p;
 
-	spin_lock(&inode_lock);
-	__inode_add_to_lists(sb, head, inode);
-	spin_unlock(&inode_lock);
+#ifdef CONFIG_SMP
+	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+		static atomic_t shared_last_ino;
+		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
+
+		res = next - LAST_INO_BATCH;
+	}
+#endif
+
+	*p = ++res;
+	put_cpu_var(last_ino);
+	return res;
 }
-EXPORT_SYMBOL_GPL(inode_add_to_lists);
+EXPORT_SYMBOL(get_next_ino);
 
 /**
  * new_inode - obtain an inode
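
[Editor's note] get_next_ino() above touches the shared counter only once per LAST_INO_BATCH allocations; each CPU then hands out numbers from its private range with no further synchronisation. A portable C11 sketch of the same batching (illustration only; thread-local storage stands in for the per-CPU variable):

#include <stdio.h>
#include <stdatomic.h>

#define LAST_INO_BATCH 1024

static atomic_uint shared_last_ino;
static _Thread_local unsigned int last_ino;	/* stands in for the per-CPU slot */

static unsigned int next_ino(void)
{
	unsigned int res = last_ino;

	/* refill from the shared counter once per LAST_INO_BATCH numbers */
	if ((res & (LAST_INO_BATCH - 1)) == 0) {
		unsigned int next = atomic_fetch_add(&shared_last_ino,
						     LAST_INO_BATCH) + LAST_INO_BATCH;
		res = next - LAST_INO_BATCH;
	}
	last_ino = ++res;
	return res;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%u\n", next_ino());	/* prints 1 2 3 4 5 */
	return 0;
}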
@@ -640,32 +858,32 @@ EXPORT_SYMBOL_GPL(inode_add_to_lists);
  */
 struct inode *new_inode(struct super_block *sb)
 {
-	/*
-	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
-	 * error if st_ino won't fit in target struct field. Use 32bit counter
-	 * here to attempt to avoid that.
-	 */
-	static unsigned int last_ino;
 	struct inode *inode;
 
-	spin_lock_prefetch(&inode_lock);
+	spin_lock_prefetch(&inode_sb_list_lock);
 
 	inode = alloc_inode(sb);
 	if (inode) {
-		spin_lock(&inode_lock);
-		__inode_add_to_lists(sb, NULL, inode);
-		inode->i_ino = ++last_ino;
+		spin_lock(&inode->i_lock);
 		inode->i_state = 0;
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode->i_lock);
+		inode_sb_list_add(inode);
 	}
 	return inode;
 }
 EXPORT_SYMBOL(new_inode);
 
+/**
+ * unlock_new_inode - clear the I_NEW state and wake up any waiters
+ * @inode:	new inode to unlock
+ *
+ * Called when the inode is fully initialised to clear the new state of the
+ * inode and wake up anyone waiting for the inode to finish initialisation.
+ */
 void unlock_new_inode(struct inode *inode)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-	if (inode->i_mode & S_IFDIR) {
+	if (S_ISDIR(inode->i_mode)) {
 		struct file_system_type *type = inode->i_sb->s_type;
 
 		/* Set new key only if filesystem hasn't already changed it */
@@ -681,50 +899,67 @@ void unlock_new_inode(struct inode *inode)
 		}
 	}
 #endif
-	/*
-	 * This is special!  We do not need the spinlock when clearing I_NEW,
-	 * because we're guaranteed that nobody else tries to do anything about
-	 * the state of the inode when it is locked, as we just created it (so
-	 * there can be no old holders that haven't tested I_NEW).
-	 * However we must emit the memory barrier so that other CPUs reliably
-	 * see the clearing of I_NEW after the other inode initialisation has
-	 * completed.
-	 */
-	smp_mb();
+	spin_lock(&inode->i_lock);
 	WARN_ON(!(inode->i_state & I_NEW));
 	inode->i_state &= ~I_NEW;
-	wake_up_inode(inode);
+	wake_up_bit(&inode->i_state, __I_NEW);
+	spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(unlock_new_inode);
 
-/*
- * This is called without the inode lock held.. Be careful.
+/**
+ * iget5_locked - obtain an inode from a mounted file system
+ * @sb:		super block of file system
+ * @hashval:	hash value (usually inode number) to get
+ * @test:	callback used for comparisons between inodes
+ * @set:	callback used to initialize a new struct inode
+ * @data:	opaque data pointer to pass to @test and @set
  *
- * We no longer cache the sb_flags in i_flags - see fs.h
- *	-- rmk@arm.uk.linux.org
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * and if present it is return it with an increased reference count. This is
+ * a generalized version of iget_locked() for file systems where the inode
+ * number is not sufficient for unique identification of an inode.
+ *
+ * If the inode is not in cache, allocate a new inode and return it locked,
+ * hashed, and with the I_NEW flag set. The file system gets to fill it in
+ * before unlocking it via unlock_new_inode().
+ *
+ * Note both @test and @set are called with the inode_hash_lock held, so can't
+ * sleep.
  */
-static struct inode *get_new_inode(struct super_block *sb,
-				struct hlist_head *head,
-				int (*test)(struct inode *, void *),
-				int (*set)(struct inode *, void *),
-				void *data)
+struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
+		int (*test)(struct inode *, void *),
+		int (*set)(struct inode *, void *), void *data)
 {
+	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 	struct inode *inode;
 
+	spin_lock(&inode_hash_lock);
+	inode = find_inode(sb, head, test, data);
+	spin_unlock(&inode_hash_lock);
+
+	if (inode) {
+		wait_on_inode(inode);
+		return inode;
+	}
+
 	inode = alloc_inode(sb);
 	if (inode) {
 		struct inode *old;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode(sb, head, test, data);
 		if (!old) {
 			if (set(inode, data))
 				goto set_failed;
 
-			__inode_add_to_lists(sb, head, inode);
+			spin_lock(&inode->i_lock);
 			inode->i_state = I_NEW;
-			spin_unlock(&inode_lock);
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode->i_lock);
+			inode_sb_list_add(inode);
+			spin_unlock(&inode_hash_lock);
 
 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
@@ -737,8 +972,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		__iget(old);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
@@ -746,32 +980,53 @@ static struct inode *get_new_inode(struct super_block *sb,
 	return inode;
 
 set_failed:
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	destroy_inode(inode);
 	return NULL;
 }
+EXPORT_SYMBOL(iget5_locked);
 
-/*
- * get_new_inode_fast is the fast path version of get_new_inode, see the
- * comment at iget_locked for details.
+/**
+ * iget_locked - obtain an inode from a mounted file system
+ * @sb:		super block of file system
+ * @ino:	inode number to get
+ *
+ * Search for the inode specified by @ino in the inode cache and if present
+ * return it with an increased reference count. This is for file systems
+ * where the inode number is sufficient for unique identification of an inode.
+ *
+ * If the inode is not in cache, allocate a new inode and return it locked,
+ * hashed, and with the I_NEW flag set. The file system gets to fill it in
+ * before unlocking it via unlock_new_inode().
  */
-static struct inode *get_new_inode_fast(struct super_block *sb,
-				struct hlist_head *head, unsigned long ino)
+struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 {
+	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 	struct inode *inode;
 
+	spin_lock(&inode_hash_lock);
+	inode = find_inode_fast(sb, head, ino);
+	spin_unlock(&inode_hash_lock);
+	if (inode) {
+		wait_on_inode(inode);
+		return inode;
+	}
+
 	inode = alloc_inode(sb);
 	if (inode) {
 		struct inode *old;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
 			inode->i_ino = ino;
-			__inode_add_to_lists(sb, head, inode);
+			spin_lock(&inode->i_lock);
 			inode->i_state = I_NEW;
-			spin_unlock(&inode_lock);
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode->i_lock);
+			inode_sb_list_add(inode);
+			spin_unlock(&inode_hash_lock);
 
 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
@@ -784,14 +1039,39 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		__iget(old);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
 	}
 	return inode;
 }
+EXPORT_SYMBOL(iget_locked);
+
+/*
+ * search the inode cache for a matching inode number.
+ * If we find one, then the inode number we are trying to
+ * allocate is not unique and so we should not use it.
+ *
+ * Returns 1 if the inode number is unique, 0 if it is not.
+ */
+static int test_inode_iunique(struct super_block *sb, unsigned long ino)
+{
+	struct hlist_head *b = inode_hashtable + hash(sb, ino);
+	struct hlist_node *node;
+	struct inode *inode;
+
+	spin_lock(&inode_hash_lock);
+	hlist_for_each_entry(inode, node, b, i_hash) {
+		if (inode->i_ino == ino && inode->i_sb == sb) {
+			spin_unlock(&inode_hash_lock);
+			return 0;
+		}
+	}
+	spin_unlock(&inode_hash_lock);
+
+	return 1;
+}
 
 /**
  * iunique - get a unique inode number
@@ -814,20 +1094,17 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
  * error if st_ino won't fit in target struct field. Use 32bit counter
  * here to attempt to avoid that.
  */
+	static DEFINE_SPINLOCK(iunique_lock);
 	static unsigned int counter;
-	struct inode *inode;
-	struct hlist_head *head;
 	ino_t res;
 
-	spin_lock(&inode_lock);
+	spin_lock(&iunique_lock);
 	do {
 		if (counter <= max_reserved)
 			counter = max_reserved + 1;
 		res = counter++;
-		head = inode_hashtable + hash(sb, res);
-		inode = find_inode_fast(sb, head, res);
-	} while (inode != NULL);
-	spin_unlock(&inode_lock);
+	} while (!test_inode_iunique(sb, res));
+	spin_unlock(&iunique_lock);
 
 	return res;
 }
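
[Editor's note] The reworked iunique() above keeps bumping a private counter and probes the inode hash through test_inode_iunique() until it finds a number not in use. A toy sketch of that probe loop (illustration only; a fixed array stands in for the inode hash table):

#include <stdio.h>

/* toy stand-in for the inode hash: inode numbers currently in use */
static const unsigned long in_use[] = { 1, 2, 3 };

/* analogous to test_inode_iunique(): 1 if ino is free, 0 if taken */
static int test_ino_unique(unsigned long ino)
{
	for (unsigned int i = 0; i < sizeof(in_use) / sizeof(in_use[0]); i++)
		if (in_use[i] == ino)
			return 0;
	return 1;
}

static unsigned long toy_iunique(unsigned long max_reserved)
{
	static unsigned long counter;
	unsigned long res;

	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_ino_unique(res));
	return res;
}

int main(void)
{
	printf("%lu\n", toy_iunique(2));	/* 3 is taken, so prints 4 */
	return 0;
}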
@@ -835,118 +1112,50 @@ EXPORT_SYMBOL(iunique);
 
 struct inode *igrab(struct inode *inode)
 {
-	spin_lock(&inode_lock);
-	if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
+	spin_lock(&inode->i_lock);
+	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
 		__iget(inode);
-	else
+		spin_unlock(&inode->i_lock);
+	} else {
+		spin_unlock(&inode->i_lock);
 		/*
 		 * Handle the case where s_op->clear_inode is not been
 		 * called yet, and somebody is calling igrab
 		 * while the inode is getting freed.
 		 */
 		inode = NULL;
-	spin_unlock(&inode_lock);
+	}
 	return inode;
 }
 EXPORT_SYMBOL(igrab);
 
 /**
- * ifind - internal function, you want ilookup5() or iget5().
- * @sb:		super block of file system to search
- * @head:	the head of the list to search
- * @test:	callback used for comparisons between inodes
- * @data:	opaque data pointer to pass to @test
- * @wait:	if true wait for the inode to be unlocked, if false do not
- *
- * ifind() searches for the inode specified by @data in the inode
- * cache. This is a generalized version of ifind_fast() for file systems where
- * the inode number is not sufficient for unique identification of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
- *
- * Note, @test is called with the inode_lock held, so can't sleep.
- */
-static struct inode *ifind(struct super_block *sb,
-		struct hlist_head *head, int (*test)(struct inode *, void *),
-		void *data, const int wait)
-{
-	struct inode *inode;
-
-	spin_lock(&inode_lock);
-	inode = find_inode(sb, head, test, data);
-	if (inode) {
-		__iget(inode);
-		spin_unlock(&inode_lock);
-		if (likely(wait))
-			wait_on_inode(inode);
-		return inode;
-	}
-	spin_unlock(&inode_lock);
-	return NULL;
-}
-
-/**
- * ifind_fast - internal function, you want ilookup() or iget().
- * @sb:		super block of file system to search
- * @head:	head of the list to search
- * @ino:	inode number to search for
- *
- * ifind_fast() searches for the inode @ino in the inode cache. This is for
- * file systems where the inode number is sufficient for unique identification
- * of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
- */
-static struct inode *ifind_fast(struct super_block *sb,
-		struct hlist_head *head, unsigned long ino)
-{
-	struct inode *inode;
-
-	spin_lock(&inode_lock);
-	inode = find_inode_fast(sb, head, ino);
-	if (inode) {
-		__iget(inode);
-		spin_unlock(&inode_lock);
-		wait_on_inode(inode);
-		return inode;
-	}
-	spin_unlock(&inode_lock);
-	return NULL;
-}
-
-/**
  * ilookup5_nowait - search for an inode in the inode cache
  * @sb:		super block of file system to search
  * @hashval:	hash value (usually inode number) to search for
  * @test:	callback used for comparisons between inodes
  * @data:	opaque data pointer to pass to @test
  *
- * ilookup5() uses ifind() to search for the inode specified by @hashval and
- * @data in the inode cache. This is a generalized version of ilookup() for
- * file systems where the inode number is not sufficient for unique
- * identification of an inode.
- *
+ * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
- * reference count.  Note, the inode lock is not waited upon so you have to be
- * very careful what you do with the returned inode.  You probably should be
- * using ilookup5() instead.
+ * reference count.
  *
- * Otherwise NULL is returned.
+ * Note: I_NEW is not waited upon so you have to be very careful what you do
+ * with the returned inode.  You probably should be using ilookup5() instead.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note2: @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+	struct inode *inode;
+
+	spin_lock(&inode_hash_lock);
+	inode = find_inode(sb, head, test, data);
+	spin_unlock(&inode_hash_lock);
 
-	return ifind(sb, head, test, data, 0);
+	return inode;
 }
 EXPORT_SYMBOL(ilookup5_nowait);
 
@@ -957,24 +1166,24 @@ EXPORT_SYMBOL(ilookup5_nowait);
  * @test:	callback used for comparisons between inodes
  * @data:	opaque data pointer to pass to @test
  *
- * ilookup5() uses ifind() to search for the inode specified by @hashval and
- * @data in the inode cache. This is a generalized version of ilookup() for
- * file systems where the inode number is not sufficient for unique
- * identification of an inode.
- *
- * If the inode is in the cache, the inode lock is waited upon and the inode is
- * returned with an incremented reference count.
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * and if the inode is in the cache, return the inode with an incremented
+ * reference count.  Waits on I_NEW before returning the inode.
  *
- * Otherwise NULL is returned.
+ * This is a generalized version of ilookup() for file systems where the
+ * inode number is not sufficient for unique identification of an inode.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note: @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
 {
-	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
 
-	return ifind(sb, head, test, data, 1);
+	if (inode)
+		wait_on_inode(inode);
+	return inode;
 }
 EXPORT_SYMBOL(ilookup5);
 
@@ -983,91 +1192,23 @@ EXPORT_SYMBOL(ilookup5);
983 * @sb: super block of file system to search 1192 * @sb: super block of file system to search
984 * @ino: inode number to search for 1193 * @ino: inode number to search for
985 * 1194 *
986 * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache. 1195 * Search for the inode @ino in the inode cache, and if the inode is in the
987 * This is for file systems where the inode number is sufficient for unique 1196 * cache, the inode is returned with an incremented reference count.
988 * identification of an inode.
989 *
990 * If the inode is in the cache, the inode is returned with an incremented
991 * reference count.
992 *
993 * Otherwise NULL is returned.
994 */ 1197 */
995struct inode *ilookup(struct super_block *sb, unsigned long ino) 1198struct inode *ilookup(struct super_block *sb, unsigned long ino)
996{ 1199{
997 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1200 struct hlist_head *head = inode_hashtable + hash(sb, ino);
998
999 return ifind_fast(sb, head, ino);
1000}
1001EXPORT_SYMBOL(ilookup);
1002
1003/**
1004 * iget5_locked - obtain an inode from a mounted file system
1005 * @sb: super block of file system
1006 * @hashval: hash value (usually inode number) to get
1007 * @test: callback used for comparisons between inodes
1008 * @set: callback used to initialize a new struct inode
1009 * @data: opaque data pointer to pass to @test and @set
1010 *
1011 * iget5_locked() uses ifind() to search for the inode specified by @hashval
1012 * and @data in the inode cache and if present it is returned with an increased
1013 * reference count. This is a generalized version of iget_locked() for file
1014 * systems where the inode number is not sufficient for unique identification
1015 * of an inode.
1016 *
1017 * If the inode is not in cache, get_new_inode() is called to allocate a new
1018 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
1019 * file system gets to fill it in before unlocking it via unlock_new_inode().
1020 *
1021 * Note both @test and @set are called with the inode_lock held, so can't sleep.
1022 */
1023struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1024 int (*test)(struct inode *, void *),
1025 int (*set)(struct inode *, void *), void *data)
1026{
1027 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1028 struct inode *inode; 1201 struct inode *inode;
1029 1202
1030 inode = ifind(sb, head, test, data, 1); 1203 spin_lock(&inode_hash_lock);
1031 if (inode) 1204 inode = find_inode_fast(sb, head, ino);
1032 return inode; 1205 spin_unlock(&inode_hash_lock);
1033 /*
1034 * get_new_inode() will do the right thing, re-trying the search
1035 * in case it had to block at any point.
1036 */
1037 return get_new_inode(sb, head, test, set, data);
1038}
1039EXPORT_SYMBOL(iget5_locked);
1040
1041/**
1042 * iget_locked - obtain an inode from a mounted file system
1043 * @sb: super block of file system
1044 * @ino: inode number to get
1045 *
1046 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
1047 * the inode cache and if present it is returned with an increased reference
1048 * count. This is for file systems where the inode number is sufficient for
1049 * unique identification of an inode.
1050 *
1051 * If the inode is not in cache, get_new_inode_fast() is called to allocate a
1052 * new inode and this is returned locked, hashed, and with the I_NEW flag set.
1053 * The file system gets to fill it in before unlocking it via
1054 * unlock_new_inode().
1055 */
1056struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1057{
1058 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1059 struct inode *inode;
1060 1206
1061 inode = ifind_fast(sb, head, ino);
1062 if (inode) 1207 if (inode)
1063 return inode; 1208 wait_on_inode(inode);
1064 /* 1209 return inode;
1065 * get_new_inode_fast() will do the right thing, re-trying the search
1066 * in case it had to block at any point.
1067 */
1068 return get_new_inode_fast(sb, head, ino);
1069} 1210}
1070EXPORT_SYMBOL(iget_locked); 1211EXPORT_SYMBOL(ilookup);
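/*
 * The kernel-doc removed above described iget5_locked() and iget_locked();
 * both still exist in v3.0 earlier in this file, rebuilt on find_inode()/
 * find_inode_fast() under inode_hash_lock instead of the old ifind()/
 * get_new_inode() helpers.  The canonical caller pattern is unchanged;
 * a sketch follows, where myfs_iget() and myfs_read_inode() are
 * hypothetical names:
 */
static struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* only reached for a freshly allocated, hashed inode */
		myfs_read_inode(inode);		/* fill from backing store */
		unlock_new_inode(inode);	/* clears I_NEW, wakes waiters */
	}
	return inode;
}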
1071 1212
1072int insert_inode_locked(struct inode *inode) 1213int insert_inode_locked(struct inode *inode)
1073{ 1214{
@@ -1075,29 +1216,35 @@ int insert_inode_locked(struct inode *inode)
1075 ino_t ino = inode->i_ino; 1216 ino_t ino = inode->i_ino;
1076 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1217 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1077 1218
1078 inode->i_state |= I_NEW;
1079 while (1) { 1219 while (1) {
1080 struct hlist_node *node; 1220 struct hlist_node *node;
1081 struct inode *old = NULL; 1221 struct inode *old = NULL;
1082 spin_lock(&inode_lock); 1222 spin_lock(&inode_hash_lock);
1083 hlist_for_each_entry(old, node, head, i_hash) { 1223 hlist_for_each_entry(old, node, head, i_hash) {
1084 if (old->i_ino != ino) 1224 if (old->i_ino != ino)
1085 continue; 1225 continue;
1086 if (old->i_sb != sb) 1226 if (old->i_sb != sb)
1087 continue; 1227 continue;
1088 if (old->i_state & (I_FREEING|I_WILL_FREE)) 1228 spin_lock(&old->i_lock);
1229 if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1230 spin_unlock(&old->i_lock);
1089 continue; 1231 continue;
1232 }
1090 break; 1233 break;
1091 } 1234 }
1092 if (likely(!node)) { 1235 if (likely(!node)) {
1236 spin_lock(&inode->i_lock);
1237 inode->i_state |= I_NEW;
1093 hlist_add_head(&inode->i_hash, head); 1238 hlist_add_head(&inode->i_hash, head);
1094 spin_unlock(&inode_lock); 1239 spin_unlock(&inode->i_lock);
1240 spin_unlock(&inode_hash_lock);
1095 return 0; 1241 return 0;
1096 } 1242 }
1097 __iget(old); 1243 __iget(old);
1098 spin_unlock(&inode_lock); 1244 spin_unlock(&old->i_lock);
1245 spin_unlock(&inode_hash_lock);
1099 wait_on_inode(old); 1246 wait_on_inode(old);
1100 if (unlikely(!hlist_unhashed(&old->i_hash))) { 1247 if (unlikely(!inode_unhashed(old))) {
1101 iput(old); 1248 iput(old);
1102 return -EBUSY; 1249 return -EBUSY;
1103 } 1250 }
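/*
 * Locking note for the rewritten loop above: inode_hash_lock is taken first
 * and each candidate's i_lock nests inside it, matching the ordering
 * documented at the top of this file.  Setting I_NEW also moved from the
 * function entry to the success path, under the new inode's i_lock.  A
 * condensed sketch of the nesting (hash_walk_sketch() is hypothetical):
 */
static struct inode *hash_walk_sketch(struct super_block *sb,
				      struct hlist_head *head, ino_t ino)
{
	struct hlist_node *node;
	struct inode *old;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(old, node, head, i_hash) {
		if (old->i_ino != ino || old->i_sb != sb)
			continue;
		spin_lock(&old->i_lock);	/* nests inside inode_hash_lock */
		if (old->i_state & (I_FREEING | I_WILL_FREE)) {
			spin_unlock(&old->i_lock);
			continue;
		}
		__iget(old);			/* i_count bump under i_lock */
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		return old;
	}
	spin_unlock(&inode_hash_lock);
	return NULL;
}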
@@ -1112,31 +1259,36 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1112 struct super_block *sb = inode->i_sb; 1259 struct super_block *sb = inode->i_sb;
1113 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1260 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1114 1261
1115 inode->i_state |= I_NEW;
1116
1117 while (1) { 1262 while (1) {
1118 struct hlist_node *node; 1263 struct hlist_node *node;
1119 struct inode *old = NULL; 1264 struct inode *old = NULL;
1120 1265
1121 spin_lock(&inode_lock); 1266 spin_lock(&inode_hash_lock);
1122 hlist_for_each_entry(old, node, head, i_hash) { 1267 hlist_for_each_entry(old, node, head, i_hash) {
1123 if (old->i_sb != sb) 1268 if (old->i_sb != sb)
1124 continue; 1269 continue;
1125 if (!test(old, data)) 1270 if (!test(old, data))
1126 continue; 1271 continue;
1127 if (old->i_state & (I_FREEING|I_WILL_FREE)) 1272 spin_lock(&old->i_lock);
1273 if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1274 spin_unlock(&old->i_lock);
1128 continue; 1275 continue;
1276 }
1129 break; 1277 break;
1130 } 1278 }
1131 if (likely(!node)) { 1279 if (likely(!node)) {
1280 spin_lock(&inode->i_lock);
1281 inode->i_state |= I_NEW;
1132 hlist_add_head(&inode->i_hash, head); 1282 hlist_add_head(&inode->i_hash, head);
1133 spin_unlock(&inode_lock); 1283 spin_unlock(&inode->i_lock);
1284 spin_unlock(&inode_hash_lock);
1134 return 0; 1285 return 0;
1135 } 1286 }
1136 __iget(old); 1287 __iget(old);
1137 spin_unlock(&inode_lock); 1288 spin_unlock(&old->i_lock);
1289 spin_unlock(&inode_hash_lock);
1138 wait_on_inode(old); 1290 wait_on_inode(old);
1139 if (unlikely(!hlist_unhashed(&old->i_hash))) { 1291 if (unlikely(!inode_unhashed(old))) {
1140 iput(old); 1292 iput(old);
1141 return -EBUSY; 1293 return -EBUSY;
1142 } 1294 }
@@ -1145,36 +1297,6 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1145} 1297}
1146EXPORT_SYMBOL(insert_inode_locked4); 1298EXPORT_SYMBOL(insert_inode_locked4);
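/*
 * Caller sketch (hypothetical, reusing myfs_test() from the ilookup5()
 * sketch above): a filesystem whose inodes are identified by a key other
 * than i_ino hashes a freshly allocated inode with insert_inode_locked4().
 * -EBUSY means an alive inode with the same identity is already hashed,
 * and the caller must back out, typically disposing of @inode via iput().
 */
static int myfs_hash_new_inode(struct inode *inode, struct myfs_key *key)
{
	/* on success the inode is hashed and marked I_NEW */
	return insert_inode_locked4(inode, key->object_id, myfs_test, key);
}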
1147 1299
1148/**
1149 * __insert_inode_hash - hash an inode
1150 * @inode: unhashed inode
1151 * @hashval: unsigned long value used to locate this object in the
1152 * inode_hashtable.
1153 *
1154 * Add an inode to the inode hash for this superblock.
1155 */
1156void __insert_inode_hash(struct inode *inode, unsigned long hashval)
1157{
1158 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1159 spin_lock(&inode_lock);
1160 hlist_add_head(&inode->i_hash, head);
1161 spin_unlock(&inode_lock);
1162}
1163EXPORT_SYMBOL(__insert_inode_hash);
1164
1165/**
1166 * remove_inode_hash - remove an inode from the hash
1167 * @inode: inode to unhash
1168 *
1169 * Remove an inode from the superblock.
1170 */
1171void remove_inode_hash(struct inode *inode)
1172{
1173 spin_lock(&inode_lock);
1174 hlist_del_init(&inode->i_hash);
1175 spin_unlock(&inode_lock);
1176}
1177EXPORT_SYMBOL(remove_inode_hash);
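/*
 * __insert_inode_hash() and remove_inode_hash() are deleted here because
 * v3.0 keeps both earlier in this file, converted to the split locks.
 * Approximate v3.0 shape (a sketch from the same lock-breakup series;
 * remove_inode_hash() mirrors it with hlist_del_init()):
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);	/* i_hash changes need both locks */
	hlist_add_head(&inode->i_hash, head);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}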
1178 1300
1179int generic_delete_inode(struct inode *inode) 1301int generic_delete_inode(struct inode *inode)
1180{ 1302{
@@ -1189,7 +1311,7 @@ EXPORT_SYMBOL(generic_delete_inode);
1189 */ 1311 */
1190int generic_drop_inode(struct inode *inode) 1312int generic_drop_inode(struct inode *inode)
1191{ 1313{
1192 return !inode->i_nlink || hlist_unhashed(&inode->i_hash); 1314 return !inode->i_nlink || inode_unhashed(inode);
1193} 1315}
1194EXPORT_SYMBOL_GPL(generic_drop_inode); 1316EXPORT_SYMBOL_GPL(generic_drop_inode);
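/*
 * inode_unhashed() replaces the open-coded hlist_unhashed(&inode->i_hash)
 * checks in this file; its v3.0 definition in include/linux/fs.h is simply:
 */
static inline int inode_unhashed(struct inode *inode)
{
	return hlist_unhashed(&inode->i_hash);
}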
1195 1317
@@ -1209,42 +1331,35 @@ static void iput_final(struct inode *inode)
1209 const struct super_operations *op = inode->i_sb->s_op; 1331 const struct super_operations *op = inode->i_sb->s_op;
1210 int drop; 1332 int drop;
1211 1333
1334 WARN_ON(inode->i_state & I_NEW);
1335
1212 if (op && op->drop_inode) 1336 if (op && op->drop_inode)
1213 drop = op->drop_inode(inode); 1337 drop = op->drop_inode(inode);
1214 else 1338 else
1215 drop = generic_drop_inode(inode); 1339 drop = generic_drop_inode(inode);
1216 1340
1217 if (!drop) { 1341 if (!drop && (sb->s_flags & MS_ACTIVE)) {
1342 inode->i_state |= I_REFERENCED;
1218 if (!(inode->i_state & (I_DIRTY|I_SYNC))) 1343 if (!(inode->i_state & (I_DIRTY|I_SYNC)))
1219 list_move(&inode->i_list, &inode_unused); 1344 inode_lru_list_add(inode);
1220 inodes_stat.nr_unused++; 1345 spin_unlock(&inode->i_lock);
1221 if (sb->s_flags & MS_ACTIVE) { 1346 return;
1222 spin_unlock(&inode_lock); 1347 }
1223 return; 1348
1224 } 1349 if (!drop) {
1225 WARN_ON(inode->i_state & I_NEW);
1226 inode->i_state |= I_WILL_FREE; 1350 inode->i_state |= I_WILL_FREE;
1227 spin_unlock(&inode_lock); 1351 spin_unlock(&inode->i_lock);
1228 write_inode_now(inode, 1); 1352 write_inode_now(inode, 1);
1229 spin_lock(&inode_lock); 1353 spin_lock(&inode->i_lock);
1230 WARN_ON(inode->i_state & I_NEW); 1354 WARN_ON(inode->i_state & I_NEW);
1231 inode->i_state &= ~I_WILL_FREE; 1355 inode->i_state &= ~I_WILL_FREE;
1232 inodes_stat.nr_unused--;
1233 hlist_del_init(&inode->i_hash);
1234 } 1356 }
1235 list_del_init(&inode->i_list); 1357
1236 list_del_init(&inode->i_sb_list);
1237 WARN_ON(inode->i_state & I_NEW);
1238 inode->i_state |= I_FREEING; 1358 inode->i_state |= I_FREEING;
1239 inodes_stat.nr_inodes--; 1359 inode_lru_list_del(inode);
1240 spin_unlock(&inode_lock); 1360 spin_unlock(&inode->i_lock);
1361
1241 evict(inode); 1362 evict(inode);
1242 spin_lock(&inode_lock);
1243 hlist_del_init(&inode->i_hash);
1244 spin_unlock(&inode_lock);
1245 wake_up_inode(inode);
1246 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
1247 destroy_inode(inode);
1248} 1363}
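/*
 * The rewritten iput_final() above decides, entirely under inode->i_lock,
 * between three outcomes: keep the inode cached (I_REFERENCED plus the
 * LRU), write it back first (I_WILL_FREE), or free it (I_FREEING), with
 * evict() now responsible for the unhashing, wakeup and destroy_inode()
 * that used to follow inline here.  Sketch of the LRU hook it calls,
 * approximating the v3.0 helper with the statistics accounting elided:
 */
static void inode_lru_list_add_sketch(struct inode *inode)
{
	spin_lock(&inode_lru_lock);
	if (list_empty(&inode->i_lru))
		list_add(&inode->i_lru, &inode_lru);	/* head = most recent */
	spin_unlock(&inode_lru_lock);
}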
1249 1364
1250/** 1365/**
@@ -1261,7 +1376,7 @@ void iput(struct inode *inode)
1261 if (inode) { 1376 if (inode) {
1262 BUG_ON(inode->i_state & I_CLEAR); 1377 BUG_ON(inode->i_state & I_CLEAR);
1263 1378
1264 if (atomic_dec_and_lock(&inode->i_count, &inode_lock)) 1379 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
1265 iput_final(inode); 1380 iput_final(inode);
1266 } 1381 }
1267} 1382}
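/*
 * iput() now pairs the reference drop with inode->i_lock instead of the
 * global inode_lock.  atomic_dec_and_lock() takes the lock only on the
 * final 1 -> 0 transition, so iput_final() always starts with i_lock held
 * and uncontended puts never touch it.  Equivalent logic as a sketch (not
 * the lib/dec_and_lock.c source):
 */
static int dec_and_lock_sketch(atomic_t *cnt, spinlock_t *lock)
{
	if (atomic_add_unless(cnt, -1, 1))
		return 0;		/* count was > 1: no locking needed */
	spin_lock(lock);
	if (atomic_dec_and_test(cnt))
		return 1;		/* hit zero: return with lock held */
	spin_unlock(lock);
	return 0;
}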
@@ -1440,9 +1555,8 @@ EXPORT_SYMBOL(inode_wait);
1440 * to recheck inode state. 1555 * to recheck inode state.
1441 * 1556 *
1442 * It doesn't matter if I_NEW is not set initially, a call to 1557 * It doesn't matter if I_NEW is not set initially, a call to
1443 * wake_up_inode() after removing from the hash list will DTRT. 1558 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
1444 * 1559 * will DTRT.
1445 * This is called with inode_lock held.
1446 */ 1560 */
1447static void __wait_on_freeing_inode(struct inode *inode) 1561static void __wait_on_freeing_inode(struct inode *inode)
1448{ 1562{
@@ -1450,10 +1564,11 @@ static void __wait_on_freeing_inode(struct inode *inode)
1450 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); 1564 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
1451 wq = bit_waitqueue(&inode->i_state, __I_NEW); 1565 wq = bit_waitqueue(&inode->i_state, __I_NEW);
1452 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 1566 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1453 spin_unlock(&inode_lock); 1567 spin_unlock(&inode->i_lock);
1568 spin_unlock(&inode_hash_lock);
1454 schedule(); 1569 schedule();
1455 finish_wait(wq, &wait.wait); 1570 finish_wait(wq, &wait.wait);
1456 spin_lock(&inode_lock); 1571 spin_lock(&inode_hash_lock);
1457} 1572}
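/*
 * Because __wait_on_freeing_inode() now drops both i_lock and
 * inode_hash_lock before schedule(), any hash walk that called it must
 * restart from the top once it returns.  Caller-side pattern, closely
 * following v3.0's find_inode_fast():
 */
static struct inode *find_inode_fast_sketch(struct super_block *sb,
					    struct hlist_head *head,
					    unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_ino != ino || inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;		/* hash chain may have changed */
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}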
1458 1573
1459static __initdata unsigned long ihash_entries; 1574static __initdata unsigned long ihash_entries;
@@ -1545,7 +1660,7 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
1545EXPORT_SYMBOL(init_special_inode); 1660EXPORT_SYMBOL(init_special_inode);
1546 1661
1547/** 1662/**
1548 * Init uid,gid,mode for new inode according to posix standards 1663 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
1549 * @inode: New inode 1664 * @inode: New inode
1550 * @dir: Directory inode 1665 * @dir: Directory inode
1551 * @mode: mode of the new inode 1666 * @mode: mode of the new inode
@@ -1563,3 +1678,22 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
1563 inode->i_mode = mode; 1678 inode->i_mode = mode;
1564} 1679}
1565EXPORT_SYMBOL(inode_init_owner); 1680EXPORT_SYMBOL(inode_init_owner);
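/*
 * Typical create-path usage (sketch; the myfs_* names are hypothetical):
 * inode_init_owner() sets i_uid from current_fsuid() and, when @dir is a
 * setgid directory, inherits i_gid from @dir and adds S_ISGID to new
 * subdirectories, per POSIX.
 */
static struct inode *myfs_new_inode(struct super_block *sb,
				    struct inode *dir, mode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode_init_owner(inode, dir, mode);
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	return inode;
}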
1681
1682/**
1683 * inode_owner_or_capable - check current task permissions to inode
1684 * @inode: inode being checked
1685 *
1686 * Return true if current either has CAP_FOWNER to the inode, or
1687 * owns the file.
1688 */
1689bool inode_owner_or_capable(const struct inode *inode)
1690{
1691 struct user_namespace *ns = inode_userns(inode);
1692
1693 if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
1694 return true;
1695 if (ns_capable(ns, CAP_FOWNER))
1696 return true;
1697 return false;
1698}
1699EXPORT_SYMBOL(inode_owner_or_capable);
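/*
 * Caller sketch (a hypothetical ioctl): owner-only operations gate on this
 * helper, which superseded the old is_owner_or_cap() macro when the check
 * became user-namespace aware: CAP_FOWNER must be held in the namespace
 * that owns the inode, not merely in the initial namespace.
 */
static long myfs_ioctl_setflags(struct file *file, unsigned int flags)
{
	struct inode *inode = file->f_path.dentry->d_inode;

	if (!inode_owner_or_capable(inode))
		return -EACCES;
	/* ...owner-only work, e.g. updating on-disk attribute flags... */
	return 0;
}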