Diffstat (limited to 'fs/inode.c')

 fs/inode.c | 71 +++++++++++++++++++----------------------------------------------------
 1 file changed, 19 insertions(+), 52 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 33c963d08ab4..43566d17d1b8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1,9 +1,7 @@
 /*
- * linux/fs/inode.c
- *
  * (C) 1997 Linus Torvalds
+ * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
  */
-
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/dcache.h>
@@ -24,12 +22,14 @@
 #include <linux/mount.h>
 #include <linux/async.h>
 #include <linux/posix_acl.h>
+#include <linux/prefetch.h>
 #include <linux/ima.h>
 #include <linux/cred.h>
+#include <linux/buffer_head.h> /* for inode_has_buffers */
 #include "internal.h"
 
 /*
- * inode locking rules.
+ * Inode locking rules:
  *
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
@@ -59,54 +59,11 @@
  *   inode_hash_lock
  */
 
-/*
- * This is needed for the following functions:
- * - inode_has_buffers
- * - invalidate_bdev
- *
- * FIXME: remove all knowledge of the buffer layer from this file
- */
-#include <linux/buffer_head.h>
-
-/*
- * New inode.c implementation.
- *
- * This implementation has the basic premise of trying
- * to be extremely low-overhead and SMP-safe, yet be
- * simple enough to be "obviously correct".
- *
- * Famous last words.
- */
-
-/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */
-
-/* #define INODE_PARANOIA 1 */
-/* #define INODE_DEBUG 1 */
-
-/*
- * Inode lookup is no longer as critical as it used to be:
- * most of the lookups are going to be through the dcache.
- */
-#define I_HASHBITS	i_hash_shift
-#define I_HASHMASK	i_hash_mask
-
 static unsigned int i_hash_mask __read_mostly;
 static unsigned int i_hash_shift __read_mostly;
 static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-/*
- * Each inode can be on two separate lists. One is
- * the hash list of the inode, used for lookups. The
- * other linked list is the "type" list:
- * "in_use" - valid inode, i_count > 0, i_nlink > 0
- * "dirty" - as "in_use" but also dirty
- * "unused" - valid inode, i_count = 0
- *
- * A "dirty" list is maintained for each super block,
- * allowing for low-overhead inode sync() operations.
- */
-
 static LIST_HEAD(inode_lru);
 static DEFINE_SPINLOCK(inode_lru_lock);
 
@@ -325,12 +282,11 @@ void address_space_init_once(struct address_space *mapping)
 	memset(mapping, 0, sizeof(*mapping));
 	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
 	spin_lock_init(&mapping->tree_lock);
-	spin_lock_init(&mapping->i_mmap_lock);
+	mutex_init(&mapping->i_mmap_mutex);
 	INIT_LIST_HEAD(&mapping->private_list);
 	spin_lock_init(&mapping->private_lock);
 	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
 	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
-	mutex_init(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(address_space_init_once);
 
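The hunk above tracks the conversion of the mapping's i_mmap lock from a spinlock to a mutex, and drops the interim unmap_mutex. For orientation, a rough sketch of the address_space fields this initializer touches, as I understand the struct of this era; field order and the remaining members are elided, so treat include/linux/fs.h as authoritative:

struct address_space {
	struct radix_tree_root	page_tree;	/* radix tree of all pages */
	spinlock_t		tree_lock;	/* and lock protecting it */
	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
	struct list_head	i_mmap_nonlinear; /* list of VM_NONLINEAR mappings */
	struct mutex		i_mmap_mutex;	/* protects i_mmap and friends; may sleep */
	struct list_head	private_list;	/* for use by the owner, e.g. buffers */
	spinlock_t		private_lock;	/* lock for private_list */
	/* ... */
};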
@@ -424,8 +380,8 @@ static unsigned long hash(struct super_block *sb, unsigned long hashval)
 
 	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
 			L1_CACHE_BYTES;
-	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-	return tmp & I_HASHMASK;
+	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
+	return tmp & i_hash_mask;
 }
 
 /**
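With the I_HASHBITS/I_HASHMASK macro aliases gone, the whole function now reads roughly as below, reconstructed from the hunk plus its unchanged context; the local tmp declaration is assumed, not shown in the hunk. It mixes the superblock pointer into the per-filesystem hash value, then folds the result down to the table size recorded in i_hash_shift/i_hash_mask at boot:

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;	/* assumed local, not visible in the hunk */

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}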
@@ -467,7 +423,14 @@ EXPORT_SYMBOL(remove_inode_hash);
 void end_writeback(struct inode *inode)
 {
 	might_sleep();
+	/*
+	 * We have to cycle tree_lock here because reclaim can be still in the
+	 * process of removing the last page (in __delete_from_page_cache())
+	 * and we must not free mapping under it.
+	 */
+	spin_lock_irq(&inode->i_data.tree_lock);
 	BUG_ON(inode->i_data.nrpages);
+	spin_unlock_irq(&inode->i_data.tree_lock);
 	BUG_ON(!list_empty(&inode->i_data.private_list));
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(inode->i_state & I_CLEAR);
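The lock cycle added here is the classic pattern for synchronizing teardown with a lockholder that has already made the object look free: reclaim drops nrpages to zero inside the tree_lock critical section but keeps touching the mapping until it unlocks, so taking and immediately releasing the same lock guarantees any such section has drained before the inode is freed. A simplified sketch of both sides of the race, not the kernel's exact reclaim code:

/* Reclaim side: __delete_from_page_cache() runs under tree_lock. */
spin_lock_irq(&mapping->tree_lock);
radix_tree_delete(&mapping->page_tree, page->index);
mapping->nrpages--;			/* inode now looks empty ... */
spin_unlock_irq(&mapping->tree_lock);	/* ... but mapping is in use until here */

/* Teardown side (this patch): wait out any such critical section. */
spin_lock_irq(&inode->i_data.tree_lock);
BUG_ON(inode->i_data.nrpages);
spin_unlock_irq(&inode->i_data.tree_lock);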
@@ -751,8 +714,12 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink,
+				struct shrink_control *sc)
 {
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
+
 	if (nr) {
 		/*
 		 * Nasty deadlock avoidance. We may hold various FS locks,
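This final hunk moves the inode-cache shrinker to the callback signature that bundles its arguments into a struct shrink_control. A minimal sketch of how a shrinker plugs into that API, assuming the definitions of this era (struct shrink_control carrying gfp_mask and nr_to_scan, registration via register_shrinker()); the example_* names are hypothetical:

#include <linux/mm.h>	/* struct shrinker, struct shrink_control */

static int example_count;	/* stand-in for a real reclaimable-object count */

static int example_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
		/* Like shrink_icache_memory: never recurse into the FS
		 * from an allocation that may already hold FS locks. */
		if (!(sc->gfp_mask & __GFP_FS))
			return -1;
		/* ... scan and free up to sc->nr_to_scan cached objects ... */
	}
	/* Report roughly how many reclaimable objects remain. */
	return example_count;
}

static struct shrinker example_shrinker = {
	.shrink	= example_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) at init,
 * unregister_shrinker(&example_shrinker) at teardown. */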