Diffstat (limited to 'fs/namespace.c')
-rw-r--r--  fs/namespace.c  45
1 file changed, 11 insertions(+), 34 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index 61bf376e29e8..e9c10cd01e13 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -25,18 +25,21 @@
 #include <linux/security.h>
 #include <linux/mount.h>
 #include <linux/ramfs.h>
+#include <linux/log2.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include "pnode.h"
 #include "internal.h"
 
+#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
+#define HASH_SIZE (1UL << HASH_SHIFT)
+
 /* spinlock for vfsmount related operations, inplace of dcache_lock */
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
 
 static int event;
 
 static struct list_head *mount_hashtable __read_mostly;
-static int hash_mask __read_mostly, hash_bits __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;
 
@@ -48,8 +51,8 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
 	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
 	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
-	tmp = tmp + (tmp >> hash_bits);
-	return tmp & hash_mask;
+	tmp = tmp + (tmp >> HASH_SHIFT);
+	return tmp & (HASH_SIZE - 1);
 }
 
 struct vfsmount *alloc_vfsmnt(const char *name)
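
For reference, a minimal userspace sketch (not kernel code) of what the new macros work out to and how hash() folds a (vfsmount, dentry) pair into a bucket index. It assumes 4096-byte pages, a 16-byte struct list_head (two pointers on 64-bit), 64-byte L1 cache lines, and a loop-based stand-in for the kernel's ilog2(); the mnt/dentry addresses are made-up values used only to exercise the fold.

#include <stdio.h>

/* Stand-in for the kernel's ilog2(): floor(log2(n)) for nonzero n. */
static unsigned long ilog2_ul(unsigned long n)
{
	unsigned long r = 0;

	while (n >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long page_size = 4096;      /* assumed PAGE_SIZE */
	unsigned long list_head_size = 16;   /* assumed sizeof(struct list_head) on 64-bit */
	unsigned long l1_cache_bytes = 64;   /* assumed L1_CACHE_BYTES */

	unsigned long hash_shift = ilog2_ul(page_size / list_head_size);
	unsigned long hash_size = 1UL << hash_shift;

	/* Made-up pointer values standing in for a vfsmount and a dentry. */
	unsigned long mnt = 0xffff880012345680UL;
	unsigned long dentry = 0xffff8800deadbe40UL;

	/* Same folding as the patched hash(): add the cache-line-aligned
	 * addresses, mix in the high bits, mask down to a bucket index. */
	unsigned long tmp = mnt / l1_cache_bytes + dentry / l1_cache_bytes;

	tmp = tmp + (tmp >> hash_shift);

	printf("HASH_SHIFT = %lu, HASH_SIZE = %lu\n", hash_shift, hash_size);
	printf("bucket index = %lu\n", tmp & (hash_size - 1));
	return 0;
}

With those assumptions the table holds 256 buckets, which is the value the new printk in mnt_init() below reports via %lu.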
@@ -1813,9 +1816,7 @@ static void __init init_mount_tree(void)
 
 void __init mnt_init(void)
 {
-	struct list_head *d;
-	unsigned int nr_hash;
-	int i;
+	unsigned u;
 	int err;
 
 	init_rwsem(&namespace_sem);
@@ -1828,35 +1829,11 @@ void __init mnt_init(void)
 	if (!mount_hashtable)
 		panic("Failed to allocate mount hash table\n");
 
-	/*
-	 * Find the power-of-two list-heads that can fit into the allocation..
-	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
-	 * a power-of-two.
-	 */
-	nr_hash = PAGE_SIZE / sizeof(struct list_head);
-	hash_bits = 0;
-	do {
-		hash_bits++;
-	} while ((nr_hash >> hash_bits) != 0);
-	hash_bits--;
+	printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
+
+	for (u = 0; u < HASH_SIZE; u++)
+		INIT_LIST_HEAD(&mount_hashtable[u]);
 
-	/*
-	 * Re-calculate the actual number of entries and the mask
-	 * from the number of bits we can fit.
-	 */
-	nr_hash = 1UL << hash_bits;
-	hash_mask = nr_hash - 1;
-
-	printk("Mount-cache hash table entries: %d\n", nr_hash);
-
-	/* And initialize the newly allocated array */
-	d = mount_hashtable;
-	i = nr_hash;
-	do {
-		INIT_LIST_HEAD(d);
-		d++;
-		i--;
-	} while (i);
 	err = sysfs_init();
 	if (err)
 		printk(KERN_WARNING "%s: sysfs_init error: %d\n",