Diffstat (limited to 'fs')
-rw-r--r--  fs/bio.c        |  4
-rw-r--r--  fs/block_dev.c  |  4
-rw-r--r--  fs/dcache.c     | 14
-rw-r--r--  fs/dcookies.c   |  6
-rw-r--r--  fs/dnotify.c    |  4
-rw-r--r--  fs/eventpoll.c  |  6
-rw-r--r--  fs/fcntl.c      |  4
-rw-r--r--  fs/inode.c      |  8
-rw-r--r--  fs/inotify.c    | 12
-rw-r--r--  fs/locks.c      |  2
-rw-r--r--  fs/namespace.c  |  4
-rw-r--r--  fs/pipe.c       |  2
12 files changed, 35 insertions, 35 deletions
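
Every hunk below makes the same kind of change: variables that are written once during initialisation and then only read on hot paths are tagged __read_mostly, which groups them into a dedicated linker section so they stop sharing cache lines with frequently-written data. As a rough sketch only (the real macro lives in the kernel's <asm/cache.h> headers, and the exact section name varies across architectures and kernel versions), the mechanism looks approximately like this:

/*
 * Illustrative approximation only -- not the kernel's exact definition.
 * The attribute moves the variable into a separate data section that the
 * linker script keeps away from frequently-written data.
 */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/* Typical candidates: set up once in an __init function, read thereafter. */
static struct kmem_cache *example_cachep __read_mostly;  /* hypothetical name */
static unsigned int example_hash_shift __read_mostly;    /* hypothetical name */

The band_table change in fs/fcntl.c reaches the same goal by a different route: marking the table const lets the compiler place it in a read-only section.
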
diff --git a/fs/bio.c b/fs/bio.c
index 73e664c01d30..49db9286a3b4 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
 
 #define BIO_POOL_SIZE 256
 
-static kmem_cache_t *bio_slab;
+static kmem_cache_t *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab;
  * basically we just need to survive
  */
 #define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+mempool_t *bio_split_pool __read_mostly;
 
 struct biovec_slab {
 	int nr_vecs;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 573fc8e0b67a..9a451a9ffad4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -234,7 +234,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep;
+static kmem_cache_t * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
@@ -308,7 +308,7 @@ static struct file_system_type bd_type = {
 	.kill_sb = kill_anon_super,
 };
 
-static struct vfsmount *bd_mnt;
+static struct vfsmount *bd_mnt __read_mostly;
 struct super_block *blockdev_superblock;
 
 void __init bdev_cache_init(void)
diff --git a/fs/dcache.c b/fs/dcache.c
index 939584648504..aaca5e7970bc 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -36,7 +36,7 @@
 
 /* #define DCACHE_DEBUG 1 */
 
-int sysctl_vfs_cache_pressure = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
@@ -44,7 +44,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache;
+static kmem_cache_t *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -59,9 +59,9 @@ static kmem_cache_t *dentry_cache;
 #define D_HASHBITS d_hash_shift
 #define D_HASHMASK d_hash_mask
 
-static unsigned int d_hash_mask;
-static unsigned int d_hash_shift;
-static struct hlist_head *dentry_hashtable;
+static unsigned int d_hash_mask __read_mostly;
+static unsigned int d_hash_shift __read_mostly;
+static struct hlist_head *dentry_hashtable __read_mostly;
 static LIST_HEAD(dentry_unused);
 
 /* Statistics gathering. */
@@ -1719,10 +1719,10 @@ static void __init dcache_init(unsigned long mempages)
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep;
+kmem_cache_t *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep;
+kmem_cache_t *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);
 
diff --git a/fs/dcookies.c b/fs/dcookies.c
index ef758cfa5565..8749339bf4f6 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -38,9 +38,9 @@ struct dcookie_struct {
 
 static LIST_HEAD(dcookie_users);
 static DEFINE_MUTEX(dcookie_mutex);
-static kmem_cache_t * dcookie_cache;
-static struct list_head * dcookie_hashtable;
-static size_t hash_size;
+static kmem_cache_t *dcookie_cache __read_mostly;
+static struct list_head *dcookie_hashtable __read_mostly;
+static size_t hash_size __read_mostly;
 
 static inline int is_live(void)
 {
diff --git a/fs/dnotify.c b/fs/dnotify.c
index f3b540dd5d11..f932591df5a4 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -21,9 +21,9 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 
-int dir_notify_enable = 1;
+int dir_notify_enable __read_mostly = 1;
 
-static kmem_cache_t *dn_cache;
+static kmem_cache_t *dn_cache __read_mostly;
 
 static void redo_inode_mask(struct inode *inode)
 {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a0f682cdd03e..e067a06c6464 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -281,13 +281,13 @@ static struct mutex epmutex;
 static struct poll_safewake psw;
 
 /* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache;
+static kmem_cache_t *epi_cache __read_mostly;
 
 /* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache;
+static kmem_cache_t *pwq_cache __read_mostly;
 
 /* Virtual fs used to allocate inodes for eventpoll files */
-static struct vfsmount *eventpoll_mnt;
+static struct vfsmount *eventpoll_mnt __read_mostly;
 
 /* File callbacks that implement the eventpoll file behaviour */
 static struct file_operations eventpoll_fops = {
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 03c789560fb8..2a2479196f96 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -412,7 +412,7 @@ out:
 
 /* Table to convert sigio signal codes into poll band bitmaps */
 
-static long band_table[NSIGPOLL] = {
+static const long band_table[NSIGPOLL] = {
 	POLLIN | POLLRDNORM,			/* POLL_IN */
 	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
 	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
 }
 
 static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache;
+static kmem_cache_t *fasync_cache __read_mostly;
 
 /*
  * fasync_helper() is used by some character device drivers (mainly mice)
diff --git a/fs/inode.c b/fs/inode.c
index 85da11044adc..1fddf2803af8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -56,8 +56,8 @@
 #define I_HASHBITS i_hash_shift
 #define I_HASHMASK i_hash_mask
 
-static unsigned int i_hash_mask;
-static unsigned int i_hash_shift;
+static unsigned int i_hash_mask __read_mostly;
+static unsigned int i_hash_shift __read_mostly;
 
 /*
  * Each inode can be on two separate lists. One is
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
 
 LIST_HEAD(inode_in_use);
 LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable;
+static struct hlist_head *inode_hashtable __read_mostly;
 
 /*
  * A simple spinlock to protect the list manipulations.
@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex);
  */
 struct inodes_stat_t inodes_stat;
 
-static kmem_cache_t * inode_cachep;
+static kmem_cache_t * inode_cachep __read_mostly;
 
 static struct inode *alloc_inode(struct super_block *sb)
 {
diff --git a/fs/inotify.c b/fs/inotify.c
index a61e93e17853..f48a3dae0712 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -39,15 +39,15 @@
 
 static atomic_t inotify_cookie;
 
-static kmem_cache_t *watch_cachep;
-static kmem_cache_t *event_cachep;
+static kmem_cache_t *watch_cachep __read_mostly;
+static kmem_cache_t *event_cachep __read_mostly;
 
-static struct vfsmount *inotify_mnt;
+static struct vfsmount *inotify_mnt __read_mostly;
 
 /* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances;
-int inotify_max_user_watches;
-int inotify_max_queued_events;
+int inotify_max_user_instances __read_mostly;
+int inotify_max_user_watches __read_mostly;
+int inotify_max_queued_events __read_mostly;
 
 /*
  * Lock ordering:
diff --git a/fs/locks.c b/fs/locks.c
index 56f996e98bbc..709450a7b89d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,7 +142,7 @@ int lease_break_time = 45;
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
 
-static kmem_cache_t *filelock_cache;
+static kmem_cache_t *filelock_cache __read_mostly;
 
 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)
diff --git a/fs/namespace.c b/fs/namespace.c
index 71e75bcf4d28..e069a4c5e389 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
 
 static int event;
 
-static struct list_head *mount_hashtable;
+static struct list_head *mount_hashtable __read_mostly;
 static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache;
+static kmem_cache_t *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;
 
 /* /sys/fs */
diff --git a/fs/pipe.c b/fs/pipe.c
index d976866a115b..4384c9290943 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -675,7 +675,7 @@ fail_page:
 	return NULL;
 }
 
-static struct vfsmount *pipe_mnt;
+static struct vfsmount *pipe_mnt __read_mostly;
 static int pipefs_delete_dentry(struct dentry *dentry)
 {
 	return 1;