about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
authorGlauber Costa <glommer@openvz.org>2013-08-27 20:18:18 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2013-09-10 18:56:32 -0400
commit5ca302c8e502ca53b7d75f12127ec0289904003a (patch)
tree80a5b248c01fc3f33392a0b6ef14a2baab86cdb0
parenta0b02131c5fcd8545b867db72224b3659e813f10 (diff)
list_lru: dynamically adjust node arrays
We currently use a compile-time constant to size the node array for the list_lru structure. Due to this, we don't need to allocate any memory at initialization time. But as a consequence, the structures that contain embedded list_lru lists can become way too big (the superblock, for instance, contains two of them). This patch aims at ameliorating this situation by dynamically allocating the node arrays with the firmware-provided nr_node_ids. Signed-off-by: Glauber Costa <glommer@openvz.org> Cc: Dave Chinner <dchinner@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: "Theodore Ts'o" <tytso@mit.edu> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com> Cc: Arve Hjønnevåg <arve@android.com> Cc: Carlos Maiolino <cmaiolino@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Chuck Lever <chuck.lever@oracle.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: David Rientjes <rientjes@google.com> Cc: Gleb Natapov <gleb@redhat.com> Cc: Greg Thelen <gthelen@google.com> Cc: J. Bruce Fields <bfields@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: Jerome Glisse <jglisse@redhat.com> Cc: John Stultz <john.stultz@linaro.org> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Kent Overstreet <koverstreet@google.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Thomas Hellstrom <thellstrom@vmware.com> Cc: Trond Myklebust <Trond.Myklebust@netapp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--fs/super.c11
-rw-r--r--fs/xfs/xfs_buf.c6
-rw-r--r--fs/xfs/xfs_qm.c10
-rw-r--r--include/linux/list_lru.h13
-rw-r--r--mm/list_lru.c14
5 files changed, 37 insertions, 17 deletions
diff --git a/fs/super.c b/fs/super.c
index 181d42e2abff..269d96857caa 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -195,8 +195,12 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
195 INIT_HLIST_NODE(&s->s_instances); 195 INIT_HLIST_NODE(&s->s_instances);
196 INIT_HLIST_BL_HEAD(&s->s_anon); 196 INIT_HLIST_BL_HEAD(&s->s_anon);
197 INIT_LIST_HEAD(&s->s_inodes); 197 INIT_LIST_HEAD(&s->s_inodes);
198 list_lru_init(&s->s_dentry_lru); 198
199 list_lru_init(&s->s_inode_lru); 199 if (list_lru_init(&s->s_dentry_lru))
200 goto err_out;
201 if (list_lru_init(&s->s_inode_lru))
202 goto err_out_dentry_lru;
203
200 INIT_LIST_HEAD(&s->s_mounts); 204 INIT_LIST_HEAD(&s->s_mounts);
201 init_rwsem(&s->s_umount); 205 init_rwsem(&s->s_umount);
202 lockdep_set_class(&s->s_umount, &type->s_umount_key); 206 lockdep_set_class(&s->s_umount, &type->s_umount_key);
@@ -236,6 +240,9 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
236 } 240 }
237out: 241out:
238 return s; 242 return s;
243
244err_out_dentry_lru:
245 list_lru_destroy(&s->s_dentry_lru);
239err_out: 246err_out:
240 security_sb_free(s); 247 security_sb_free(s);
241#ifdef CONFIG_SMP 248#ifdef CONFIG_SMP
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d46f6a3dc1de..49fdb7bed481 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1592,6 +1592,7 @@ xfs_free_buftarg(
1592 struct xfs_mount *mp, 1592 struct xfs_mount *mp,
1593 struct xfs_buftarg *btp) 1593 struct xfs_buftarg *btp)
1594{ 1594{
1595 list_lru_destroy(&btp->bt_lru);
1595 unregister_shrinker(&btp->bt_shrinker); 1596 unregister_shrinker(&btp->bt_shrinker);
1596 1597
1597 if (mp->m_flags & XFS_MOUNT_BARRIER) 1598 if (mp->m_flags & XFS_MOUNT_BARRIER)
@@ -1666,9 +1667,12 @@ xfs_alloc_buftarg(
1666 if (!btp->bt_bdi) 1667 if (!btp->bt_bdi)
1667 goto error; 1668 goto error;
1668 1669
1669 list_lru_init(&btp->bt_lru);
1670 if (xfs_setsize_buftarg_early(btp, bdev)) 1670 if (xfs_setsize_buftarg_early(btp, bdev))
1671 goto error; 1671 goto error;
1672
1673 if (list_lru_init(&btp->bt_lru))
1674 goto error;
1675
1672 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count; 1676 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1673 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan; 1677 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1674 btp->bt_shrinker.seeks = DEFAULT_SEEKS; 1678 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index a29169b062e3..7f4138629a80 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -831,11 +831,18 @@ xfs_qm_init_quotainfo(
831 831
832 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); 832 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
833 833
834 if ((error = list_lru_init(&qinf->qi_lru))) {
835 kmem_free(qinf);
836 mp->m_quotainfo = NULL;
837 return error;
838 }
839
834 /* 840 /*
835 * See if quotainodes are setup, and if not, allocate them, 841 * See if quotainodes are setup, and if not, allocate them,
836 * and change the superblock accordingly. 842 * and change the superblock accordingly.
837 */ 843 */
838 if ((error = xfs_qm_init_quotainos(mp))) { 844 if ((error = xfs_qm_init_quotainos(mp))) {
845 list_lru_destroy(&qinf->qi_lru);
839 kmem_free(qinf); 846 kmem_free(qinf);
840 mp->m_quotainfo = NULL; 847 mp->m_quotainfo = NULL;
841 return error; 848 return error;
@@ -846,8 +853,6 @@ xfs_qm_init_quotainfo(
846 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS); 853 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
847 mutex_init(&qinf->qi_tree_lock); 854 mutex_init(&qinf->qi_tree_lock);
848 855
849 list_lru_init(&qinf->qi_lru);
850
851 /* mutex used to serialize quotaoffs */ 856 /* mutex used to serialize quotaoffs */
852 mutex_init(&qinf->qi_quotaofflock); 857 mutex_init(&qinf->qi_quotaofflock);
853 858
@@ -935,6 +940,7 @@ xfs_qm_destroy_quotainfo(
935 qi = mp->m_quotainfo; 940 qi = mp->m_quotainfo;
936 ASSERT(qi != NULL); 941 ASSERT(qi != NULL);
937 942
943 list_lru_destroy(&qi->qi_lru);
938 unregister_shrinker(&qi->qi_shrinker); 944 unregister_shrinker(&qi->qi_shrinker);
939 945
940 if (qi->qi_uquotaip) { 946 if (qi->qi_uquotaip) {
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 4d02ad3badab..3ce541753c88 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -27,20 +27,11 @@ struct list_lru_node {
27} ____cacheline_aligned_in_smp; 27} ____cacheline_aligned_in_smp;
28 28
29struct list_lru { 29struct list_lru {
30 /* 30 struct list_lru_node *node;
31 * Because we use a fixed-size array, this struct can be very big if
32 * MAX_NUMNODES is big. If this becomes a problem this is fixable by
33 * turning this into a pointer and dynamically allocating this to
34 * nr_node_ids. This quantity is firwmare-provided, and still would
35 * provide room for all nodes at the cost of a pointer lookup and an
36 * extra allocation. Because that allocation will most likely come from
37 * a different slab cache than the main structure holding this
38 * structure, we may very well fail.
39 */
40 struct list_lru_node node[MAX_NUMNODES];
41 nodemask_t active_nodes; 31 nodemask_t active_nodes;
42}; 32};
43 33
34void list_lru_destroy(struct list_lru *lru);
44int list_lru_init(struct list_lru *lru); 35int list_lru_init(struct list_lru *lru);
45 36
46/** 37/**
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f91c24188573..72467914b856 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/list_lru.h> 10#include <linux/list_lru.h>
11#include <linux/slab.h>
11 12
12bool list_lru_add(struct list_lru *lru, struct list_head *item) 13bool list_lru_add(struct list_lru *lru, struct list_head *item)
13{ 14{
@@ -115,9 +116,14 @@ EXPORT_SYMBOL_GPL(list_lru_walk_node);
115int list_lru_init(struct list_lru *lru) 116int list_lru_init(struct list_lru *lru)
116{ 117{
117 int i; 118 int i;
119 size_t size = sizeof(*lru->node) * nr_node_ids;
120
121 lru->node = kzalloc(size, GFP_KERNEL);
122 if (!lru->node)
123 return -ENOMEM;
118 124
119 nodes_clear(lru->active_nodes); 125 nodes_clear(lru->active_nodes);
120 for (i = 0; i < MAX_NUMNODES; i++) { 126 for (i = 0; i < nr_node_ids; i++) {
121 spin_lock_init(&lru->node[i].lock); 127 spin_lock_init(&lru->node[i].lock);
122 INIT_LIST_HEAD(&lru->node[i].list); 128 INIT_LIST_HEAD(&lru->node[i].list);
123 lru->node[i].nr_items = 0; 129 lru->node[i].nr_items = 0;
@@ -125,3 +131,9 @@ int list_lru_init(struct list_lru *lru)
125 return 0; 131 return 0;
126} 132}
127EXPORT_SYMBOL_GPL(list_lru_init); 133EXPORT_SYMBOL_GPL(list_lru_init);
134
135void list_lru_destroy(struct list_lru *lru)
136{
137 kfree(lru->node);
138}
139EXPORT_SYMBOL_GPL(list_lru_destroy);