author    Joonsoo Kim <iamjoonsoo.kim@lge.com>    2016-05-19 20:10:11 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-05-19 22:12:14 -0400
commit    ded0ecf61118930988f0943e741056c8fd5d439c (patch)
tree      916c00c4cbcbdf4cbe83dde8cbcb27b615ea3b95 /mm/slab.c
parent    a5aa63a5f7352aa8991f64d46854dcb8d3788d55 (diff)
mm/slab: factor out kmem_cache_node initialization code
It can be reused in other places, so factor it out. A following patch will use it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
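To illustrate the shape of the refactoring outside the kernel, here is a minimal userspace sketch (names and types below are simplified stand-ins, not the kernel's): the per-node setup moves into an idempotent helper that either refreshes an already-initialized node or allocates and publishes a new one, and the hotplug-time loop reduces to calling that helper once per cache.

    /* Minimal userspace analogue of the refactoring; all names, types and
     * constants here are simplified stand-ins, not the kernel code. */
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_NODES 4
    #define CPUS_PER_NODE 3   /* stand-in for nr_cpus_node(node) */

    struct cache_node {
            int free_limit;
    };

    struct cache {
            struct cache_node *node[MAX_NODES];
            int batchcount;
            int num;
    };

    /* Analogue of init_cache_node(): if the node is already set up (e.g. by
     * another hotplug path), just refresh its limit; otherwise allocate,
     * initialize and publish it. */
    static int init_cache_node(struct cache *c, int node)
    {
            struct cache_node *n = c->node[node];
            int limit = (1 + CPUS_PER_NODE) * c->batchcount + c->num;

            if (n) {
                    n->free_limit = limit;
                    return 0;
            }

            n = calloc(1, sizeof(*n));
            if (!n)
                    return -1;      /* stands in for -ENOMEM */

            n->free_limit = limit;
            c->node[node] = n;      /* publish the new node */
            return 0;
    }

    /* Analogue of init_cache_node_node(): the open-coded body collapses
     * into a loop that reuses the helper for every cache. */
    static int init_cache_node_node(struct cache **caches, int ncaches, int node)
    {
            for (int i = 0; i < ncaches; i++) {
                    int ret = init_cache_node(caches[i], node);

                    if (ret)
                            return ret;
            }

            return 0;
    }

    int main(void)
    {
            struct cache c = { .batchcount = 16, .num = 8 };
            struct cache *caches[] = { &c };

            if (init_cache_node_node(caches, 1, 0))
                    return 1;

            printf("node 0 free_limit = %d\n", c.node[0]->free_limit);
            free(c.node[0]);
            return 0;
    }

The point of the sketch is the invariant the real patch preserves: free_limit is computed the same way on the already-initialized path and on the freshly-allocated path, so callers need not care which case they hit.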
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 74
1 file changed, 45 insertions(+), 29 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a998d35599a3..9bef33bc4daa 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -848,6 +848,46 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
 }
 #endif
 
+static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+{
+        struct kmem_cache_node *n;
+
+        /*
+         * Set up the kmem_cache_node for cpu before we can
+         * begin anything. Make sure some other cpu on this
+         * node has not already allocated this
+         */
+        n = get_node(cachep, node);
+        if (n) {
+                spin_lock_irq(&n->list_lock);
+                n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
+                                cachep->num;
+                spin_unlock_irq(&n->list_lock);
+
+                return 0;
+        }
+
+        n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
+        if (!n)
+                return -ENOMEM;
+
+        kmem_cache_node_init(n);
+        n->next_reap = jiffies + REAPTIMEOUT_NODE +
+                ((unsigned long)cachep) % REAPTIMEOUT_NODE;
+
+        n->free_limit =
+                (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
+
+        /*
+         * The kmem_cache_nodes don't come and go as CPUs
+         * come and go. slab_mutex is sufficient
+         * protection here.
+         */
+        cachep->node[node] = n;
+
+        return 0;
+}
+
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
@@ -859,39 +899,15 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
  */
 static int init_cache_node_node(int node)
 {
+        int ret;
         struct kmem_cache *cachep;
-        struct kmem_cache_node *n;
-        const size_t memsize = sizeof(struct kmem_cache_node);
 
         list_for_each_entry(cachep, &slab_caches, list) {
-                /*
-                 * Set up the kmem_cache_node for cpu before we can
-                 * begin anything. Make sure some other cpu on this
-                 * node has not already allocated this
-                 */
-                n = get_node(cachep, node);
-                if (!n) {
-                        n = kmalloc_node(memsize, GFP_KERNEL, node);
-                        if (!n)
-                                return -ENOMEM;
-                        kmem_cache_node_init(n);
-                        n->next_reap = jiffies + REAPTIMEOUT_NODE +
-                            ((unsigned long)cachep) % REAPTIMEOUT_NODE;
-
-                        /*
-                         * The kmem_cache_nodes don't come and go as CPUs
-                         * come and go. slab_mutex is sufficient
-                         * protection here.
-                         */
-                        cachep->node[node] = n;
-                }
-
-                spin_lock_irq(&n->list_lock);
-                n->free_limit =
-                        (1 + nr_cpus_node(node)) *
-                        cachep->batchcount + cachep->num;
-                spin_unlock_irq(&n->list_lock);
+                ret = init_cache_node(cachep, node, GFP_KERNEL);
+                if (ret)
+                        return ret;
         }
+
         return 0;
 }
 
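As the log notes, a following patch is expected to reuse the new helper. Purely as a hedged sketch, not code taken from that patch (the function name below is an assumption), a second caller could walk every online node and rely on init_cache_node() doing the right thing whether or not the node was already set up:

    /* Hypothetical sketch only: setup_kmem_cache_nodes() is an assumed name
     * and is not part of this commit. It simply reuses the helper that this
     * patch factors out, once per online node. */
    static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
    {
            int node;
            int ret;

            for_each_online_node(node) {
                    ret = init_cache_node(cachep, node, gfp);
                    if (ret)
                            return ret;
            }

            return 0;
    }

Because the helper returns early when the node already exists, such a caller stays safe to run on both the CPU-hotplug and memory-hotplug paths without duplicating the allocation logic.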