aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/memory.h4
-rw-r--r--mm/slub.c118
2 files changed, 122 insertions, 0 deletions
diff --git a/include/linux/memory.h b/include/linux/memory.h
index ec376e482abb..33f0ff0cf634 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -83,10 +83,14 @@ extern int memory_notify(unsigned long val, void *v);
83 83
84#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 84#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
85 85
86#ifdef CONFIG_MEMORY_HOTPLUG
86#define hotplug_memory_notifier(fn, pri) { \ 87#define hotplug_memory_notifier(fn, pri) { \
87 static struct notifier_block fn##_mem_nb = \ 88 static struct notifier_block fn##_mem_nb = \
88 { .notifier_call = fn, .priority = pri }; \ 89 { .notifier_call = fn, .priority = pri }; \
89 register_memory_notifier(&fn##_mem_nb); \ 90 register_memory_notifier(&fn##_mem_nb); \
90} 91}
92#else
93#define hotplug_memory_notifier(fn, pri) do { } while (0)
94#endif
91 95
92#endif /* _LINUX_MEMORY_H_ */ 96#endif /* _LINUX_MEMORY_H_ */
diff --git a/mm/slub.c b/mm/slub.c
index e29a42988c78..aac1dd3c657d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -20,6 +20,7 @@
20#include <linux/mempolicy.h> 20#include <linux/mempolicy.h>
21#include <linux/ctype.h> 21#include <linux/ctype.h>
22#include <linux/kallsyms.h> 22#include <linux/kallsyms.h>
23#include <linux/memory.h>
23 24
24/* 25/*
25 * Lock order: 26 * Lock order:
@@ -2694,6 +2695,121 @@ int kmem_cache_shrink(struct kmem_cache *s)
2694} 2695}
2695EXPORT_SYMBOL(kmem_cache_shrink); 2696EXPORT_SYMBOL(kmem_cache_shrink);
2696 2697
2698#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2699static int slab_mem_going_offline_callback(void *arg)
2700{
2701 struct kmem_cache *s;
2702
2703 down_read(&slub_lock);
2704 list_for_each_entry(s, &slab_caches, list)
2705 kmem_cache_shrink(s);
2706 up_read(&slub_lock);
2707
2708 return 0;
2709}
2710
2711static void slab_mem_offline_callback(void *arg)
2712{
2713 struct kmem_cache_node *n;
2714 struct kmem_cache *s;
2715 struct memory_notify *marg = arg;
2716 int offline_node;
2717
2718 offline_node = marg->status_change_nid;
2719
2720 /*
2721	 * If the node still has available memory, we still need the
2722	 * kmem_cache_node structure for it.
2723 */
2724 if (offline_node < 0)
2725 return;
2726
2727 down_read(&slub_lock);
2728 list_for_each_entry(s, &slab_caches, list) {
2729 n = get_node(s, offline_node);
2730 if (n) {
2731 /*
2732 * if n->nr_slabs > 0, slabs still exist on the node
2733 * that is going down. We were unable to free them,
2734	 * and the offline_pages() function shouldn't call this
2735 * callback. So, we must fail.
2736 */
2737 BUG_ON(atomic_read(&n->nr_slabs));
2738
2739 s->node[offline_node] = NULL;
2740 kmem_cache_free(kmalloc_caches, n);
2741 }
2742 }
2743 up_read(&slub_lock);
2744}
2745
2746static int slab_mem_going_online_callback(void *arg)
2747{
2748 struct kmem_cache_node *n;
2749 struct kmem_cache *s;
2750 struct memory_notify *marg = arg;
2751 int nid = marg->status_change_nid;
2752 int ret = 0;
2753
2754 /*
2755 * If the node's memory is already available, then kmem_cache_node is
2756 * already created. Nothing to do.
2757 */
2758 if (nid < 0)
2759 return 0;
2760
2761 /*
2762	 * We are bringing a node online. No memory is available yet. We must
2763 * allocate a kmem_cache_node structure in order to bring the node
2764 * online.
2765 */
2766 down_read(&slub_lock);
2767 list_for_each_entry(s, &slab_caches, list) {
2768 /*
2769 * XXX: kmem_cache_alloc_node will fallback to other nodes
2770 * since memory is not yet available from the node that
2771 * is brought up.
2772 */
2773 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2774 if (!n) {
2775 ret = -ENOMEM;
2776 goto out;
2777 }
2778 init_kmem_cache_node(n);
2779 s->node[nid] = n;
2780 }
2781out:
2782 up_read(&slub_lock);
2783 return ret;
2784}
2785
2786static int slab_memory_callback(struct notifier_block *self,
2787 unsigned long action, void *arg)
2788{
2789 int ret = 0;
2790
2791 switch (action) {
2792 case MEM_GOING_ONLINE:
2793 ret = slab_mem_going_online_callback(arg);
2794 break;
2795 case MEM_GOING_OFFLINE:
2796 ret = slab_mem_going_offline_callback(arg);
2797 break;
2798 case MEM_OFFLINE:
2799 case MEM_CANCEL_ONLINE:
2800 slab_mem_offline_callback(arg);
2801 break;
2802 case MEM_ONLINE:
2803 case MEM_CANCEL_OFFLINE:
2804 break;
2805 }
2806
2807 ret = notifier_from_errno(ret);
2808 return ret;
2809}
2810
2811#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
2812
2697/******************************************************************** 2813/********************************************************************
2698 * Basic setup of slabs 2814 * Basic setup of slabs
2699 *******************************************************************/ 2815 *******************************************************************/
@@ -2715,6 +2831,8 @@ void __init kmem_cache_init(void)
2715 sizeof(struct kmem_cache_node), GFP_KERNEL); 2831 sizeof(struct kmem_cache_node), GFP_KERNEL);
2716 kmalloc_caches[0].refcount = -1; 2832 kmalloc_caches[0].refcount = -1;
2717 caches++; 2833 caches++;
2834
2835 hotplug_memory_notifier(slab_memory_callback, 1);
2718#endif 2836#endif
2719 2837
2720 /* Able to allocate the per node structures */ 2838 /* Able to allocate the per node structures */