Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	118
1 file changed, 118 insertions(+), 0 deletions(-)
@@ -20,6 +20,7 @@
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/kallsyms.h>
+#include <linux/memory.h>
 
 /*
  * Lock order:
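The new <linux/memory.h> include supplies the memory hotplug notifier API used by the callbacks below. For reference, a sketch of the struct memory_notify payload as defined in this era's header (an approximation, not part of the patch):

	/* Approximate shape of the notifier payload from <linux/memory.h>;
	 * delivered as the void *arg of each memory hotplug event.
	 */
	struct memory_notify {
		unsigned long start_pfn;	/* first pfn of the affected range */
		unsigned long nr_pages;		/* length of the range in pages */
		int status_change_nid;		/* node gaining its first or losing
						 * its last memory, or -1 if node
						 * memory state is unchanged */
	};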
@@ -2694,6 +2695,121 @@ int kmem_cache_shrink(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+static int slab_mem_going_offline_callback(void *arg)
+{
+	struct kmem_cache *s;
+
+	down_read(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list)
+		kmem_cache_shrink(s);
+	up_read(&slub_lock);
+
+	return 0;
+}
+
+static void slab_mem_offline_callback(void *arg)
+{
+	struct kmem_cache_node *n;
+	struct kmem_cache *s;
+	struct memory_notify *marg = arg;
+	int offline_node;
+
+	offline_node = marg->status_change_nid;
+
+	/*
+	 * If the node still has available memory, its kmem_cache_node is
+	 * still needed; there is nothing to tear down.
+	 */
+	if (offline_node < 0)
+		return;
+
+	down_read(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list) {
+		n = get_node(s, offline_node);
+		if (n) {
+			/*
+			 * If n->nr_slabs > 0, slabs still exist on the node
+			 * that is going down. We were unable to free them,
+			 * and offline_pages() shouldn't have called this
+			 * callback. So, we must fail.
+			 */
+			BUG_ON(atomic_read(&n->nr_slabs));
+
+			s->node[offline_node] = NULL;
+			kmem_cache_free(kmalloc_caches, n);
+		}
+	}
+	up_read(&slub_lock);
+}
+
+static int slab_mem_going_online_callback(void *arg)
+{
+	struct kmem_cache_node *n;
+	struct kmem_cache *s;
+	struct memory_notify *marg = arg;
+	int nid = marg->status_change_nid;
+	int ret = 0;
+
+	/*
+	 * If the node's memory is already available, then kmem_cache_node is
+	 * already created. Nothing to do.
+	 */
+	if (nid < 0)
+		return 0;
+
+	/*
+	 * We are bringing a node online. No memory is available yet. We must
+	 * allocate a kmem_cache_node structure in order to bring the node
+	 * online.
+	 */
+	down_read(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list) {
+		/*
+		 * XXX: kmem_cache_alloc_node will fall back to other nodes
+		 * since memory is not yet available from the node that
+		 * is being brought up.
+		 */
+		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
+		if (!n) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		init_kmem_cache_node(n);
+		s->node[nid] = n;
+	}
+out:
+	up_read(&slub_lock);
+	return ret;
+}
+
+static int slab_memory_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
+{
+	int ret = 0;
+
+	switch (action) {
+	case MEM_GOING_ONLINE:
+		ret = slab_mem_going_online_callback(arg);
+		break;
+	case MEM_GOING_OFFLINE:
+		ret = slab_mem_going_offline_callback(arg);
+		break;
+	case MEM_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+		slab_mem_offline_callback(arg);
+		break;
+	case MEM_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	}
+
+	ret = notifier_from_errno(ret);
+	return ret;
+}
+
+#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+
 /********************************************************************
  *			Basic setup of slabs
  *******************************************************************/
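A note on the return values: MEM_GOING_OFFLINE is the veto point. The hotplug core asks notifiers before it frees a range and rolls back if any of them fails. A rough sketch of the caller side of that contract, approximating offline_pages() in mm/memory_hotplug.c (reconstructed here for context, not part of this diff):

	/* Rough sketch of the notifier handshake in offline_pages(): */
	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* a notifier vetoed the offline; undo and bail out */
		memory_notify(MEM_CANCEL_OFFLINE, &arg);
		return ret;
	}
	/* ... pages are isolated and freed; on success: */
	memory_notify(MEM_OFFLINE, &arg);

This is why slab_mem_going_offline_callback() only shrinks caches and returns 0, while the BUG_ON() in slab_mem_offline_callback() acts as a backstop: by the time MEM_OFFLINE arrives, no slabs may remain on the node.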
@@ -2715,6 +2831,8 @@ void __init kmem_cache_init(void)
 		sizeof(struct kmem_cache_node), GFP_KERNEL);
 	kmalloc_caches[0].refcount = -1;
 	caches++;
+
+	hotplug_memory_notifier(slab_memory_callback, 1);
 #endif
 
 	/* Able to allocate the per node structures */
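The hunk above wires the callback up during kmem_cache_init(), with priority 1 on the hotplug chain. As a usage illustration, any subsystem can hook the same chain; a minimal sketch with invented names (my_mem_callback and my_init are hypothetical, not from the patch):

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/memory.h>
	#include <linux/notifier.h>

	static int my_mem_callback(struct notifier_block *self,
				   unsigned long action, void *arg)
	{
		struct memory_notify *mn = arg;
		int ret = 0;

		switch (action) {
		case MEM_GOING_ONLINE:
			/* allocate per-node state for mn->status_change_nid;
			 * setting ret to -ENOMEM here would veto the online. */
			break;
		case MEM_GOING_OFFLINE:
			/* drain per-node state; an error vetoes the offline */
			break;
		default:
			printk(KERN_DEBUG "mem event %lu on node %d\n",
			       action, mn->status_change_nid);
			break;
		}
		return notifier_from_errno(ret);
	}

	static int __init my_init(void)
	{
		/* priority 0; the SLUB patch above registers with priority 1 */
		hotplug_memory_notifier(my_mem_callback, 0);
		return 0;
	}
	__initcall(my_init);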