Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	126
1 file changed, 86 insertions(+), 40 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -972,7 +972,39 @@ static int transfer_objects(struct array_cache *to,
 	return nr;
 }
 
-#ifdef CONFIG_NUMA
+#ifndef CONFIG_NUMA
+
+#define drain_alien_cache(cachep, alien) do { } while (0)
+#define reap_alien(cachep, l3) do { } while (0)
+
+static inline struct array_cache **alloc_alien_cache(int node, int limit)
+{
+	return (struct array_cache **)BAD_ALIEN_MAGIC;
+}
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	return 0;
+}
+
+static inline void *alternate_node_alloc(struct kmem_cache *cachep,
+		gfp_t flags)
+{
+	return NULL;
+}
+
+static inline void *__cache_alloc_node(struct kmem_cache *cachep,
+		gfp_t flags, int nodeid)
+{
+	return NULL;
+}
+
+#else	/* CONFIG_NUMA */
+
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
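The point of hoisting the !CONFIG_NUMA stubs ahead of the NUMA code, and of stubbing out alternate_node_alloc() and __cache_alloc_node() as well, is that call sites can now reference these functions unconditionally and rely on constant folding of the NUMA_BUILD flag (used further down in this diff) instead of #ifdef blocks. A minimal userspace sketch of the same pattern, showing only the !CONFIG_NUMA half; all names below are illustrative, not taken from the kernel:

    #include <stdlib.h>

    /* Compile-time constant, mirroring the kernel's NUMA_BUILD. */
    #ifdef CONFIG_NUMA
    #define NUMA_BUILD 1
    #else
    #define NUMA_BUILD 0
    #endif

    #ifndef CONFIG_NUMA
    /* Stub: must compile so the caller type-checks, but the call below
     * is dead code when NUMA_BUILD is 0 and is eliminated with it. */
    static inline void *alloc_on_node(size_t size, int node)
    {
            return NULL;
    }
    #endif

    void *do_alloc(size_t size)
    {
            void *p = NULL;

            if (NUMA_BUILD)                 /* folds to 'if (0)' here */
                    p = alloc_on_node(size, 0);
            if (!p)
                    p = malloc(size);       /* ordinary local path */
            return p;
    }

Unlike the old #ifdef'd call sites, both branches are always parsed and type-checked, which is exactly what the new stubs buy the NUMA code below.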
@@ -1101,26 +1133,6 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	}
 	return 1;
 }
-
-#else
-
-#define drain_alien_cache(cachep, alien) do { } while (0)
-#define reap_alien(cachep, l3) do { } while (0)
-
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
-{
-	return (struct array_cache **)BAD_ALIEN_MAGIC;
-}
-
-static inline void free_alien_cache(struct array_cache **ac_ptr)
-{
-}
-
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
-	return 0;
-}
-
 #endif
 
 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
@@ -1564,7 +1576,13 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 */
 	flags |= __GFP_COMP;
 #endif
-	flags |= cachep->gfpflags;
+
+	/*
+	 * Under NUMA we want memory on the indicated node. We will handle
+	 * the needed fallback ourselves since we want to serve from our
+	 * per node object lists first for other nodes.
+	 */
+	flags |= cachep->gfpflags | GFP_THISNODE;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
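GFP_THISNODE is what makes the slab-level fallback possible: with it set, alloc_pages_node() either returns pages that actually live on 'nodeid' or fails, instead of silently handing back a neighboring node's memory. Roughly, the contract kmem_getpages() now relies on looks like this (a sketch, not a literal excerpt):

    page = alloc_pages_node(nodeid, flags | GFP_THISNODE, cachep->gfporder);
    if (!page)
            return NULL;    /* no pages on 'nodeid'; the slab layer falls
                             * back explicitly via fallback_alloc() below */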
@@ -2442,7 +2460,6 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * @cachep: the cache to destroy
  *
  * Remove a struct kmem_cache object from the slab cache.
- * Returns 0 on success.
  *
  * It is expected this function will be called by a module when it is
  * unloaded. This will remove the cache completely, and avoid a duplicate
@@ -2454,7 +2471,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * The caller must guarantee that noone will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
-int kmem_cache_destroy(struct kmem_cache *cachep)
+void kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	BUG_ON(!cachep || in_interrupt());
 
@@ -2475,7 +2492,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 		list_add(&cachep->next, &cache_chain);
 		mutex_unlock(&cache_chain_mutex);
 		unlock_cpu_hotplug();
-		return 1;
+		return;
 	}
 
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
@@ -2483,7 +2500,6 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 
 	__kmem_cache_destroy(cachep);
 	unlock_cpu_hotplug();
-	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
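Since callers generally ignored the old return code, the int -> void conversion lets module teardown become an unconditional call. An illustrative exit path, where foo_cachep is a hypothetical cache created at init time:

    static struct kmem_cache *foo_cachep;

    static void __exit foo_exit(void)
    {
            /*
             * Nothing to check anymore: destroying a cache that still
             * holds live objects is a caller bug, and slab logs it itself.
             */
            kmem_cache_destroy(foo_cachep);
    }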
@@ -3030,14 +3046,6 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	void *objp;
 	struct array_cache *ac;
 
-#ifdef CONFIG_NUMA
-	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
-		objp = alternate_node_alloc(cachep, flags);
-		if (objp != NULL)
-			return objp;
-	}
-#endif
-
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
 	if (likely(ac->avail)) {
@@ -3055,12 +3063,24 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
 		gfp_t flags, void *caller)
 {
 	unsigned long save_flags;
-	void *objp;
+	void *objp = NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
 
 	local_irq_save(save_flags);
-	objp = ____cache_alloc(cachep, flags);
+
+	if (unlikely(NUMA_BUILD &&
+			current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
+		objp = alternate_node_alloc(cachep, flags);
+
+	if (!objp)
+		objp = ____cache_alloc(cachep, flags);
+	/*
+	 * We may just have run out of memory on the local node.
+	 * __cache_alloc_node() knows how to locate memory on other nodes
+	 */
+	if (NUMA_BUILD && !objp)
+		objp = __cache_alloc_node(cachep, flags, numa_node_id());
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
 					caller);
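Net effect: the NUMA policy check moves out of the fast path above into __cache_alloc(), where it becomes the first rung of a three-step ladder (an illustrative summary of the hunk above, not additional code):

    /* 1. policy/cpuset spreading, only if the task requests it */
    if (NUMA_BUILD && (current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
            objp = alternate_node_alloc(cachep, flags);
    /* 2. the usual per-CPU array-cache fast path */
    if (!objp)
            objp = ____cache_alloc(cachep, flags);
    /* 3. local node exhausted: search the other nodes' lists */
    if (NUMA_BUILD && !objp)
            objp = __cache_alloc_node(cachep, flags, numa_node_id());

Step 3 replaces the implicit fallback the page allocator used to provide: now that kmem_getpages() passes GFP_THISNODE, the slab layer itself decides which node to try next.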
@@ -3079,7 +3099,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	int nid_alloc, nid_here;
 
-	if (in_interrupt())
+	if (in_interrupt() || (flags & __GFP_THISNODE))
 		return NULL;
 	nid_alloc = nid_here = numa_node_id();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
@@ -3092,6 +3112,28 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 
 /*
+ * Fallback function if there was no memory available and no objects on a
+ * certain node and we are allowed to fall back. We mimick the behavior of
+ * the page allocator. We fall back according to a zonelist determined by
+ * the policy layer while obeying cpuset constraints.
+ */
+void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+	struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
+					->node_zonelists[gfp_zone(flags)];
+	struct zone **z;
+	void *obj = NULL;
+
+	for (z = zonelist->zones; *z && !obj; z++)
+		if (zone_idx(*z) <= ZONE_NORMAL &&
+				cpuset_zone_allowed(*z, flags))
+			obj = __cache_alloc_node(cache,
+					flags | __GFP_THISNODE,
+					zone_to_nid(*z));
+	return obj;
+}
+
+/*
  * A interface to enable slab creation on nodeid
  */
 static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
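fallback_alloc() reuses the page allocator's preference order: the zonelist obtained via the policy layer lists the local node's zones first and increasingly remote ones after that, and cpuset_zone_allowed() filters out zones the task may not use. The zone_idx() test skips highmem zones, which cannot back slab memory. On a hypothetical two-node machine, an allocation mapped to node 1 would probe, per allowed zone:

    /*
     * zonelist (from the policy layer):
     *     node1/ZONE_NORMAL, node1/ZONE_DMA, node0/ZONE_NORMAL, node0/ZONE_DMA
     *
     * -> __cache_alloc_node(cache, flags | __GFP_THISNODE, 1)
     * -> __cache_alloc_node(cache, flags | __GFP_THISNODE, 0)
     *
     * The '*z && !obj' condition stops the walk at the first zone whose
     * node yields an object.
     */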
@@ -3144,11 +3186,15 @@ retry:
 must_grow:
 	spin_unlock(&l3->list_lock);
 	x = cache_grow(cachep, flags, nodeid);
+	if (x)
+		goto retry;
 
-	if (!x)
-		return NULL;
+	if (!(flags & __GFP_THISNODE))
+		/* Unable to grow the cache. Fall back to other nodes. */
+		return fallback_alloc(cachep, flags);
+
+	return NULL;
 
-	goto retry;
 done:
 	return obj;
 }
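The restructured tail also bounds the recursion between __cache_alloc_node() and fallback_alloc(): every call fallback_alloc() makes carries __GFP_THISNODE, so a failed cache_grow() on the inner call returns NULL rather than falling back a second time. As a call trace (illustrative, not kernel code):

    /*
     * __cache_alloc_node(cache, flags, nid)            outer, no THISNODE
     *     cache_grow() fails
     *     -> fallback_alloc(cache, flags)
     *            -> __cache_alloc_node(cache,
     *                       flags | __GFP_THISNODE, other_nid)
     *                   cache_grow() fails
     *                   -> return NULL   (THISNODE: no second fallback)
     */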