about summary refs log tree commit diff stats
path: root/mm/slab.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 45
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 437d3388054b..c9adfce00405 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
308#define SIZE_L3 (1 + MAX_NUMNODES) 308#define SIZE_L3 (1 + MAX_NUMNODES)
309 309
310/* 310/*
311 * This function may be completely optimized away if 311 * This function must be completely optimized away if
312 * a constant is passed to it. Mostly the same as 312 * a constant is passed to it. Mostly the same as
313 * what is in linux/slab.h except it returns an 313 * what is in linux/slab.h except it returns an
314 * index. 314 * index.
315 */ 315 */
316static inline int index_of(const size_t size) 316static __always_inline int index_of(const size_t size)
317{ 317{
318 if (__builtin_constant_p(size)) { 318 if (__builtin_constant_p(size)) {
319 int i = 0; 319 int i = 0;
@@ -329,7 +329,8 @@ static inline int index_of(const size_t size)
329 extern void __bad_size(void); 329 extern void __bad_size(void);
330 __bad_size(); 330 __bad_size();
331 } 331 }
332 } 332 } else
333 BUG();
333 return 0; 334 return 0;
334} 335}
335 336
@@ -639,7 +640,7 @@ static enum {
639 640
640static DEFINE_PER_CPU(struct work_struct, reap_work); 641static DEFINE_PER_CPU(struct work_struct, reap_work);
641 642
642static void free_block(kmem_cache_t* cachep, void** objpp, int len); 643static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
643static void enable_cpucache (kmem_cache_t *cachep); 644static void enable_cpucache (kmem_cache_t *cachep);
644static void cache_reap (void *unused); 645static void cache_reap (void *unused);
645static int __node_shrink(kmem_cache_t *cachep, int node); 646static int __node_shrink(kmem_cache_t *cachep, int node);
@@ -804,7 +805,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
804 805
805 if (ac->avail) { 806 if (ac->avail) {
806 spin_lock(&rl3->list_lock); 807 spin_lock(&rl3->list_lock);
807 free_block(cachep, ac->entry, ac->avail); 808 free_block(cachep, ac->entry, ac->avail, node);
808 ac->avail = 0; 809 ac->avail = 0;
809 spin_unlock(&rl3->list_lock); 810 spin_unlock(&rl3->list_lock);
810 } 811 }
@@ -925,7 +926,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
925 /* Free limit for this kmem_list3 */ 926 /* Free limit for this kmem_list3 */
926 l3->free_limit -= cachep->batchcount; 927 l3->free_limit -= cachep->batchcount;
927 if (nc) 928 if (nc)
928 free_block(cachep, nc->entry, nc->avail); 929 free_block(cachep, nc->entry, nc->avail, node);
929 930
930 if (!cpus_empty(mask)) { 931 if (!cpus_empty(mask)) {
931 spin_unlock(&l3->list_lock); 932 spin_unlock(&l3->list_lock);
@@ -934,7 +935,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
934 935
935 if (l3->shared) { 936 if (l3->shared) {
936 free_block(cachep, l3->shared->entry, 937 free_block(cachep, l3->shared->entry,
937 l3->shared->avail); 938 l3->shared->avail, node);
938 kfree(l3->shared); 939 kfree(l3->shared);
939 l3->shared = NULL; 940 l3->shared = NULL;
940 } 941 }
@@ -1882,12 +1883,13 @@ static void do_drain(void *arg)
1882{ 1883{
1883 kmem_cache_t *cachep = (kmem_cache_t*)arg; 1884 kmem_cache_t *cachep = (kmem_cache_t*)arg;
1884 struct array_cache *ac; 1885 struct array_cache *ac;
1886 int node = numa_node_id();
1885 1887
1886 check_irq_off(); 1888 check_irq_off();
1887 ac = ac_data(cachep); 1889 ac = ac_data(cachep);
1888 spin_lock(&cachep->nodelists[numa_node_id()]->list_lock); 1890 spin_lock(&cachep->nodelists[node]->list_lock);
1889 free_block(cachep, ac->entry, ac->avail); 1891 free_block(cachep, ac->entry, ac->avail, node);
1890 spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock); 1892 spin_unlock(&cachep->nodelists[node]->list_lock);
1891 ac->avail = 0; 1893 ac->avail = 0;
1892} 1894}
1893 1895
@@ -2608,7 +2610,7 @@ done:
2608/* 2610/*
2609 * Caller needs to acquire correct kmem_list's list_lock 2611 * Caller needs to acquire correct kmem_list's list_lock
2610 */ 2612 */
2611static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects) 2613static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
2612{ 2614{
2613 int i; 2615 int i;
2614 struct kmem_list3 *l3; 2616 struct kmem_list3 *l3;
@@ -2617,14 +2619,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
2617 void *objp = objpp[i]; 2619 void *objp = objpp[i];
2618 struct slab *slabp; 2620 struct slab *slabp;
2619 unsigned int objnr; 2621 unsigned int objnr;
2620 int nodeid = 0;
2621 2622
2622 slabp = GET_PAGE_SLAB(virt_to_page(objp)); 2623 slabp = GET_PAGE_SLAB(virt_to_page(objp));
2623 nodeid = slabp->nodeid; 2624 l3 = cachep->nodelists[node];
2624 l3 = cachep->nodelists[nodeid];
2625 list_del(&slabp->list); 2625 list_del(&slabp->list);
2626 objnr = (objp - slabp->s_mem) / cachep->objsize; 2626 objnr = (objp - slabp->s_mem) / cachep->objsize;
2627 check_spinlock_acquired_node(cachep, nodeid); 2627 check_spinlock_acquired_node(cachep, node);
2628 check_slabp(cachep, slabp); 2628 check_slabp(cachep, slabp);
2629 2629
2630 2630
@@ -2664,13 +2664,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
2664{ 2664{
2665 int batchcount; 2665 int batchcount;
2666 struct kmem_list3 *l3; 2666 struct kmem_list3 *l3;
2667 int node = numa_node_id();
2667 2668
2668 batchcount = ac->batchcount; 2669 batchcount = ac->batchcount;
2669#if DEBUG 2670#if DEBUG
2670 BUG_ON(!batchcount || batchcount > ac->avail); 2671 BUG_ON(!batchcount || batchcount > ac->avail);
2671#endif 2672#endif
2672 check_irq_off(); 2673 check_irq_off();
2673 l3 = cachep->nodelists[numa_node_id()]; 2674 l3 = cachep->nodelists[node];
2674 spin_lock(&l3->list_lock); 2675 spin_lock(&l3->list_lock);
2675 if (l3->shared) { 2676 if (l3->shared) {
2676 struct array_cache *shared_array = l3->shared; 2677 struct array_cache *shared_array = l3->shared;
@@ -2686,7 +2687,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
2686 } 2687 }
2687 } 2688 }
2688 2689
2689 free_block(cachep, ac->entry, batchcount); 2690 free_block(cachep, ac->entry, batchcount, node);
2690free_done: 2691free_done:
2691#if STATS 2692#if STATS
2692 { 2693 {
@@ -2751,7 +2752,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
2751 } else { 2752 } else {
2752 spin_lock(&(cachep->nodelists[nodeid])-> 2753 spin_lock(&(cachep->nodelists[nodeid])->
2753 list_lock); 2754 list_lock);
2754 free_block(cachep, &objp, 1); 2755 free_block(cachep, &objp, 1, nodeid);
2755 spin_unlock(&(cachep->nodelists[nodeid])-> 2756 spin_unlock(&(cachep->nodelists[nodeid])->
2756 list_lock); 2757 list_lock);
2757 } 2758 }
@@ -2844,7 +2845,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
2844 unsigned long save_flags; 2845 unsigned long save_flags;
2845 void *ptr; 2846 void *ptr;
2846 2847
2847 if (nodeid == numa_node_id() || nodeid == -1) 2848 if (nodeid == -1)
2848 return __cache_alloc(cachep, flags); 2849 return __cache_alloc(cachep, flags);
2849 2850
2850 if (unlikely(!cachep->nodelists[nodeid])) { 2851 if (unlikely(!cachep->nodelists[nodeid])) {
@@ -3079,7 +3080,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
3079 3080
3080 if ((nc = cachep->nodelists[node]->shared)) 3081 if ((nc = cachep->nodelists[node]->shared))
3081 free_block(cachep, nc->entry, 3082 free_block(cachep, nc->entry,
3082 nc->avail); 3083 nc->avail, node);
3083 3084
3084 l3->shared = new; 3085 l3->shared = new;
3085 if (!cachep->nodelists[node]->alien) { 3086 if (!cachep->nodelists[node]->alien) {
@@ -3160,7 +3161,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
3160 if (!ccold) 3161 if (!ccold)
3161 continue; 3162 continue;
3162 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3163 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3163 free_block(cachep, ccold->entry, ccold->avail); 3164 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3164 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3165 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3165 kfree(ccold); 3166 kfree(ccold);
3166 } 3167 }
@@ -3240,7 +3241,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
3240 if (tofree > ac->avail) { 3241 if (tofree > ac->avail) {
3241 tofree = (ac->avail+1)/2; 3242 tofree = (ac->avail+1)/2;
3242 } 3243 }
3243 free_block(cachep, ac->entry, tofree); 3244 free_block(cachep, ac->entry, tofree, node);
3244 ac->avail -= tofree; 3245 ac->avail -= tofree;
3245 memmove(ac->entry, &(ac->entry[tofree]), 3246 memmove(ac->entry, &(ac->entry[tofree]),
3246 sizeof(void*)*ac->avail); 3247 sizeof(void*)*ac->avail);