Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 1139
1 files changed, 576 insertions, 563 deletions
@@ -130,7 +130,6 @@
130 | #define FORCED_DEBUG 0 | 130 | #define FORCED_DEBUG 0 |
131 | #endif | 131 | #endif |
132 | 132 | ||
133 | |||
134 | /* Shouldn't this be in a header file somewhere? */ | 133 | /* Shouldn't this be in a header file somewhere? */ |
135 | #define BYTES_PER_WORD sizeof(void *) | 134 | #define BYTES_PER_WORD sizeof(void *) |
136 | 135 | ||
@@ -217,12 +216,12 @@ static unsigned long offslab_limit;
217 | * Slabs are chained into three list: fully used, partial, fully free slabs. | 216 | * Slabs are chained into three list: fully used, partial, fully free slabs. |
218 | */ | 217 | */ |
219 | struct slab { | 218 | struct slab { |
220 | struct list_head list; | 219 | struct list_head list; |
221 | unsigned long colouroff; | 220 | unsigned long colouroff; |
222 | void *s_mem; /* including colour offset */ | 221 | void *s_mem; /* including colour offset */ |
223 | unsigned int inuse; /* num of objs active in slab */ | 222 | unsigned int inuse; /* num of objs active in slab */ |
224 | kmem_bufctl_t free; | 223 | kmem_bufctl_t free; |
225 | unsigned short nodeid; | 224 | unsigned short nodeid; |
226 | }; | 225 | }; |
227 | 226 | ||
228 | /* | 227 | /* |
@@ -242,9 +241,9 @@ struct slab {
242 | * We assume struct slab_rcu can overlay struct slab when destroying. | 241 | * We assume struct slab_rcu can overlay struct slab when destroying. |
243 | */ | 242 | */ |
244 | struct slab_rcu { | 243 | struct slab_rcu { |
245 | struct rcu_head head; | 244 | struct rcu_head head; |
246 | kmem_cache_t *cachep; | 245 | kmem_cache_t *cachep; |
247 | void *addr; | 246 | void *addr; |
248 | }; | 247 | }; |
249 | 248 | ||
250 | /* | 249 | /* |
@@ -279,23 +278,23 @@ struct array_cache {
279 | #define BOOT_CPUCACHE_ENTRIES 1 | 278 | #define BOOT_CPUCACHE_ENTRIES 1 |
280 | struct arraycache_init { | 279 | struct arraycache_init { |
281 | struct array_cache cache; | 280 | struct array_cache cache; |
282 | void * entries[BOOT_CPUCACHE_ENTRIES]; | 281 | void *entries[BOOT_CPUCACHE_ENTRIES]; |
283 | }; | 282 | }; |
284 | 283 | ||
285 | /* | 284 | /* |
286 | * The slab lists for all objects. | 285 | * The slab lists for all objects. |
287 | */ | 286 | */ |
288 | struct kmem_list3 { | 287 | struct kmem_list3 { |
289 | struct list_head slabs_partial; /* partial list first, better asm code */ | 288 | struct list_head slabs_partial; /* partial list first, better asm code */ |
290 | struct list_head slabs_full; | 289 | struct list_head slabs_full; |
291 | struct list_head slabs_free; | 290 | struct list_head slabs_free; |
292 | unsigned long free_objects; | 291 | unsigned long free_objects; |
293 | unsigned long next_reap; | 292 | unsigned long next_reap; |
294 | int free_touched; | 293 | int free_touched; |
295 | unsigned int free_limit; | 294 | unsigned int free_limit; |
296 | spinlock_t list_lock; | 295 | spinlock_t list_lock; |
297 | struct array_cache *shared; /* shared per node */ | 296 | struct array_cache *shared; /* shared per node */ |
298 | struct array_cache **alien; /* on other nodes */ | 297 | struct array_cache **alien; /* on other nodes */ |
299 | }; | 298 | }; |
300 | 299 | ||
301 | /* | 300 | /* |
@@ -367,63 +366,63 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
367 | * | 366 | * |
368 | * manages a cache. | 367 | * manages a cache. |
369 | */ | 368 | */ |
370 | 369 | ||
371 | struct kmem_cache { | 370 | struct kmem_cache { |
372 | /* 1) per-cpu data, touched during every alloc/free */ | 371 | /* 1) per-cpu data, touched during every alloc/free */ |
373 | struct array_cache *array[NR_CPUS]; | 372 | struct array_cache *array[NR_CPUS]; |
374 | unsigned int batchcount; | 373 | unsigned int batchcount; |
375 | unsigned int limit; | 374 | unsigned int limit; |
376 | unsigned int shared; | 375 | unsigned int shared; |
377 | unsigned int objsize; | 376 | unsigned int objsize; |
378 | /* 2) touched by every alloc & free from the backend */ | 377 | /* 2) touched by every alloc & free from the backend */ |
379 | struct kmem_list3 *nodelists[MAX_NUMNODES]; | 378 | struct kmem_list3 *nodelists[MAX_NUMNODES]; |
380 | unsigned int flags; /* constant flags */ | 379 | unsigned int flags; /* constant flags */ |
381 | unsigned int num; /* # of objs per slab */ | 380 | unsigned int num; /* # of objs per slab */ |
382 | spinlock_t spinlock; | 381 | spinlock_t spinlock; |
383 | 382 | ||
384 | /* 3) cache_grow/shrink */ | 383 | /* 3) cache_grow/shrink */ |
385 | /* order of pgs per slab (2^n) */ | 384 | /* order of pgs per slab (2^n) */ |
386 | unsigned int gfporder; | 385 | unsigned int gfporder; |
387 | 386 | ||
388 | /* force GFP flags, e.g. GFP_DMA */ | 387 | /* force GFP flags, e.g. GFP_DMA */ |
389 | gfp_t gfpflags; | 388 | gfp_t gfpflags; |
390 | 389 | ||
391 | size_t colour; /* cache colouring range */ | 390 | size_t colour; /* cache colouring range */ |
392 | unsigned int colour_off; /* colour offset */ | 391 | unsigned int colour_off; /* colour offset */ |
393 | unsigned int colour_next; /* cache colouring */ | 392 | unsigned int colour_next; /* cache colouring */ |
394 | kmem_cache_t *slabp_cache; | 393 | kmem_cache_t *slabp_cache; |
395 | unsigned int slab_size; | 394 | unsigned int slab_size; |
396 | unsigned int dflags; /* dynamic flags */ | 395 | unsigned int dflags; /* dynamic flags */ |
397 | 396 | ||
398 | /* constructor func */ | 397 | /* constructor func */ |
399 | void (*ctor)(void *, kmem_cache_t *, unsigned long); | 398 | void (*ctor) (void *, kmem_cache_t *, unsigned long); |
400 | 399 | ||
401 | /* de-constructor func */ | 400 | /* de-constructor func */ |
402 | void (*dtor)(void *, kmem_cache_t *, unsigned long); | 401 | void (*dtor) (void *, kmem_cache_t *, unsigned long); |
403 | 402 | ||
404 | /* 4) cache creation/removal */ | 403 | /* 4) cache creation/removal */ |
405 | const char *name; | 404 | const char *name; |
406 | struct list_head next; | 405 | struct list_head next; |
407 | 406 | ||
408 | /* 5) statistics */ | 407 | /* 5) statistics */ |
409 | #if STATS | 408 | #if STATS |
410 | unsigned long num_active; | 409 | unsigned long num_active; |
411 | unsigned long num_allocations; | 410 | unsigned long num_allocations; |
412 | unsigned long high_mark; | 411 | unsigned long high_mark; |
413 | unsigned long grown; | 412 | unsigned long grown; |
414 | unsigned long reaped; | 413 | unsigned long reaped; |
415 | unsigned long errors; | 414 | unsigned long errors; |
416 | unsigned long max_freeable; | 415 | unsigned long max_freeable; |
417 | unsigned long node_allocs; | 416 | unsigned long node_allocs; |
418 | unsigned long node_frees; | 417 | unsigned long node_frees; |
419 | atomic_t allochit; | 418 | atomic_t allochit; |
420 | atomic_t allocmiss; | 419 | atomic_t allocmiss; |
421 | atomic_t freehit; | 420 | atomic_t freehit; |
422 | atomic_t freemiss; | 421 | atomic_t freemiss; |
423 | #endif | 422 | #endif |
424 | #if DEBUG | 423 | #if DEBUG |
425 | int dbghead; | 424 | int dbghead; |
426 | int reallen; | 425 | int reallen; |
427 | #endif | 426 | #endif |
428 | }; | 427 | }; |
429 | 428 | ||
@@ -523,14 +522,15 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
523 | { | 522 | { |
524 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); | 523 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); |
525 | if (cachep->flags & SLAB_STORE_USER) | 524 | if (cachep->flags & SLAB_STORE_USER) |
526 | return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD); | 525 | return (unsigned long *)(objp + cachep->objsize - |
527 | return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD); | 526 | 2 * BYTES_PER_WORD); |
527 | return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD); | ||
528 | } | 528 | } |
529 | 529 | ||
530 | static void **dbg_userword(kmem_cache_t *cachep, void *objp) | 530 | static void **dbg_userword(kmem_cache_t *cachep, void *objp) |
531 | { | 531 | { |
532 | BUG_ON(!(cachep->flags & SLAB_STORE_USER)); | 532 | BUG_ON(!(cachep->flags & SLAB_STORE_USER)); |
533 | return (void**)(objp+cachep->objsize-BYTES_PER_WORD); | 533 | return (void **)(objp + cachep->objsize - BYTES_PER_WORD); |
534 | } | 534 | } |
535 | 535 | ||
536 | #else | 536 | #else |
@@ -607,31 +607,31 @@ struct cache_names {
607 | static struct cache_names __initdata cache_names[] = { | 607 | static struct cache_names __initdata cache_names[] = { |
608 | #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, | 608 | #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, |
609 | #include <linux/kmalloc_sizes.h> | 609 | #include <linux/kmalloc_sizes.h> |
610 | { NULL, } | 610 | {NULL,} |
611 | #undef CACHE | 611 | #undef CACHE |
612 | }; | 612 | }; |
613 | 613 | ||
614 | static struct arraycache_init initarray_cache __initdata = | 614 | static struct arraycache_init initarray_cache __initdata = |
615 | { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; | 615 | { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; |
616 | static struct arraycache_init initarray_generic = | 616 | static struct arraycache_init initarray_generic = |
617 | { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; | 617 | { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; |
618 | 618 | ||
619 | /* internal cache of cache description objs */ | 619 | /* internal cache of cache description objs */ |
620 | static kmem_cache_t cache_cache = { | 620 | static kmem_cache_t cache_cache = { |
621 | .batchcount = 1, | 621 | .batchcount = 1, |
622 | .limit = BOOT_CPUCACHE_ENTRIES, | 622 | .limit = BOOT_CPUCACHE_ENTRIES, |
623 | .shared = 1, | 623 | .shared = 1, |
624 | .objsize = sizeof(kmem_cache_t), | 624 | .objsize = sizeof(kmem_cache_t), |
625 | .flags = SLAB_NO_REAP, | 625 | .flags = SLAB_NO_REAP, |
626 | .spinlock = SPIN_LOCK_UNLOCKED, | 626 | .spinlock = SPIN_LOCK_UNLOCKED, |
627 | .name = "kmem_cache", | 627 | .name = "kmem_cache", |
628 | #if DEBUG | 628 | #if DEBUG |
629 | .reallen = sizeof(kmem_cache_t), | 629 | .reallen = sizeof(kmem_cache_t), |
630 | #endif | 630 | #endif |
631 | }; | 631 | }; |
632 | 632 | ||
633 | /* Guard access to the cache-chain. */ | 633 | /* Guard access to the cache-chain. */ |
634 | static struct semaphore cache_chain_sem; | 634 | static struct semaphore cache_chain_sem; |
635 | static struct list_head cache_chain; | 635 | static struct list_head cache_chain; |
636 | 636 | ||
637 | /* | 637 | /* |
@@ -655,9 +655,9 @@ static enum {
655 | 655 | ||
656 | static DEFINE_PER_CPU(struct work_struct, reap_work); | 656 | static DEFINE_PER_CPU(struct work_struct, reap_work); |
657 | 657 | ||
658 | static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node); | 658 | static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node); |
659 | static void enable_cpucache (kmem_cache_t *cachep); | 659 | static void enable_cpucache(kmem_cache_t *cachep); |
660 | static void cache_reap (void *unused); | 660 | static void cache_reap(void *unused); |
661 | static int __node_shrink(kmem_cache_t *cachep, int node); | 661 | static int __node_shrink(kmem_cache_t *cachep, int node); |
662 | 662 | ||
663 | static inline struct array_cache *ac_data(kmem_cache_t *cachep) | 663 | static inline struct array_cache *ac_data(kmem_cache_t *cachep) |
@@ -671,9 +671,9 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
671 | 671 | ||
672 | #if DEBUG | 672 | #if DEBUG |
673 | /* This happens if someone tries to call | 673 | /* This happens if someone tries to call |
674 | * kmem_cache_create(), or __kmalloc(), before | 674 | * kmem_cache_create(), or __kmalloc(), before |
675 | * the generic caches are initialized. | 675 | * the generic caches are initialized. |
676 | */ | 676 | */ |
677 | BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); | 677 | BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); |
678 | #endif | 678 | #endif |
679 | while (size > csizep->cs_size) | 679 | while (size > csizep->cs_size) |
@@ -697,10 +697,10 @@ EXPORT_SYMBOL(kmem_find_general_cachep);
697 | 697 | ||
698 | /* Cal the num objs, wastage, and bytes left over for a given slab size. */ | 698 | /* Cal the num objs, wastage, and bytes left over for a given slab size. */ |
699 | static void cache_estimate(unsigned long gfporder, size_t size, size_t align, | 699 | static void cache_estimate(unsigned long gfporder, size_t size, size_t align, |
700 | int flags, size_t *left_over, unsigned int *num) | 700 | int flags, size_t *left_over, unsigned int *num) |
701 | { | 701 | { |
702 | int i; | 702 | int i; |
703 | size_t wastage = PAGE_SIZE<<gfporder; | 703 | size_t wastage = PAGE_SIZE << gfporder; |
704 | size_t extra = 0; | 704 | size_t extra = 0; |
705 | size_t base = 0; | 705 | size_t base = 0; |
706 | 706 | ||
@@ -709,7 +709,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
709 | extra = sizeof(kmem_bufctl_t); | 709 | extra = sizeof(kmem_bufctl_t); |
710 | } | 710 | } |
711 | i = 0; | 711 | i = 0; |
712 | while (i*size + ALIGN(base+i*extra, align) <= wastage) | 712 | while (i * size + ALIGN(base + i * extra, align) <= wastage) |
713 | i++; | 713 | i++; |
714 | if (i > 0) | 714 | if (i > 0) |
715 | i--; | 715 | i--; |
@@ -718,8 +718,8 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
718 | i = SLAB_LIMIT; | 718 | i = SLAB_LIMIT; |
719 | 719 | ||
720 | *num = i; | 720 | *num = i; |
721 | wastage -= i*size; | 721 | wastage -= i * size; |
722 | wastage -= ALIGN(base+i*extra, align); | 722 | wastage -= ALIGN(base + i * extra, align); |
723 | *left_over = wastage; | 723 | *left_over = wastage; |
724 | } | 724 | } |
725 | 725 | ||
@@ -728,7 +728,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
728 | static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg) | 728 | static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg) |
729 | { | 729 | { |
730 | printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", | 730 | printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", |
731 | function, cachep->name, msg); | 731 | function, cachep->name, msg); |
732 | dump_stack(); | 732 | dump_stack(); |
733 | } | 733 | } |
734 | 734 | ||
@@ -755,9 +755,9 @@ static void __devinit start_cpu_timer(int cpu)
755 | } | 755 | } |
756 | 756 | ||
757 | static struct array_cache *alloc_arraycache(int node, int entries, | 757 | static struct array_cache *alloc_arraycache(int node, int entries, |
758 | int batchcount) | 758 | int batchcount) |
759 | { | 759 | { |
760 | int memsize = sizeof(void*)*entries+sizeof(struct array_cache); | 760 | int memsize = sizeof(void *) * entries + sizeof(struct array_cache); |
761 | struct array_cache *nc = NULL; | 761 | struct array_cache *nc = NULL; |
762 | 762 | ||
763 | nc = kmalloc_node(memsize, GFP_KERNEL, node); | 763 | nc = kmalloc_node(memsize, GFP_KERNEL, node); |
@@ -775,7 +775,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
775 | static inline struct array_cache **alloc_alien_cache(int node, int limit) | 775 | static inline struct array_cache **alloc_alien_cache(int node, int limit) |
776 | { | 776 | { |
777 | struct array_cache **ac_ptr; | 777 | struct array_cache **ac_ptr; |
778 | int memsize = sizeof(void*)*MAX_NUMNODES; | 778 | int memsize = sizeof(void *) * MAX_NUMNODES; |
779 | int i; | 779 | int i; |
780 | 780 | ||
781 | if (limit > 1) | 781 | if (limit > 1) |
@@ -789,7 +789,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
789 | } | 789 | } |
790 | ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); | 790 | ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); |
791 | if (!ac_ptr[i]) { | 791 | if (!ac_ptr[i]) { |
792 | for (i--; i <=0; i--) | 792 | for (i--; i <= 0; i--) |
793 | kfree(ac_ptr[i]); | 793 | kfree(ac_ptr[i]); |
794 | kfree(ac_ptr); | 794 | kfree(ac_ptr); |
795 | return NULL; | 795 | return NULL; |
@@ -807,12 +807,13 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
807 | return; | 807 | return; |
808 | 808 | ||
809 | for_each_node(i) | 809 | for_each_node(i) |
810 | kfree(ac_ptr[i]); | 810 | kfree(ac_ptr[i]); |
811 | 811 | ||
812 | kfree(ac_ptr); | 812 | kfree(ac_ptr); |
813 | } | 813 | } |
814 | 814 | ||
815 | static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node) | 815 | static inline void __drain_alien_cache(kmem_cache_t *cachep, |
816 | struct array_cache *ac, int node) | ||
816 | { | 817 | { |
817 | struct kmem_list3 *rl3 = cachep->nodelists[node]; | 818 | struct kmem_list3 *rl3 = cachep->nodelists[node]; |
818 | 819 | ||
@@ -826,7 +827,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
826 | 827 | ||
827 | static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3) | 828 | static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3) |
828 | { | 829 | { |
829 | int i=0; | 830 | int i = 0; |
830 | struct array_cache *ac; | 831 | struct array_cache *ac; |
831 | unsigned long flags; | 832 | unsigned long flags; |
832 | 833 | ||
@@ -846,14 +847,13 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
846 | #endif | 847 | #endif |
847 | 848 | ||
848 | static int __devinit cpuup_callback(struct notifier_block *nfb, | 849 | static int __devinit cpuup_callback(struct notifier_block *nfb, |
849 | unsigned long action, void *hcpu) | 850 | unsigned long action, void *hcpu) |
850 | { | 851 | { |
851 | long cpu = (long)hcpu; | 852 | long cpu = (long)hcpu; |
852 | kmem_cache_t* cachep; | 853 | kmem_cache_t *cachep; |
853 | struct kmem_list3 *l3 = NULL; | 854 | struct kmem_list3 *l3 = NULL; |
854 | int node = cpu_to_node(cpu); | 855 | int node = cpu_to_node(cpu); |
855 | int memsize = sizeof(struct kmem_list3); | 856 | int memsize = sizeof(struct kmem_list3); |
856 | struct array_cache *nc = NULL; | ||
857 | 857 | ||
858 | switch (action) { | 858 | switch (action) { |
859 | case CPU_UP_PREPARE: | 859 | case CPU_UP_PREPARE: |
@@ -871,27 +871,29 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
871 | */ | 871 | */ |
872 | if (!cachep->nodelists[node]) { | 872 | if (!cachep->nodelists[node]) { |
873 | if (!(l3 = kmalloc_node(memsize, | 873 | if (!(l3 = kmalloc_node(memsize, |
874 | GFP_KERNEL, node))) | 874 | GFP_KERNEL, node))) |
875 | goto bad; | 875 | goto bad; |
876 | kmem_list3_init(l3); | 876 | kmem_list3_init(l3); |
877 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 877 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + |
878 | ((unsigned long)cachep)%REAPTIMEOUT_LIST3; | 878 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; |
879 | 879 | ||
880 | cachep->nodelists[node] = l3; | 880 | cachep->nodelists[node] = l3; |
881 | } | 881 | } |
882 | 882 | ||
883 | spin_lock_irq(&cachep->nodelists[node]->list_lock); | 883 | spin_lock_irq(&cachep->nodelists[node]->list_lock); |
884 | cachep->nodelists[node]->free_limit = | 884 | cachep->nodelists[node]->free_limit = |
885 | (1 + nr_cpus_node(node)) * | 885 | (1 + nr_cpus_node(node)) * |
886 | cachep->batchcount + cachep->num; | 886 | cachep->batchcount + cachep->num; |
887 | spin_unlock_irq(&cachep->nodelists[node]->list_lock); | 887 | spin_unlock_irq(&cachep->nodelists[node]->list_lock); |
888 | } | 888 | } |
889 | 889 | ||
890 | /* Now we can go ahead with allocating the shared array's | 890 | /* Now we can go ahead with allocating the shared array's |
891 | & array cache's */ | 891 | & array cache's */ |
892 | list_for_each_entry(cachep, &cache_chain, next) { | 892 | list_for_each_entry(cachep, &cache_chain, next) { |
893 | struct array_cache *nc; | ||
894 | |||
893 | nc = alloc_arraycache(node, cachep->limit, | 895 | nc = alloc_arraycache(node, cachep->limit, |
894 | cachep->batchcount); | 896 | cachep->batchcount); |
895 | if (!nc) | 897 | if (!nc) |
896 | goto bad; | 898 | goto bad; |
897 | cachep->array[cpu] = nc; | 899 | cachep->array[cpu] = nc; |
@@ -900,12 +902,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
900 | BUG_ON(!l3); | 902 | BUG_ON(!l3); |
901 | if (!l3->shared) { | 903 | if (!l3->shared) { |
902 | if (!(nc = alloc_arraycache(node, | 904 | if (!(nc = alloc_arraycache(node, |
903 | cachep->shared*cachep->batchcount, | 905 | cachep->shared * |
904 | 0xbaadf00d))) | 906 | cachep->batchcount, |
905 | goto bad; | 907 | 0xbaadf00d))) |
908 | goto bad; | ||
906 | 909 | ||
907 | /* we are serialised from CPU_DEAD or | 910 | /* we are serialised from CPU_DEAD or |
908 | CPU_UP_CANCELLED by the cpucontrol lock */ | 911 | CPU_UP_CANCELLED by the cpucontrol lock */ |
909 | l3->shared = nc; | 912 | l3->shared = nc; |
910 | } | 913 | } |
911 | } | 914 | } |
@@ -942,13 +945,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
942 | free_block(cachep, nc->entry, nc->avail, node); | 945 | free_block(cachep, nc->entry, nc->avail, node); |
943 | 946 | ||
944 | if (!cpus_empty(mask)) { | 947 | if (!cpus_empty(mask)) { |
945 | spin_unlock(&l3->list_lock); | 948 | spin_unlock(&l3->list_lock); |
946 | goto unlock_cache; | 949 | goto unlock_cache; |
947 | } | 950 | } |
948 | 951 | ||
949 | if (l3->shared) { | 952 | if (l3->shared) { |
950 | free_block(cachep, l3->shared->entry, | 953 | free_block(cachep, l3->shared->entry, |
951 | l3->shared->avail, node); | 954 | l3->shared->avail, node); |
952 | kfree(l3->shared); | 955 | kfree(l3->shared); |
953 | l3->shared = NULL; | 956 | l3->shared = NULL; |
954 | } | 957 | } |
@@ -966,7 +969,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
966 | } else { | 969 | } else { |
967 | spin_unlock(&l3->list_lock); | 970 | spin_unlock(&l3->list_lock); |
968 | } | 971 | } |
969 | unlock_cache: | 972 | unlock_cache: |
970 | spin_unlock_irq(&cachep->spinlock); | 973 | spin_unlock_irq(&cachep->spinlock); |
971 | kfree(nc); | 974 | kfree(nc); |
972 | } | 975 | } |
@@ -975,7 +978,7 @@ unlock_cache:
975 | #endif | 978 | #endif |
976 | } | 979 | } |
977 | return NOTIFY_OK; | 980 | return NOTIFY_OK; |
978 | bad: | 981 | bad: |
979 | up(&cache_chain_sem); | 982 | up(&cache_chain_sem); |
980 | return NOTIFY_BAD; | 983 | return NOTIFY_BAD; |
981 | } | 984 | } |
@@ -985,8 +988,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
985 | /* | 988 | /* |
986 | * swap the static kmem_list3 with kmalloced memory | 989 | * swap the static kmem_list3 with kmalloced memory |
987 | */ | 990 | */ |
988 | static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, | 991 | static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid) |
989 | int nodeid) | ||
990 | { | 992 | { |
991 | struct kmem_list3 *ptr; | 993 | struct kmem_list3 *ptr; |
992 | 994 | ||
@@ -1055,14 +1057,14 @@ void __init kmem_cache_init(void)
1055 | cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size()); | 1057 | cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size()); |
1056 | 1058 | ||
1057 | cache_estimate(0, cache_cache.objsize, cache_line_size(), 0, | 1059 | cache_estimate(0, cache_cache.objsize, cache_line_size(), 0, |
1058 | &left_over, &cache_cache.num); | 1060 | &left_over, &cache_cache.num); |
1059 | if (!cache_cache.num) | 1061 | if (!cache_cache.num) |
1060 | BUG(); | 1062 | BUG(); |
1061 | 1063 | ||
1062 | cache_cache.colour = left_over/cache_cache.colour_off; | 1064 | cache_cache.colour = left_over / cache_cache.colour_off; |
1063 | cache_cache.colour_next = 0; | 1065 | cache_cache.colour_next = 0; |
1064 | cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) + | 1066 | cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + |
1065 | sizeof(struct slab), cache_line_size()); | 1067 | sizeof(struct slab), cache_line_size()); |
1066 | 1068 | ||
1067 | /* 2+3) create the kmalloc caches */ | 1069 | /* 2+3) create the kmalloc caches */ |
1068 | sizes = malloc_sizes; | 1070 | sizes = malloc_sizes; |
@@ -1074,14 +1076,18 @@ void __init kmem_cache_init(void)
1074 | */ | 1076 | */ |
1075 | 1077 | ||
1076 | sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, | 1078 | sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, |
1077 | sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, | 1079 | sizes[INDEX_AC].cs_size, |
1078 | (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); | 1080 | ARCH_KMALLOC_MINALIGN, |
1081 | (ARCH_KMALLOC_FLAGS | | ||
1082 | SLAB_PANIC), NULL, NULL); | ||
1079 | 1083 | ||
1080 | if (INDEX_AC != INDEX_L3) | 1084 | if (INDEX_AC != INDEX_L3) |
1081 | sizes[INDEX_L3].cs_cachep = | 1085 | sizes[INDEX_L3].cs_cachep = |
1082 | kmem_cache_create(names[INDEX_L3].name, | 1086 | kmem_cache_create(names[INDEX_L3].name, |
1083 | sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN, | 1087 | sizes[INDEX_L3].cs_size, |
1084 | (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); | 1088 | ARCH_KMALLOC_MINALIGN, |
1089 | (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, | ||
1090 | NULL); | ||
1085 | 1091 | ||
1086 | while (sizes->cs_size != ULONG_MAX) { | 1092 | while (sizes->cs_size != ULONG_MAX) { |
1087 | /* | 1093 | /* |
@@ -1091,35 +1097,41 @@ void __init kmem_cache_init(void)
1091 | * Note for systems short on memory removing the alignment will | 1097 | * Note for systems short on memory removing the alignment will |
1092 | * allow tighter packing of the smaller caches. | 1098 | * allow tighter packing of the smaller caches. |
1093 | */ | 1099 | */ |
1094 | if(!sizes->cs_cachep) | 1100 | if (!sizes->cs_cachep) |
1095 | sizes->cs_cachep = kmem_cache_create(names->name, | 1101 | sizes->cs_cachep = kmem_cache_create(names->name, |
1096 | sizes->cs_size, ARCH_KMALLOC_MINALIGN, | 1102 | sizes->cs_size, |
1097 | (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); | 1103 | ARCH_KMALLOC_MINALIGN, |
1104 | (ARCH_KMALLOC_FLAGS | ||
1105 | | SLAB_PANIC), | ||
1106 | NULL, NULL); | ||
1098 | 1107 | ||
1099 | /* Inc off-slab bufctl limit until the ceiling is hit. */ | 1108 | /* Inc off-slab bufctl limit until the ceiling is hit. */ |
1100 | if (!(OFF_SLAB(sizes->cs_cachep))) { | 1109 | if (!(OFF_SLAB(sizes->cs_cachep))) { |
1101 | offslab_limit = sizes->cs_size-sizeof(struct slab); | 1110 | offslab_limit = sizes->cs_size - sizeof(struct slab); |
1102 | offslab_limit /= sizeof(kmem_bufctl_t); | 1111 | offslab_limit /= sizeof(kmem_bufctl_t); |
1103 | } | 1112 | } |
1104 | 1113 | ||
1105 | sizes->cs_dmacachep = kmem_cache_create(names->name_dma, | 1114 | sizes->cs_dmacachep = kmem_cache_create(names->name_dma, |
1106 | sizes->cs_size, ARCH_KMALLOC_MINALIGN, | 1115 | sizes->cs_size, |
1107 | (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC), | 1116 | ARCH_KMALLOC_MINALIGN, |
1108 | NULL, NULL); | 1117 | (ARCH_KMALLOC_FLAGS | |
1118 | SLAB_CACHE_DMA | | ||
1119 | SLAB_PANIC), NULL, | ||
1120 | NULL); | ||
1109 | 1121 | ||
1110 | sizes++; | 1122 | sizes++; |
1111 | names++; | 1123 | names++; |
1112 | } | 1124 | } |
1113 | /* 4) Replace the bootstrap head arrays */ | 1125 | /* 4) Replace the bootstrap head arrays */ |
1114 | { | 1126 | { |
1115 | void * ptr; | 1127 | void *ptr; |
1116 | 1128 | ||
1117 | ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); | 1129 | ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); |
1118 | 1130 | ||
1119 | local_irq_disable(); | 1131 | local_irq_disable(); |
1120 | BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache); | 1132 | BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache); |
1121 | memcpy(ptr, ac_data(&cache_cache), | 1133 | memcpy(ptr, ac_data(&cache_cache), |
1122 | sizeof(struct arraycache_init)); | 1134 | sizeof(struct arraycache_init)); |
1123 | cache_cache.array[smp_processor_id()] = ptr; | 1135 | cache_cache.array[smp_processor_id()] = ptr; |
1124 | local_irq_enable(); | 1136 | local_irq_enable(); |
1125 | 1137 | ||
@@ -1127,11 +1139,11 @@ void __init kmem_cache_init(void)
1127 | 1139 | ||
1128 | local_irq_disable(); | 1140 | local_irq_disable(); |
1129 | BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep) | 1141 | BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep) |
1130 | != &initarray_generic.cache); | 1142 | != &initarray_generic.cache); |
1131 | memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep), | 1143 | memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep), |
1132 | sizeof(struct arraycache_init)); | 1144 | sizeof(struct arraycache_init)); |
1133 | malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = | 1145 | malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = |
1134 | ptr; | 1146 | ptr; |
1135 | local_irq_enable(); | 1147 | local_irq_enable(); |
1136 | } | 1148 | } |
1137 | /* 5) Replace the bootstrap kmem_list3's */ | 1149 | /* 5) Replace the bootstrap kmem_list3's */ |
@@ -1139,16 +1151,16 @@ void __init kmem_cache_init(void)
1139 | int node; | 1151 | int node; |
1140 | /* Replace the static kmem_list3 structures for the boot cpu */ | 1152 | /* Replace the static kmem_list3 structures for the boot cpu */ |
1141 | init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], | 1153 | init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], |
1142 | numa_node_id()); | 1154 | numa_node_id()); |
1143 | 1155 | ||
1144 | for_each_online_node(node) { | 1156 | for_each_online_node(node) { |
1145 | init_list(malloc_sizes[INDEX_AC].cs_cachep, | 1157 | init_list(malloc_sizes[INDEX_AC].cs_cachep, |
1146 | &initkmem_list3[SIZE_AC+node], node); | 1158 | &initkmem_list3[SIZE_AC + node], node); |
1147 | 1159 | ||
1148 | if (INDEX_AC != INDEX_L3) { | 1160 | if (INDEX_AC != INDEX_L3) { |
1149 | init_list(malloc_sizes[INDEX_L3].cs_cachep, | 1161 | init_list(malloc_sizes[INDEX_L3].cs_cachep, |
1150 | &initkmem_list3[SIZE_L3+node], | 1162 | &initkmem_list3[SIZE_L3 + node], |
1151 | node); | 1163 | node); |
1152 | } | 1164 | } |
1153 | } | 1165 | } |
1154 | } | 1166 | } |
@@ -1158,7 +1170,7 @@ void __init kmem_cache_init(void)
1158 | kmem_cache_t *cachep; | 1170 | kmem_cache_t *cachep; |
1159 | down(&cache_chain_sem); | 1171 | down(&cache_chain_sem); |
1160 | list_for_each_entry(cachep, &cache_chain, next) | 1172 | list_for_each_entry(cachep, &cache_chain, next) |
1161 | enable_cpucache(cachep); | 1173 | enable_cpucache(cachep); |
1162 | up(&cache_chain_sem); | 1174 | up(&cache_chain_sem); |
1163 | } | 1175 | } |
1164 | 1176 | ||
@@ -1184,7 +1196,7 @@ static int __init cpucache_init(void)
1184 | * pages to gfp. | 1196 | * pages to gfp. |
1185 | */ | 1197 | */ |
1186 | for_each_online_cpu(cpu) | 1198 | for_each_online_cpu(cpu) |
1187 | start_cpu_timer(cpu); | 1199 | start_cpu_timer(cpu); |
1188 | 1200 | ||
1189 | return 0; | 1201 | return 0; |
1190 | } | 1202 | } |
@@ -1226,7 +1238,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
1226 | */ | 1238 | */ |
1227 | static void kmem_freepages(kmem_cache_t *cachep, void *addr) | 1239 | static void kmem_freepages(kmem_cache_t *cachep, void *addr) |
1228 | { | 1240 | { |
1229 | unsigned long i = (1<<cachep->gfporder); | 1241 | unsigned long i = (1 << cachep->gfporder); |
1230 | struct page *page = virt_to_page(addr); | 1242 | struct page *page = virt_to_page(addr); |
1231 | const unsigned long nr_freed = i; | 1243 | const unsigned long nr_freed = i; |
1232 | 1244 | ||
@@ -1239,13 +1251,13 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
1239 | if (current->reclaim_state) | 1251 | if (current->reclaim_state) |
1240 | current->reclaim_state->reclaimed_slab += nr_freed; | 1252 | current->reclaim_state->reclaimed_slab += nr_freed; |
1241 | free_pages((unsigned long)addr, cachep->gfporder); | 1253 | free_pages((unsigned long)addr, cachep->gfporder); |
1242 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | 1254 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) |
1243 | atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages); | 1255 | atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages); |
1244 | } | 1256 | } |
1245 | 1257 | ||
1246 | static void kmem_rcu_free(struct rcu_head *head) | 1258 | static void kmem_rcu_free(struct rcu_head *head) |
1247 | { | 1259 | { |
1248 | struct slab_rcu *slab_rcu = (struct slab_rcu *) head; | 1260 | struct slab_rcu *slab_rcu = (struct slab_rcu *)head; |
1249 | kmem_cache_t *cachep = slab_rcu->cachep; | 1261 | kmem_cache_t *cachep = slab_rcu->cachep; |
1250 | 1262 | ||
1251 | kmem_freepages(cachep, slab_rcu->addr); | 1263 | kmem_freepages(cachep, slab_rcu->addr); |
@@ -1257,19 +1269,19 @@ static void kmem_rcu_free(struct rcu_head *head)
1257 | 1269 | ||
1258 | #ifdef CONFIG_DEBUG_PAGEALLOC | 1270 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1259 | static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, | 1271 | static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, |
1260 | unsigned long caller) | 1272 | unsigned long caller) |
1261 | { | 1273 | { |
1262 | int size = obj_reallen(cachep); | 1274 | int size = obj_reallen(cachep); |
1263 | 1275 | ||
1264 | addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)]; | 1276 | addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)]; |
1265 | 1277 | ||
1266 | if (size < 5*sizeof(unsigned long)) | 1278 | if (size < 5 * sizeof(unsigned long)) |
1267 | return; | 1279 | return; |
1268 | 1280 | ||
1269 | *addr++=0x12345678; | 1281 | *addr++ = 0x12345678; |
1270 | *addr++=caller; | 1282 | *addr++ = caller; |
1271 | *addr++=smp_processor_id(); | 1283 | *addr++ = smp_processor_id(); |
1272 | size -= 3*sizeof(unsigned long); | 1284 | size -= 3 * sizeof(unsigned long); |
1273 | { | 1285 | { |
1274 | unsigned long *sptr = &caller; | 1286 | unsigned long *sptr = &caller; |
1275 | unsigned long svalue; | 1287 | unsigned long svalue; |
@@ -1277,7 +1289,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
1277 | while (!kstack_end(sptr)) { | 1289 | while (!kstack_end(sptr)) { |
1278 | svalue = *sptr++; | 1290 | svalue = *sptr++; |
1279 | if (kernel_text_address(svalue)) { | 1291 | if (kernel_text_address(svalue)) { |
1280 | *addr++=svalue; | 1292 | *addr++ = svalue; |
1281 | size -= sizeof(unsigned long); | 1293 | size -= sizeof(unsigned long); |
1282 | if (size <= sizeof(unsigned long)) | 1294 | if (size <= sizeof(unsigned long)) |
1283 | break; | 1295 | break; |
@@ -1285,25 +1297,25 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
1285 | } | 1297 | } |
1286 | 1298 | ||
1287 | } | 1299 | } |
1288 | *addr++=0x87654321; | 1300 | *addr++ = 0x87654321; |
1289 | } | 1301 | } |
1290 | #endif | 1302 | #endif |
1291 | 1303 | ||
1292 | static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val) | 1304 | static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val) |
1293 | { | 1305 | { |
1294 | int size = obj_reallen(cachep); | 1306 | int size = obj_reallen(cachep); |
1295 | addr = &((char*)addr)[obj_dbghead(cachep)]; | 1307 | addr = &((char *)addr)[obj_dbghead(cachep)]; |
1296 | 1308 | ||
1297 | memset(addr, val, size); | 1309 | memset(addr, val, size); |
1298 | *(unsigned char *)(addr+size-1) = POISON_END; | 1310 | *(unsigned char *)(addr + size - 1) = POISON_END; |
1299 | } | 1311 | } |
1300 | 1312 | ||
1301 | static void dump_line(char *data, int offset, int limit) | 1313 | static void dump_line(char *data, int offset, int limit) |
1302 | { | 1314 | { |
1303 | int i; | 1315 | int i; |
1304 | printk(KERN_ERR "%03x:", offset); | 1316 | printk(KERN_ERR "%03x:", offset); |
1305 | for (i=0;i<limit;i++) { | 1317 | for (i = 0; i < limit; i++) { |
1306 | printk(" %02x", (unsigned char)data[offset+i]); | 1318 | printk(" %02x", (unsigned char)data[offset + i]); |
1307 | } | 1319 | } |
1308 | printk("\n"); | 1320 | printk("\n"); |
1309 | } | 1321 | } |
@@ -1318,24 +1330,24 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
1318 | 1330 | ||
1319 | if (cachep->flags & SLAB_RED_ZONE) { | 1331 | if (cachep->flags & SLAB_RED_ZONE) { |
1320 | printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", | 1332 | printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", |
1321 | *dbg_redzone1(cachep, objp), | 1333 | *dbg_redzone1(cachep, objp), |
1322 | *dbg_redzone2(cachep, objp)); | 1334 | *dbg_redzone2(cachep, objp)); |
1323 | } | 1335 | } |
1324 | 1336 | ||
1325 | if (cachep->flags & SLAB_STORE_USER) { | 1337 | if (cachep->flags & SLAB_STORE_USER) { |
1326 | printk(KERN_ERR "Last user: [<%p>]", | 1338 | printk(KERN_ERR "Last user: [<%p>]", |
1327 | *dbg_userword(cachep, objp)); | 1339 | *dbg_userword(cachep, objp)); |
1328 | print_symbol("(%s)", | 1340 | print_symbol("(%s)", |
1329 | (unsigned long)*dbg_userword(cachep, objp)); | 1341 | (unsigned long)*dbg_userword(cachep, objp)); |
1330 | printk("\n"); | 1342 | printk("\n"); |
1331 | } | 1343 | } |
1332 | realobj = (char*)objp+obj_dbghead(cachep); | 1344 | realobj = (char *)objp + obj_dbghead(cachep); |
1333 | size = obj_reallen(cachep); | 1345 | size = obj_reallen(cachep); |
1334 | for (i=0; i<size && lines;i+=16, lines--) { | 1346 | for (i = 0; i < size && lines; i += 16, lines--) { |
1335 | int limit; | 1347 | int limit; |
1336 | limit = 16; | 1348 | limit = 16; |
1337 | if (i+limit > size) | 1349 | if (i + limit > size) |
1338 | limit = size-i; | 1350 | limit = size - i; |
1339 | dump_line(realobj, i, limit); | 1351 | dump_line(realobj, i, limit); |
1340 | } | 1352 | } |
1341 | } | 1353 | } |
@@ -1346,27 +1358,28 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
1346 | int size, i; | 1358 | int size, i; |
1347 | int lines = 0; | 1359 | int lines = 0; |
1348 | 1360 | ||
1349 | realobj = (char*)objp+obj_dbghead(cachep); | 1361 | realobj = (char *)objp + obj_dbghead(cachep); |
1350 | size = obj_reallen(cachep); | 1362 | size = obj_reallen(cachep); |
1351 | 1363 | ||
1352 | for (i=0;i<size;i++) { | 1364 | for (i = 0; i < size; i++) { |
1353 | char exp = POISON_FREE; | 1365 | char exp = POISON_FREE; |
1354 | if (i == size-1) | 1366 | if (i == size - 1) |
1355 | exp = POISON_END; | 1367 | exp = POISON_END; |
1356 | if (realobj[i] != exp) { | 1368 | if (realobj[i] != exp) { |
1357 | int limit; | 1369 | int limit; |
1358 | /* Mismatch ! */ | 1370 | /* Mismatch ! */ |
1359 | /* Print header */ | 1371 | /* Print header */ |
1360 | if (lines == 0) { | 1372 | if (lines == 0) { |
1361 | printk(KERN_ERR "Slab corruption: start=%p, len=%d\n", | 1373 | printk(KERN_ERR |
1362 | realobj, size); | 1374 | "Slab corruption: start=%p, len=%d\n", |
1375 | realobj, size); | ||
1363 | print_objinfo(cachep, objp, 0); | 1376 | print_objinfo(cachep, objp, 0); |
1364 | } | 1377 | } |
1365 | /* Hexdump the affected line */ | 1378 | /* Hexdump the affected line */ |
1366 | i = (i/16)*16; | 1379 | i = (i / 16) * 16; |
1367 | limit = 16; | 1380 | limit = 16; |
1368 | if (i+limit > size) | 1381 | if (i + limit > size) |
1369 | limit = size-i; | 1382 | limit = size - i; |
1370 | dump_line(realobj, i, limit); | 1383 | dump_line(realobj, i, limit); |
1371 | i += 16; | 1384 | i += 16; |
1372 | lines++; | 1385 | lines++; |
@@ -1382,19 +1395,19 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
1382 | struct slab *slabp = page_get_slab(virt_to_page(objp)); | 1395 | struct slab *slabp = page_get_slab(virt_to_page(objp)); |
1383 | int objnr; | 1396 | int objnr; |
1384 | 1397 | ||
1385 | objnr = (objp-slabp->s_mem)/cachep->objsize; | 1398 | objnr = (objp - slabp->s_mem) / cachep->objsize; |
1386 | if (objnr) { | 1399 | if (objnr) { |
1387 | objp = slabp->s_mem+(objnr-1)*cachep->objsize; | 1400 | objp = slabp->s_mem + (objnr - 1) * cachep->objsize; |
1388 | realobj = (char*)objp+obj_dbghead(cachep); | 1401 | realobj = (char *)objp + obj_dbghead(cachep); |
1389 | printk(KERN_ERR "Prev obj: start=%p, len=%d\n", | 1402 | printk(KERN_ERR "Prev obj: start=%p, len=%d\n", |
1390 | realobj, size); | 1403 | realobj, size); |
1391 | print_objinfo(cachep, objp, 2); | 1404 | print_objinfo(cachep, objp, 2); |
1392 | } | 1405 | } |
1393 | if (objnr+1 < cachep->num) { | 1406 | if (objnr + 1 < cachep->num) { |
1394 | objp = slabp->s_mem+(objnr+1)*cachep->objsize; | 1407 | objp = slabp->s_mem + (objnr + 1) * cachep->objsize; |
1395 | realobj = (char*)objp+obj_dbghead(cachep); | 1408 | realobj = (char *)objp + obj_dbghead(cachep); |
1396 | printk(KERN_ERR "Next obj: start=%p, len=%d\n", | 1409 | printk(KERN_ERR "Next obj: start=%p, len=%d\n", |
1397 | realobj, size); | 1410 | realobj, size); |
1398 | print_objinfo(cachep, objp, 2); | 1411 | print_objinfo(cachep, objp, 2); |
1399 | } | 1412 | } |
1400 | } | 1413 | } |
@@ -1405,7 +1418,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
1405 | * Before calling the slab must have been unlinked from the cache. | 1418 | * Before calling the slab must have been unlinked from the cache. |
1406 | * The cache-lock is not held/needed. | 1419 | * The cache-lock is not held/needed. |
1407 | */ | 1420 | */ |
1408 | static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp) | 1421 | static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp) |
1409 | { | 1422 | { |
1410 | void *addr = slabp->s_mem - slabp->colouroff; | 1423 | void *addr = slabp->s_mem - slabp->colouroff; |
1411 | 1424 | ||
@@ -1416,8 +1429,11 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
1416 | 1429 | ||
1417 | if (cachep->flags & SLAB_POISON) { | 1430 | if (cachep->flags & SLAB_POISON) { |
1418 | #ifdef CONFIG_DEBUG_PAGEALLOC | 1431 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1419 | if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep)) | 1432 | if ((cachep->objsize % PAGE_SIZE) == 0 |
1420 | kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1); | 1433 | && OFF_SLAB(cachep)) |
1434 | kernel_map_pages(virt_to_page(objp), | ||
1435 | cachep->objsize / PAGE_SIZE, | ||
1436 | 1); | ||
1421 | else | 1437 | else |
1422 | check_poison_obj(cachep, objp); | 1438 | check_poison_obj(cachep, objp); |
1423 | #else | 1439 | #else |
@@ -1427,20 +1443,20 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
1427 | if (cachep->flags & SLAB_RED_ZONE) { | 1443 | if (cachep->flags & SLAB_RED_ZONE) { |
1428 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) | 1444 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) |
1429 | slab_error(cachep, "start of a freed object " | 1445 | slab_error(cachep, "start of a freed object " |
1430 | "was overwritten"); | 1446 | "was overwritten"); |
1431 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | 1447 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) |
1432 | slab_error(cachep, "end of a freed object " | 1448 | slab_error(cachep, "end of a freed object " |
1433 | "was overwritten"); | 1449 | "was overwritten"); |
1434 | } | 1450 | } |
1435 | if (cachep->dtor && !(cachep->flags & SLAB_POISON)) | 1451 | if (cachep->dtor && !(cachep->flags & SLAB_POISON)) |
1436 | (cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0); | 1452 | (cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0); |
1437 | } | 1453 | } |
1438 | #else | 1454 | #else |
1439 | if (cachep->dtor) { | 1455 | if (cachep->dtor) { |
1440 | int i; | 1456 | int i; |
1441 | for (i = 0; i < cachep->num; i++) { | 1457 | for (i = 0; i < cachep->num; i++) { |
1442 | void* objp = slabp->s_mem+cachep->objsize*i; | 1458 | void *objp = slabp->s_mem + cachep->objsize * i; |
1443 | (cachep->dtor)(objp, cachep, 0); | 1459 | (cachep->dtor) (objp, cachep, 0); |
1444 | } | 1460 | } |
1445 | } | 1461 | } |
1446 | #endif | 1462 | #endif |
@@ -1448,7 +1464,7 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
1448 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { | 1464 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { |
1449 | struct slab_rcu *slab_rcu; | 1465 | struct slab_rcu *slab_rcu; |
1450 | 1466 | ||
1451 | slab_rcu = (struct slab_rcu *) slabp; | 1467 | slab_rcu = (struct slab_rcu *)slabp; |
1452 | slab_rcu->cachep = cachep; | 1468 | slab_rcu->cachep = cachep; |
1453 | slab_rcu->addr = addr; | 1469 | slab_rcu->addr = addr; |
1454 | call_rcu(&slab_rcu->head, kmem_rcu_free); | 1470 | call_rcu(&slab_rcu->head, kmem_rcu_free); |
@@ -1466,11 +1482,58 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
1466 | int node; | 1482 | int node; |
1467 | 1483 | ||
1468 | for_each_online_node(node) { | 1484 | for_each_online_node(node) { |
1469 | cachep->nodelists[node] = &initkmem_list3[index+node]; | 1485 | cachep->nodelists[node] = &initkmem_list3[index + node]; |
1470 | cachep->nodelists[node]->next_reap = jiffies + | 1486 | cachep->nodelists[node]->next_reap = jiffies + |
1471 | REAPTIMEOUT_LIST3 + | 1487 | REAPTIMEOUT_LIST3 + |
1472 | ((unsigned long)cachep)%REAPTIMEOUT_LIST3; | 1488 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; |
1489 | } | ||
1490 | } | ||
1491 | |||
1492 | /** | ||
1493 | * calculate_slab_order - calculate size (page order) of slabs and the number | ||
1494 | * of objects per slab. | ||
1495 | * | ||
1496 | * This could be made much more intelligent. For now, try to avoid using | ||
1497 | * high order pages for slabs. When the gfp() functions are more friendly | ||
1498 | * towards high-order requests, this should be changed. | ||
1499 | */ | ||
1500 | static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size, | ||
1501 | size_t align, gfp_t flags) | ||
1502 | { | ||
1503 | size_t left_over = 0; | ||
1504 | |||
1505 | for (;; cachep->gfporder++) { | ||
1506 | unsigned int num; | ||
1507 | size_t remainder; | ||
1508 | |||
1509 | if (cachep->gfporder > MAX_GFP_ORDER) { | ||
1510 | cachep->num = 0; | ||
1511 | break; | ||
1512 | } | ||
1513 | |||
1514 | cache_estimate(cachep->gfporder, size, align, flags, | ||
1515 | &remainder, &num); | ||
1516 | if (!num) | ||
1517 | continue; | ||
1518 | /* More than offslab_limit objects will cause problems */ | ||
1519 | if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) | ||
1520 | break; | ||
1521 | |||
1522 | cachep->num = num; | ||
1523 | left_over = remainder; | ||
1524 | |||
1525 | /* | ||
1526 | * Large number of objects is good, but very large slabs are | ||
1527 | * currently bad for the gfp()s. | ||
1528 | */ | ||
1529 | if (cachep->gfporder >= slab_break_gfp_order) | ||
1530 | break; | ||
1531 | |||
1532 | if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder)) | ||
1533 | /* Acceptable internal fragmentation */ | ||
1534 | break; | ||
1473 | } | 1535 | } |
1536 | return left_over; | ||
1474 | } | 1537 | } |
1475 | 1538 | ||
1476 | /** | 1539 | /** |
@@ -1519,14 +1582,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1519 | * Sanity checks... these are all serious usage bugs. | 1582 | * Sanity checks... these are all serious usage bugs. |
1520 | */ | 1583 | */ |
1521 | if ((!name) || | 1584 | if ((!name) || |
1522 | in_interrupt() || | 1585 | in_interrupt() || |
1523 | (size < BYTES_PER_WORD) || | 1586 | (size < BYTES_PER_WORD) || |
1524 | (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) || | 1587 | (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { |
1525 | (dtor && !ctor)) { | 1588 | printk(KERN_ERR "%s: Early error in slab %s\n", |
1526 | printk(KERN_ERR "%s: Early error in slab %s\n", | 1589 | __FUNCTION__, name); |
1527 | __FUNCTION__, name); | 1590 | BUG(); |
1528 | BUG(); | 1591 | } |
1529 | } | ||
1530 | 1592 | ||
1531 | down(&cache_chain_sem); | 1593 | down(&cache_chain_sem); |
1532 | 1594 | ||
@@ -1546,11 +1608,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1546 | set_fs(old_fs); | 1608 | set_fs(old_fs); |
1547 | if (res) { | 1609 | if (res) { |
1548 | printk("SLAB: cache with size %d has lost its name\n", | 1610 | printk("SLAB: cache with size %d has lost its name\n", |
1549 | pc->objsize); | 1611 | pc->objsize); |
1550 | continue; | 1612 | continue; |
1551 | } | 1613 | } |
1552 | 1614 | ||
1553 | if (!strcmp(pc->name,name)) { | 1615 | if (!strcmp(pc->name, name)) { |
1554 | printk("kmem_cache_create: duplicate cache %s\n", name); | 1616 | printk("kmem_cache_create: duplicate cache %s\n", name); |
1555 | dump_stack(); | 1617 | dump_stack(); |
1556 | goto oops; | 1618 | goto oops; |
@@ -1562,10 +1624,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1562 | if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { | 1624 | if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { |
1563 | /* No constructor, but inital state check requested */ | 1625 | /* No constructor, but inital state check requested */ |
1564 | printk(KERN_ERR "%s: No con, but init state check " | 1626 | printk(KERN_ERR "%s: No con, but init state check " |
1565 | "requested - %s\n", __FUNCTION__, name); | 1627 | "requested - %s\n", __FUNCTION__, name); |
1566 | flags &= ~SLAB_DEBUG_INITIAL; | 1628 | flags &= ~SLAB_DEBUG_INITIAL; |
1567 | } | 1629 | } |
1568 | |||
1569 | #if FORCED_DEBUG | 1630 | #if FORCED_DEBUG |
1570 | /* | 1631 | /* |
1571 | * Enable redzoning and last user accounting, except for caches with | 1632 | * Enable redzoning and last user accounting, except for caches with |
@@ -1573,8 +1634,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1573 | * above the next power of two: caches with object sizes just above a | 1634 | * above the next power of two: caches with object sizes just above a |
1574 | * power of two have a significant amount of internal fragmentation. | 1635 | * power of two have a significant amount of internal fragmentation. |
1575 | */ | 1636 | */ |
1576 | if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD))) | 1637 | if ((size < 4096 |
1577 | flags |= SLAB_RED_ZONE|SLAB_STORE_USER; | 1638 | || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD))) |
1639 | flags |= SLAB_RED_ZONE | SLAB_STORE_USER; | ||
1578 | if (!(flags & SLAB_DESTROY_BY_RCU)) | 1640 | if (!(flags & SLAB_DESTROY_BY_RCU)) |
1579 | flags |= SLAB_POISON; | 1641 | flags |= SLAB_POISON; |
1580 | #endif | 1642 | #endif |
@@ -1595,9 +1657,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1595 | * unaligned accesses for some archs when redzoning is used, and makes | 1657 | * unaligned accesses for some archs when redzoning is used, and makes |
1596 | * sure any on-slab bufctl's are also correctly aligned. | 1658 | * sure any on-slab bufctl's are also correctly aligned. |
1597 | */ | 1659 | */ |
1598 | if (size & (BYTES_PER_WORD-1)) { | 1660 | if (size & (BYTES_PER_WORD - 1)) { |
1599 | size += (BYTES_PER_WORD-1); | 1661 | size += (BYTES_PER_WORD - 1); |
1600 | size &= ~(BYTES_PER_WORD-1); | 1662 | size &= ~(BYTES_PER_WORD - 1); |
1601 | } | 1663 | } |
1602 | 1664 | ||
1603 | /* calculate out the final buffer alignment: */ | 1665 | /* calculate out the final buffer alignment: */ |
@@ -1608,7 +1670,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1608 | * objects into one cacheline. | 1670 | * objects into one cacheline. |
1609 | */ | 1671 | */ |
1610 | ralign = cache_line_size(); | 1672 | ralign = cache_line_size(); |
1611 | while (size <= ralign/2) | 1673 | while (size <= ralign / 2) |
1612 | ralign /= 2; | 1674 | ralign /= 2; |
1613 | } else { | 1675 | } else { |
1614 | ralign = BYTES_PER_WORD; | 1676 | ralign = BYTES_PER_WORD; |
@@ -1617,13 +1679,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1617 | if (ralign < ARCH_SLAB_MINALIGN) { | 1679 | if (ralign < ARCH_SLAB_MINALIGN) { |
1618 | ralign = ARCH_SLAB_MINALIGN; | 1680 | ralign = ARCH_SLAB_MINALIGN; |
1619 | if (ralign > BYTES_PER_WORD) | 1681 | if (ralign > BYTES_PER_WORD) |
1620 | flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); | 1682 | flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); |
1621 | } | 1683 | } |
1622 | /* 3) caller mandated alignment: disables debug if necessary */ | 1684 | /* 3) caller mandated alignment: disables debug if necessary */ |
1623 | if (ralign < align) { | 1685 | if (ralign < align) { |
1624 | ralign = align; | 1686 | ralign = align; |
1625 | if (ralign > BYTES_PER_WORD) | 1687 | if (ralign > BYTES_PER_WORD) |
1626 | flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); | 1688 | flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); |
1627 | } | 1689 | } |
1628 | /* 4) Store it. Note that the debug code below can reduce | 1690 | /* 4) Store it. Note that the debug code below can reduce |
1629 | * the alignment to BYTES_PER_WORD. | 1691 | * the alignment to BYTES_PER_WORD. |
@@ -1645,7 +1707,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1645 | 1707 | ||
1646 | /* add space for red zone words */ | 1708 | /* add space for red zone words */ |
1647 | cachep->dbghead += BYTES_PER_WORD; | 1709 | cachep->dbghead += BYTES_PER_WORD; |
1648 | size += 2*BYTES_PER_WORD; | 1710 | size += 2 * BYTES_PER_WORD; |
1649 | } | 1711 | } |
1650 | if (flags & SLAB_STORE_USER) { | 1712 | if (flags & SLAB_STORE_USER) { |
1651 | /* user store requires word alignment and | 1713 | /* user store requires word alignment and |
@@ -1656,7 +1718,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1656 | size += BYTES_PER_WORD; | 1718 | size += BYTES_PER_WORD; |
1657 | } | 1719 | } |
1658 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 1720 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
1659 | if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) { | 1721 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size |
1722 | && cachep->reallen > cache_line_size() && size < PAGE_SIZE) { | ||
1660 | cachep->dbghead += PAGE_SIZE - size; | 1723 | cachep->dbghead += PAGE_SIZE - size; |
1661 | size = PAGE_SIZE; | 1724 | size = PAGE_SIZE; |
1662 | } | 1725 | } |
@@ -1664,7 +1727,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1664 | #endif | 1727 | #endif |
1665 | 1728 | ||
1666 | /* Determine if the slab management is 'on' or 'off' slab. */ | 1729 | /* Determine if the slab management is 'on' or 'off' slab. */ |
1667 | if (size >= (PAGE_SIZE>>3)) | 1730 | if (size >= (PAGE_SIZE >> 3)) |
1668 | /* | 1731 | /* |
1669 | * Size is large, assume best to place the slab management obj | 1732 | * Size is large, assume best to place the slab management obj |
1670 | * off-slab (should allow better packing of objs). | 1733 | * off-slab (should allow better packing of objs). |
@@ -1681,47 +1744,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1681 | */ | 1744 | */ |
1682 | cachep->gfporder = 0; | 1745 | cachep->gfporder = 0; |
1683 | cache_estimate(cachep->gfporder, size, align, flags, | 1746 | cache_estimate(cachep->gfporder, size, align, flags, |
1684 | &left_over, &cachep->num); | 1747 | &left_over, &cachep->num); |
1685 | } else { | 1748 | } else |
1686 | /* | 1749 | left_over = calculate_slab_order(cachep, size, align, flags); |
1687 | * Calculate size (in pages) of slabs, and the num of objs per | ||
1688 | * slab. This could be made much more intelligent. For now, | ||
1689 | * try to avoid using high page-orders for slabs. When the | ||
1690 | * gfp() funcs are more friendly towards high-order requests, | ||
1691 | * this should be changed. | ||
1692 | */ | ||
1693 | do { | ||
1694 | unsigned int break_flag = 0; | ||
1695 | cal_wastage: | ||
1696 | cache_estimate(cachep->gfporder, size, align, flags, | ||
1697 | &left_over, &cachep->num); | ||
1698 | if (break_flag) | ||
1699 | break; | ||
1700 | if (cachep->gfporder >= MAX_GFP_ORDER) | ||
1701 | break; | ||
1702 | if (!cachep->num) | ||
1703 | goto next; | ||
1704 | if (flags & CFLGS_OFF_SLAB && | ||
1705 | cachep->num > offslab_limit) { | ||
1706 | /* This num of objs will cause problems. */ | ||
1707 | cachep->gfporder--; | ||
1708 | break_flag++; | ||
1709 | goto cal_wastage; | ||
1710 | } | ||
1711 | |||
1712 | /* | ||
1713 | * Large num of objs is good, but v. large slabs are | ||
1714 | * currently bad for the gfp()s. | ||
1715 | */ | ||
1716 | if (cachep->gfporder >= slab_break_gfp_order) | ||
1717 | break; | ||
1718 | |||
1719 | if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder)) | ||
1720 | break; /* Acceptable internal fragmentation. */ | ||
1721 | next: | ||
1722 | cachep->gfporder++; | ||
1723 | } while (1); | ||
1724 | } | ||
1725 | 1750 | ||
1726 | if (!cachep->num) { | 1751 | if (!cachep->num) { |
1727 | printk("kmem_cache_create: couldn't create cache %s.\n", name); | 1752 | printk("kmem_cache_create: couldn't create cache %s.\n", name); |
@@ -1729,8 +1754,8 @@ next:
1729 | cachep = NULL; | 1754 | cachep = NULL; |
1730 | goto oops; | 1755 | goto oops; |
1731 | } | 1756 | } |
1732 | slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t) | 1757 | slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) |
1733 | + sizeof(struct slab), align); | 1758 | + sizeof(struct slab), align); |
1734 | 1759 | ||
1735 | /* | 1760 | /* |
1736 | * If the slab has been placed off-slab, and we have enough space then | 1761 | * If the slab has been placed off-slab, and we have enough space then |
@@ -1743,14 +1768,15 @@ next: | |||
1743 | 1768 | ||
1744 | if (flags & CFLGS_OFF_SLAB) { | 1769 | if (flags & CFLGS_OFF_SLAB) { |
1745 | /* really off slab. No need for manual alignment */ | 1770 | /* really off slab. No need for manual alignment */ |
1746 | slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab); | 1771 | slab_size = |
1772 | cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); | ||
1747 | } | 1773 | } |
1748 | 1774 | ||
1749 | cachep->colour_off = cache_line_size(); | 1775 | cachep->colour_off = cache_line_size(); |
1750 | /* Offset must be a multiple of the alignment. */ | 1776 | /* Offset must be a multiple of the alignment. */ |
1751 | if (cachep->colour_off < align) | 1777 | if (cachep->colour_off < align) |
1752 | cachep->colour_off = align; | 1778 | cachep->colour_off = align; |
1753 | cachep->colour = left_over/cachep->colour_off; | 1779 | cachep->colour = left_over / cachep->colour_off; |
1754 | cachep->slab_size = slab_size; | 1780 | cachep->slab_size = slab_size; |
1755 | cachep->flags = flags; | 1781 | cachep->flags = flags; |
1756 | cachep->gfpflags = 0; | 1782 | cachep->gfpflags = 0; |
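
The colour fields set up just above drive slab colouring: colour_off is the
cache line size, raised to the object alignment when that is larger, and
colour = left_over / colour_off is how many distinct starting offsets the
leftover space can absorb. Each successive slab then starts its objects at the
next multiple of colour_off, so equivalent objects in different slabs do not
all land on the same cache lines; the per-slab offset selection itself lives
in cache_grow() and is not part of the lines shown here. The snippet below is
a standalone illustration with made-up figures.

#include <stdio.h>

/* Illustrative figures only: a 128-byte cache line, 64-byte object
 * alignment and 600 bytes of leftover space per slab. */
#define CACHE_LINE	128
#define OBJ_ALIGN	64
#define LEFT_OVER	600

int main(void)
{
	unsigned int colour_off = CACHE_LINE;
	unsigned int colour, next = 0, slab;

	if (colour_off < OBJ_ALIGN)		/* offset must be a multiple */
		colour_off = OBJ_ALIGN;		/* of the alignment          */
	colour = LEFT_OVER / colour_off;	/* number of distinct offsets */

	/* Each new slab shifts its first object by one more colour_off,
	 * wrapping back to zero after 'colour' slabs. */
	for (slab = 0; slab < 8; slab++) {
		printf("slab %u starts its objects %u bytes in\n",
		       slab, next * colour_off);
		if (++next >= colour)
			next = 0;
	}
	return 0;
}
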
@@ -1777,7 +1803,7 @@ next: | |||
1777 | * the creation of further caches will BUG(). | 1803 | * the creation of further caches will BUG(). |
1778 | */ | 1804 | */ |
1779 | cachep->array[smp_processor_id()] = | 1805 | cachep->array[smp_processor_id()] = |
1780 | &initarray_generic.cache; | 1806 | &initarray_generic.cache; |
1781 | 1807 | ||
1782 | /* If the cache that's used by | 1808 | /* If the cache that's used by |
1783 | * kmalloc(sizeof(kmem_list3)) is the first cache, | 1809 | * kmalloc(sizeof(kmem_list3)) is the first cache, |
@@ -1791,8 +1817,7 @@ next: | |||
1791 | g_cpucache_up = PARTIAL_AC; | 1817 | g_cpucache_up = PARTIAL_AC; |
1792 | } else { | 1818 | } else { |
1793 | cachep->array[smp_processor_id()] = | 1819 | cachep->array[smp_processor_id()] = |
1794 | kmalloc(sizeof(struct arraycache_init), | 1820 | kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); |
1795 | GFP_KERNEL); | ||
1796 | 1821 | ||
1797 | if (g_cpucache_up == PARTIAL_AC) { | 1822 | if (g_cpucache_up == PARTIAL_AC) { |
1798 | set_up_list3s(cachep, SIZE_L3); | 1823 | set_up_list3s(cachep, SIZE_L3); |
@@ -1802,16 +1827,18 @@ next: | |||
1802 | for_each_online_node(node) { | 1827 | for_each_online_node(node) { |
1803 | 1828 | ||
1804 | cachep->nodelists[node] = | 1829 | cachep->nodelists[node] = |
1805 | kmalloc_node(sizeof(struct kmem_list3), | 1830 | kmalloc_node(sizeof |
1806 | GFP_KERNEL, node); | 1831 | (struct kmem_list3), |
1832 | GFP_KERNEL, node); | ||
1807 | BUG_ON(!cachep->nodelists[node]); | 1833 | BUG_ON(!cachep->nodelists[node]); |
1808 | kmem_list3_init(cachep->nodelists[node]); | 1834 | kmem_list3_init(cachep-> |
1835 | nodelists[node]); | ||
1809 | } | 1836 | } |
1810 | } | 1837 | } |
1811 | } | 1838 | } |
1812 | cachep->nodelists[numa_node_id()]->next_reap = | 1839 | cachep->nodelists[numa_node_id()]->next_reap = |
1813 | jiffies + REAPTIMEOUT_LIST3 + | 1840 | jiffies + REAPTIMEOUT_LIST3 + |
1814 | ((unsigned long)cachep)%REAPTIMEOUT_LIST3; | 1841 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; |
1815 | 1842 | ||
1816 | BUG_ON(!ac_data(cachep)); | 1843 | BUG_ON(!ac_data(cachep)); |
1817 | ac_data(cachep)->avail = 0; | 1844 | ac_data(cachep)->avail = 0; |
@@ -1820,15 +1847,15 @@ next: | |||
1820 | ac_data(cachep)->touched = 0; | 1847 | ac_data(cachep)->touched = 0; |
1821 | cachep->batchcount = 1; | 1848 | cachep->batchcount = 1; |
1822 | cachep->limit = BOOT_CPUCACHE_ENTRIES; | 1849 | cachep->limit = BOOT_CPUCACHE_ENTRIES; |
1823 | } | 1850 | } |
1824 | 1851 | ||
1825 | /* cache setup completed, link it into the list */ | 1852 | /* cache setup completed, link it into the list */ |
1826 | list_add(&cachep->next, &cache_chain); | 1853 | list_add(&cachep->next, &cache_chain); |
1827 | unlock_cpu_hotplug(); | 1854 | unlock_cpu_hotplug(); |
1828 | oops: | 1855 | oops: |
1829 | if (!cachep && (flags & SLAB_PANIC)) | 1856 | if (!cachep && (flags & SLAB_PANIC)) |
1830 | panic("kmem_cache_create(): failed to create slab `%s'\n", | 1857 | panic("kmem_cache_create(): failed to create slab `%s'\n", |
1831 | name); | 1858 | name); |
1832 | up(&cache_chain_sem); | 1859 | up(&cache_chain_sem); |
1833 | return cachep; | 1860 | return cachep; |
1834 | } | 1861 | } |
@@ -1871,7 +1898,7 @@ static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node) | |||
1871 | /* | 1898 | /* |
1872 | * Waits for all CPUs to execute func(). | 1899 | * Waits for all CPUs to execute func(). |
1873 | */ | 1900 | */ |
1874 | static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg) | 1901 | static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg) |
1875 | { | 1902 | { |
1876 | check_irq_on(); | 1903 | check_irq_on(); |
1877 | preempt_disable(); | 1904 | preempt_disable(); |
@@ -1886,12 +1913,12 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg) | |||
1886 | preempt_enable(); | 1913 | preempt_enable(); |
1887 | } | 1914 | } |
1888 | 1915 | ||
1889 | static void drain_array_locked(kmem_cache_t* cachep, | 1916 | static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac, |
1890 | struct array_cache *ac, int force, int node); | 1917 | int force, int node); |
1891 | 1918 | ||
1892 | static void do_drain(void *arg) | 1919 | static void do_drain(void *arg) |
1893 | { | 1920 | { |
1894 | kmem_cache_t *cachep = (kmem_cache_t*)arg; | 1921 | kmem_cache_t *cachep = (kmem_cache_t *) arg; |
1895 | struct array_cache *ac; | 1922 | struct array_cache *ac; |
1896 | int node = numa_node_id(); | 1923 | int node = numa_node_id(); |
1897 | 1924 | ||
@@ -1911,7 +1938,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep) | |||
1911 | smp_call_function_all_cpus(do_drain, cachep); | 1938 | smp_call_function_all_cpus(do_drain, cachep); |
1912 | check_irq_on(); | 1939 | check_irq_on(); |
1913 | spin_lock_irq(&cachep->spinlock); | 1940 | spin_lock_irq(&cachep->spinlock); |
1914 | for_each_online_node(node) { | 1941 | for_each_online_node(node) { |
1915 | l3 = cachep->nodelists[node]; | 1942 | l3 = cachep->nodelists[node]; |
1916 | if (l3) { | 1943 | if (l3) { |
1917 | spin_lock(&l3->list_lock); | 1944 | spin_lock(&l3->list_lock); |
@@ -1949,8 +1976,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node) | |||
1949 | slab_destroy(cachep, slabp); | 1976 | slab_destroy(cachep, slabp); |
1950 | spin_lock_irq(&l3->list_lock); | 1977 | spin_lock_irq(&l3->list_lock); |
1951 | } | 1978 | } |
1952 | ret = !list_empty(&l3->slabs_full) || | 1979 | ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial); |
1953 | !list_empty(&l3->slabs_partial); | ||
1954 | return ret; | 1980 | return ret; |
1955 | } | 1981 | } |
1956 | 1982 | ||
@@ -2006,7 +2032,7 @@ EXPORT_SYMBOL(kmem_cache_shrink); | |||
2006 | * The caller must guarantee that noone will allocate memory from the cache | 2032 | * The caller must guarantee that noone will allocate memory from the cache |
2007 | * during the kmem_cache_destroy(). | 2033 | * during the kmem_cache_destroy(). |
2008 | */ | 2034 | */ |
2009 | int kmem_cache_destroy(kmem_cache_t * cachep) | 2035 | int kmem_cache_destroy(kmem_cache_t *cachep) |
2010 | { | 2036 | { |
2011 | int i; | 2037 | int i; |
2012 | struct kmem_list3 *l3; | 2038 | struct kmem_list3 *l3; |
@@ -2028,7 +2054,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep) | |||
2028 | if (__cache_shrink(cachep)) { | 2054 | if (__cache_shrink(cachep)) { |
2029 | slab_error(cachep, "Can't free all objects"); | 2055 | slab_error(cachep, "Can't free all objects"); |
2030 | down(&cache_chain_sem); | 2056 | down(&cache_chain_sem); |
2031 | list_add(&cachep->next,&cache_chain); | 2057 | list_add(&cachep->next, &cache_chain); |
2032 | up(&cache_chain_sem); | 2058 | up(&cache_chain_sem); |
2033 | unlock_cpu_hotplug(); | 2059 | unlock_cpu_hotplug(); |
2034 | return 1; | 2060 | return 1; |
@@ -2038,7 +2064,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep) | |||
2038 | synchronize_rcu(); | 2064 | synchronize_rcu(); |
2039 | 2065 | ||
2040 | for_each_online_cpu(i) | 2066 | for_each_online_cpu(i) |
2041 | kfree(cachep->array[i]); | 2067 | kfree(cachep->array[i]); |
2042 | 2068 | ||
2043 | /* NUMA: free the list3 structures */ | 2069 | /* NUMA: free the list3 structures */ |
2044 | for_each_online_node(i) { | 2070 | for_each_online_node(i) { |
@@ -2057,39 +2083,39 @@ int kmem_cache_destroy(kmem_cache_t * cachep) | |||
2057 | EXPORT_SYMBOL(kmem_cache_destroy); | 2083 | EXPORT_SYMBOL(kmem_cache_destroy); |
2058 | 2084 | ||
2059 | /* Get the memory for a slab management obj. */ | 2085 | /* Get the memory for a slab management obj. */ |
2060 | static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, | 2086 | static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp, |
2061 | int colour_off, gfp_t local_flags) | 2087 | int colour_off, gfp_t local_flags) |
2062 | { | 2088 | { |
2063 | struct slab *slabp; | 2089 | struct slab *slabp; |
2064 | 2090 | ||
2065 | if (OFF_SLAB(cachep)) { | 2091 | if (OFF_SLAB(cachep)) { |
2066 | /* Slab management obj is off-slab. */ | 2092 | /* Slab management obj is off-slab. */ |
2067 | slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags); | 2093 | slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags); |
2068 | if (!slabp) | 2094 | if (!slabp) |
2069 | return NULL; | 2095 | return NULL; |
2070 | } else { | 2096 | } else { |
2071 | slabp = objp+colour_off; | 2097 | slabp = objp + colour_off; |
2072 | colour_off += cachep->slab_size; | 2098 | colour_off += cachep->slab_size; |
2073 | } | 2099 | } |
2074 | slabp->inuse = 0; | 2100 | slabp->inuse = 0; |
2075 | slabp->colouroff = colour_off; | 2101 | slabp->colouroff = colour_off; |
2076 | slabp->s_mem = objp+colour_off; | 2102 | slabp->s_mem = objp + colour_off; |
2077 | 2103 | ||
2078 | return slabp; | 2104 | return slabp; |
2079 | } | 2105 | } |
2080 | 2106 | ||
2081 | static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) | 2107 | static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) |
2082 | { | 2108 | { |
2083 | return (kmem_bufctl_t *)(slabp+1); | 2109 | return (kmem_bufctl_t *) (slabp + 1); |
2084 | } | 2110 | } |
2085 | 2111 | ||
2086 | static void cache_init_objs(kmem_cache_t *cachep, | 2112 | static void cache_init_objs(kmem_cache_t *cachep, |
2087 | struct slab *slabp, unsigned long ctor_flags) | 2113 | struct slab *slabp, unsigned long ctor_flags) |
2088 | { | 2114 | { |
2089 | int i; | 2115 | int i; |
2090 | 2116 | ||
2091 | for (i = 0; i < cachep->num; i++) { | 2117 | for (i = 0; i < cachep->num; i++) { |
2092 | void *objp = slabp->s_mem+cachep->objsize*i; | 2118 | void *objp = slabp->s_mem + cachep->objsize * i; |
2093 | #if DEBUG | 2119 | #if DEBUG |
2094 | /* need to poison the objs? */ | 2120 | /* need to poison the objs? */ |
2095 | if (cachep->flags & SLAB_POISON) | 2121 | if (cachep->flags & SLAB_POISON) |
@@ -2107,25 +2133,28 @@ static void cache_init_objs(kmem_cache_t *cachep, | |||
2107 | * Otherwise, deadlock. They must also be threaded. | 2133 | * Otherwise, deadlock. They must also be threaded. |
2108 | */ | 2134 | */ |
2109 | if (cachep->ctor && !(cachep->flags & SLAB_POISON)) | 2135 | if (cachep->ctor && !(cachep->flags & SLAB_POISON)) |
2110 | cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags); | 2136 | cachep->ctor(objp + obj_dbghead(cachep), cachep, |
2137 | ctor_flags); | ||
2111 | 2138 | ||
2112 | if (cachep->flags & SLAB_RED_ZONE) { | 2139 | if (cachep->flags & SLAB_RED_ZONE) { |
2113 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | 2140 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) |
2114 | slab_error(cachep, "constructor overwrote the" | 2141 | slab_error(cachep, "constructor overwrote the" |
2115 | " end of an object"); | 2142 | " end of an object"); |
2116 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) | 2143 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) |
2117 | slab_error(cachep, "constructor overwrote the" | 2144 | slab_error(cachep, "constructor overwrote the" |
2118 | " start of an object"); | 2145 | " start of an object"); |
2119 | } | 2146 | } |
2120 | if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) | 2147 | if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) |
2121 | kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0); | 2148 | && cachep->flags & SLAB_POISON) |
2149 | kernel_map_pages(virt_to_page(objp), | ||
2150 | cachep->objsize / PAGE_SIZE, 0); | ||
2122 | #else | 2151 | #else |
2123 | if (cachep->ctor) | 2152 | if (cachep->ctor) |
2124 | cachep->ctor(objp, cachep, ctor_flags); | 2153 | cachep->ctor(objp, cachep, ctor_flags); |
2125 | #endif | 2154 | #endif |
2126 | slab_bufctl(slabp)[i] = i+1; | 2155 | slab_bufctl(slabp)[i] = i + 1; |
2127 | } | 2156 | } |
2128 | slab_bufctl(slabp)[i-1] = BUFCTL_END; | 2157 | slab_bufctl(slabp)[i - 1] = BUFCTL_END; |
2129 | slabp->free = 0; | 2158 | slabp->free = 0; |
2130 | } | 2159 | } |
2131 | 2160 | ||
@@ -2161,17 +2190,17 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) | |||
2161 | */ | 2190 | */ |
2162 | static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) | 2191 | static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) |
2163 | { | 2192 | { |
2164 | struct slab *slabp; | 2193 | struct slab *slabp; |
2165 | void *objp; | 2194 | void *objp; |
2166 | size_t offset; | 2195 | size_t offset; |
2167 | gfp_t local_flags; | 2196 | gfp_t local_flags; |
2168 | unsigned long ctor_flags; | 2197 | unsigned long ctor_flags; |
2169 | struct kmem_list3 *l3; | 2198 | struct kmem_list3 *l3; |
2170 | 2199 | ||
2171 | /* Be lazy and only check for valid flags here, | 2200 | /* Be lazy and only check for valid flags here, |
2172 | * keeping it out of the critical path in kmem_cache_alloc(). | 2201 | * keeping it out of the critical path in kmem_cache_alloc(). |
2173 | */ | 2202 | */ |
2174 | if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW)) | 2203 | if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)) |
2175 | BUG(); | 2204 | BUG(); |
2176 | if (flags & SLAB_NO_GROW) | 2205 | if (flags & SLAB_NO_GROW) |
2177 | return 0; | 2206 | return 0; |
@@ -2237,9 +2266,9 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) | |||
2237 | l3->free_objects += cachep->num; | 2266 | l3->free_objects += cachep->num; |
2238 | spin_unlock(&l3->list_lock); | 2267 | spin_unlock(&l3->list_lock); |
2239 | return 1; | 2268 | return 1; |
2240 | opps1: | 2269 | opps1: |
2241 | kmem_freepages(cachep, objp); | 2270 | kmem_freepages(cachep, objp); |
2242 | failed: | 2271 | failed: |
2243 | if (local_flags & __GFP_WAIT) | 2272 | if (local_flags & __GFP_WAIT) |
2244 | local_irq_disable(); | 2273 | local_irq_disable(); |
2245 | return 0; | 2274 | return 0; |
@@ -2259,18 +2288,19 @@ static void kfree_debugcheck(const void *objp) | |||
2259 | 2288 | ||
2260 | if (!virt_addr_valid(objp)) { | 2289 | if (!virt_addr_valid(objp)) { |
2261 | printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", | 2290 | printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", |
2262 | (unsigned long)objp); | 2291 | (unsigned long)objp); |
2263 | BUG(); | 2292 | BUG(); |
2264 | } | 2293 | } |
2265 | page = virt_to_page(objp); | 2294 | page = virt_to_page(objp); |
2266 | if (!PageSlab(page)) { | 2295 | if (!PageSlab(page)) { |
2267 | printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp); | 2296 | printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", |
2297 | (unsigned long)objp); | ||
2268 | BUG(); | 2298 | BUG(); |
2269 | } | 2299 | } |
2270 | } | 2300 | } |
2271 | 2301 | ||
2272 | static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, | 2302 | static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, |
2273 | void *caller) | 2303 | void *caller) |
2274 | { | 2304 | { |
2275 | struct page *page; | 2305 | struct page *page; |
2276 | unsigned int objnr; | 2306 | unsigned int objnr; |
@@ -2281,20 +2311,26 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, | |||
2281 | page = virt_to_page(objp); | 2311 | page = virt_to_page(objp); |
2282 | 2312 | ||
2283 | if (page_get_cache(page) != cachep) { | 2313 | if (page_get_cache(page) != cachep) { |
2284 | printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n", | 2314 | printk(KERN_ERR |
2285 | page_get_cache(page),cachep); | 2315 | "mismatch in kmem_cache_free: expected cache %p, got %p\n", |
2316 | page_get_cache(page), cachep); | ||
2286 | printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); | 2317 | printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); |
2287 | printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name); | 2318 | printk(KERN_ERR "%p is %s.\n", page_get_cache(page), |
2319 | page_get_cache(page)->name); | ||
2288 | WARN_ON(1); | 2320 | WARN_ON(1); |
2289 | } | 2321 | } |
2290 | slabp = page_get_slab(page); | 2322 | slabp = page_get_slab(page); |
2291 | 2323 | ||
2292 | if (cachep->flags & SLAB_RED_ZONE) { | 2324 | if (cachep->flags & SLAB_RED_ZONE) { |
2293 | if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) { | 2325 | if (*dbg_redzone1(cachep, objp) != RED_ACTIVE |
2294 | slab_error(cachep, "double free, or memory outside" | 2326 | || *dbg_redzone2(cachep, objp) != RED_ACTIVE) { |
2295 | " object was overwritten"); | 2327 | slab_error(cachep, |
2296 | printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", | 2328 | "double free, or memory outside" |
2297 | objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); | 2329 | " object was overwritten"); |
2330 | printk(KERN_ERR | ||
2331 | "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", | ||
2332 | objp, *dbg_redzone1(cachep, objp), | ||
2333 | *dbg_redzone2(cachep, objp)); | ||
2298 | } | 2334 | } |
2299 | *dbg_redzone1(cachep, objp) = RED_INACTIVE; | 2335 | *dbg_redzone1(cachep, objp) = RED_INACTIVE; |
2300 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; | 2336 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; |
@@ -2302,30 +2338,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, | |||
2302 | if (cachep->flags & SLAB_STORE_USER) | 2338 | if (cachep->flags & SLAB_STORE_USER) |
2303 | *dbg_userword(cachep, objp) = caller; | 2339 | *dbg_userword(cachep, objp) = caller; |
2304 | 2340 | ||
2305 | objnr = (objp-slabp->s_mem)/cachep->objsize; | 2341 | objnr = (objp - slabp->s_mem) / cachep->objsize; |
2306 | 2342 | ||
2307 | BUG_ON(objnr >= cachep->num); | 2343 | BUG_ON(objnr >= cachep->num); |
2308 | BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize); | 2344 | BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize); |
2309 | 2345 | ||
2310 | if (cachep->flags & SLAB_DEBUG_INITIAL) { | 2346 | if (cachep->flags & SLAB_DEBUG_INITIAL) { |
2311 | /* Need to call the slab's constructor so the | 2347 | /* Need to call the slab's constructor so the |
2312 | * caller can perform a verify of its state (debugging). | 2348 | * caller can perform a verify of its state (debugging). |
2313 | * Called without the cache-lock held. | 2349 | * Called without the cache-lock held. |
2314 | */ | 2350 | */ |
2315 | cachep->ctor(objp+obj_dbghead(cachep), | 2351 | cachep->ctor(objp + obj_dbghead(cachep), |
2316 | cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY); | 2352 | cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY); |
2317 | } | 2353 | } |
2318 | if (cachep->flags & SLAB_POISON && cachep->dtor) { | 2354 | if (cachep->flags & SLAB_POISON && cachep->dtor) { |
2319 | /* we want to cache poison the object, | 2355 | /* we want to cache poison the object, |
2320 | * call the destruction callback | 2356 | * call the destruction callback |
2321 | */ | 2357 | */ |
2322 | cachep->dtor(objp+obj_dbghead(cachep), cachep, 0); | 2358 | cachep->dtor(objp + obj_dbghead(cachep), cachep, 0); |
2323 | } | 2359 | } |
2324 | if (cachep->flags & SLAB_POISON) { | 2360 | if (cachep->flags & SLAB_POISON) { |
2325 | #ifdef CONFIG_DEBUG_PAGEALLOC | 2361 | #ifdef CONFIG_DEBUG_PAGEALLOC |
2326 | if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) { | 2362 | if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) { |
2327 | store_stackinfo(cachep, objp, (unsigned long)caller); | 2363 | store_stackinfo(cachep, objp, (unsigned long)caller); |
2328 | kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0); | 2364 | kernel_map_pages(virt_to_page(objp), |
2365 | cachep->objsize / PAGE_SIZE, 0); | ||
2329 | } else { | 2366 | } else { |
2330 | poison_obj(cachep, objp, POISON_FREE); | 2367 | poison_obj(cachep, objp, POISON_FREE); |
2331 | } | 2368 | } |
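
The arithmetic reformatted in this hunk is the free-path sanity check:
objnr = (objp - slabp->s_mem) / cachep->objsize recovers which object of the
slab is being freed, and the two BUG_ON()s insist that the index is in range
and that the pointer sits exactly on an object boundary, catching frees of a
pointer into the middle of an object. A minimal standalone version of the same
check, with illustrative sizes:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative slab: 8 objects of 256 bytes each. */
	enum { OBJSIZE = 256, NUM = 8 };
	static char slab_mem[NUM * OBJSIZE];
	char *s_mem = slab_mem;
	char *objp = s_mem + 2 * OBJSIZE;	/* a well-formed object pointer */

	unsigned int objnr = (objp - s_mem) / OBJSIZE;

	assert(objnr < NUM);			 /* BUG_ON(objnr >= cachep->num) */
	assert(objp == s_mem + objnr * OBJSIZE); /* must be an object boundary   */
	printf("objp is object #%u of this slab\n", objnr);
	return 0;
}
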
@@ -2340,7 +2377,7 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp) | |||
2340 | { | 2377 | { |
2341 | kmem_bufctl_t i; | 2378 | kmem_bufctl_t i; |
2342 | int entries = 0; | 2379 | int entries = 0; |
2343 | 2380 | ||
2344 | /* Check slab's freelist to see if this obj is there. */ | 2381 | /* Check slab's freelist to see if this obj is there. */ |
2345 | for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { | 2382 | for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { |
2346 | entries++; | 2383 | entries++; |
@@ -2348,13 +2385,16 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp) | |||
2348 | goto bad; | 2385 | goto bad; |
2349 | } | 2386 | } |
2350 | if (entries != cachep->num - slabp->inuse) { | 2387 | if (entries != cachep->num - slabp->inuse) { |
2351 | bad: | 2388 | bad: |
2352 | printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n", | 2389 | printk(KERN_ERR |
2353 | cachep->name, cachep->num, slabp, slabp->inuse); | 2390 | "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n", |
2354 | for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) { | 2391 | cachep->name, cachep->num, slabp, slabp->inuse); |
2355 | if ((i%16)==0) | 2392 | for (i = 0; |
2393 | i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t); | ||
2394 | i++) { | ||
2395 | if ((i % 16) == 0) | ||
2356 | printk("\n%03x:", i); | 2396 | printk("\n%03x:", i); |
2357 | printk(" %02x", ((unsigned char*)slabp)[i]); | 2397 | printk(" %02x", ((unsigned char *)slabp)[i]); |
2358 | } | 2398 | } |
2359 | printk("\n"); | 2399 | printk("\n"); |
2360 | BUG(); | 2400 | BUG(); |
@@ -2374,7 +2414,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) | |||
2374 | 2414 | ||
2375 | check_irq_off(); | 2415 | check_irq_off(); |
2376 | ac = ac_data(cachep); | 2416 | ac = ac_data(cachep); |
2377 | retry: | 2417 | retry: |
2378 | batchcount = ac->batchcount; | 2418 | batchcount = ac->batchcount; |
2379 | if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { | 2419 | if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { |
2380 | /* if there was little recent activity on this | 2420 | /* if there was little recent activity on this |
@@ -2396,8 +2436,8 @@ retry: | |||
2396 | shared_array->avail -= batchcount; | 2436 | shared_array->avail -= batchcount; |
2397 | ac->avail = batchcount; | 2437 | ac->avail = batchcount; |
2398 | memcpy(ac->entry, | 2438 | memcpy(ac->entry, |
2399 | &(shared_array->entry[shared_array->avail]), | 2439 | &(shared_array->entry[shared_array->avail]), |
2400 | sizeof(void*)*batchcount); | 2440 | sizeof(void *) * batchcount); |
2401 | shared_array->touched = 1; | 2441 | shared_array->touched = 1; |
2402 | goto alloc_done; | 2442 | goto alloc_done; |
2403 | } | 2443 | } |
@@ -2425,7 +2465,7 @@ retry: | |||
2425 | 2465 | ||
2426 | /* get obj pointer */ | 2466 | /* get obj pointer */ |
2427 | ac->entry[ac->avail++] = slabp->s_mem + | 2467 | ac->entry[ac->avail++] = slabp->s_mem + |
2428 | slabp->free*cachep->objsize; | 2468 | slabp->free * cachep->objsize; |
2429 | 2469 | ||
2430 | slabp->inuse++; | 2470 | slabp->inuse++; |
2431 | next = slab_bufctl(slabp)[slabp->free]; | 2471 | next = slab_bufctl(slabp)[slabp->free]; |
@@ -2433,7 +2473,7 @@ retry: | |||
2433 | slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; | 2473 | slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; |
2434 | WARN_ON(numa_node_id() != slabp->nodeid); | 2474 | WARN_ON(numa_node_id() != slabp->nodeid); |
2435 | #endif | 2475 | #endif |
2436 | slabp->free = next; | 2476 | slabp->free = next; |
2437 | } | 2477 | } |
2438 | check_slabp(cachep, slabp); | 2478 | check_slabp(cachep, slabp); |
2439 | 2479 | ||
@@ -2445,9 +2485,9 @@ retry: | |||
2445 | list_add(&slabp->list, &l3->slabs_partial); | 2485 | list_add(&slabp->list, &l3->slabs_partial); |
2446 | } | 2486 | } |
2447 | 2487 | ||
2448 | must_grow: | 2488 | must_grow: |
2449 | l3->free_objects -= ac->avail; | 2489 | l3->free_objects -= ac->avail; |
2450 | alloc_done: | 2490 | alloc_done: |
2451 | spin_unlock(&l3->list_lock); | 2491 | spin_unlock(&l3->list_lock); |
2452 | 2492 | ||
2453 | if (unlikely(!ac->avail)) { | 2493 | if (unlikely(!ac->avail)) { |
@@ -2459,7 +2499,7 @@ alloc_done: | |||
2459 | if (!x && ac->avail == 0) // no objects in sight? abort | 2499 | if (!x && ac->avail == 0) // no objects in sight? abort |
2460 | return NULL; | 2500 | return NULL; |
2461 | 2501 | ||
2462 | if (!ac->avail) // objects refilled by interrupt? | 2502 | if (!ac->avail) // objects refilled by interrupt? |
2463 | goto retry; | 2503 | goto retry; |
2464 | } | 2504 | } |
2465 | ac->touched = 1; | 2505 | ac->touched = 1; |
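
cache_alloc_refill(), whose tail ends here, restocks the empty per-CPU array
in stages: first it tries to lift batchcount pointers off the top of the
per-node shared array (the memcpy from &shared_array->entry[shared_array->avail]
a few hunks up, after avail has been dropped), then it pulls objects straight
off partial and free slabs, and only as a last resort grows the cache. The
standalone model below captures just the shared-to-per-CPU transfer; the array
sizes and object count are made up.

#include <stdio.h>
#include <string.h>

#define LIMIT	8			/* illustrative array_cache limit  */

static void *shared_entry[LIMIT];	/* per-node shared array           */
static unsigned int shared_avail;
static void *ac_entry[LIMIT];		/* per-CPU array being refilled    */
static unsigned int ac_avail;

/* The transfer at the top of cache_alloc_refill(): take the top
 * 'batchcount' pointers of the shared stack in one memcpy. */
static void refill_from_shared(unsigned int batchcount)
{
	if (batchcount > shared_avail)
		batchcount = shared_avail;
	if (!batchcount)
		return;
	shared_avail -= batchcount;
	ac_avail = batchcount;
	memcpy(ac_entry, &shared_entry[shared_avail],
	       sizeof(void *) * batchcount);
}

int main(void)
{
	static int objs[6];
	unsigned int i;

	for (i = 0; i < 6; i++)		/* six objects parked on the node */
		shared_entry[shared_avail++] = &objs[i];

	refill_from_shared(4);
	printf("shared keeps %u, per-CPU array now holds %u pointers\n",
	       shared_avail, ac_avail);
	return 0;
}
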
@@ -2476,16 +2516,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) | |||
2476 | } | 2516 | } |
2477 | 2517 | ||
2478 | #if DEBUG | 2518 | #if DEBUG |
2479 | static void * | 2519 | static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags, |
2480 | cache_alloc_debugcheck_after(kmem_cache_t *cachep, | 2520 | void *objp, void *caller) |
2481 | gfp_t flags, void *objp, void *caller) | ||
2482 | { | 2521 | { |
2483 | if (!objp) | 2522 | if (!objp) |
2484 | return objp; | 2523 | return objp; |
2485 | if (cachep->flags & SLAB_POISON) { | 2524 | if (cachep->flags & SLAB_POISON) { |
2486 | #ifdef CONFIG_DEBUG_PAGEALLOC | 2525 | #ifdef CONFIG_DEBUG_PAGEALLOC |
2487 | if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) | 2526 | if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) |
2488 | kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1); | 2527 | kernel_map_pages(virt_to_page(objp), |
2528 | cachep->objsize / PAGE_SIZE, 1); | ||
2489 | else | 2529 | else |
2490 | check_poison_obj(cachep, objp); | 2530 | check_poison_obj(cachep, objp); |
2491 | #else | 2531 | #else |
@@ -2497,24 +2537,28 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep, | |||
2497 | *dbg_userword(cachep, objp) = caller; | 2537 | *dbg_userword(cachep, objp) = caller; |
2498 | 2538 | ||
2499 | if (cachep->flags & SLAB_RED_ZONE) { | 2539 | if (cachep->flags & SLAB_RED_ZONE) { |
2500 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) { | 2540 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE |
2501 | slab_error(cachep, "double free, or memory outside" | 2541 | || *dbg_redzone2(cachep, objp) != RED_INACTIVE) { |
2502 | " object was overwritten"); | 2542 | slab_error(cachep, |
2503 | printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", | 2543 | "double free, or memory outside" |
2504 | objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); | 2544 | " object was overwritten"); |
2545 | printk(KERN_ERR | ||
2546 | "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", | ||
2547 | objp, *dbg_redzone1(cachep, objp), | ||
2548 | *dbg_redzone2(cachep, objp)); | ||
2505 | } | 2549 | } |
2506 | *dbg_redzone1(cachep, objp) = RED_ACTIVE; | 2550 | *dbg_redzone1(cachep, objp) = RED_ACTIVE; |
2507 | *dbg_redzone2(cachep, objp) = RED_ACTIVE; | 2551 | *dbg_redzone2(cachep, objp) = RED_ACTIVE; |
2508 | } | 2552 | } |
2509 | objp += obj_dbghead(cachep); | 2553 | objp += obj_dbghead(cachep); |
2510 | if (cachep->ctor && cachep->flags & SLAB_POISON) { | 2554 | if (cachep->ctor && cachep->flags & SLAB_POISON) { |
2511 | unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; | 2555 | unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; |
2512 | 2556 | ||
2513 | if (!(flags & __GFP_WAIT)) | 2557 | if (!(flags & __GFP_WAIT)) |
2514 | ctor_flags |= SLAB_CTOR_ATOMIC; | 2558 | ctor_flags |= SLAB_CTOR_ATOMIC; |
2515 | 2559 | ||
2516 | cachep->ctor(objp, cachep, ctor_flags); | 2560 | cachep->ctor(objp, cachep, ctor_flags); |
2517 | } | 2561 | } |
2518 | return objp; | 2562 | return objp; |
2519 | } | 2563 | } |
2520 | #else | 2564 | #else |
@@ -2523,7 +2567,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep, | |||
2523 | 2567 | ||
2524 | static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) | 2568 | static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) |
2525 | { | 2569 | { |
2526 | void* objp; | 2570 | void *objp; |
2527 | struct array_cache *ac; | 2571 | struct array_cache *ac; |
2528 | 2572 | ||
2529 | check_irq_off(); | 2573 | check_irq_off(); |
@@ -2542,7 +2586,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) | |||
2542 | static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) | 2586 | static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) |
2543 | { | 2587 | { |
2544 | unsigned long save_flags; | 2588 | unsigned long save_flags; |
2545 | void* objp; | 2589 | void *objp; |
2546 | 2590 | ||
2547 | cache_alloc_debugcheck_before(cachep, flags); | 2591 | cache_alloc_debugcheck_before(cachep, flags); |
2548 | 2592 | ||
@@ -2550,7 +2594,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) | |||
2550 | objp = ____cache_alloc(cachep, flags); | 2594 | objp = ____cache_alloc(cachep, flags); |
2551 | local_irq_restore(save_flags); | 2595 | local_irq_restore(save_flags); |
2552 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, | 2596 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, |
2553 | __builtin_return_address(0)); | 2597 | __builtin_return_address(0)); |
2554 | prefetchw(objp); | 2598 | prefetchw(objp); |
2555 | return objp; | 2599 | return objp; |
2556 | } | 2600 | } |
@@ -2562,74 +2606,75 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) | |||
2562 | static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) | 2606 | static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) |
2563 | { | 2607 | { |
2564 | struct list_head *entry; | 2608 | struct list_head *entry; |
2565 | struct slab *slabp; | 2609 | struct slab *slabp; |
2566 | struct kmem_list3 *l3; | 2610 | struct kmem_list3 *l3; |
2567 | void *obj; | 2611 | void *obj; |
2568 | kmem_bufctl_t next; | 2612 | kmem_bufctl_t next; |
2569 | int x; | 2613 | int x; |
2570 | 2614 | ||
2571 | l3 = cachep->nodelists[nodeid]; | 2615 | l3 = cachep->nodelists[nodeid]; |
2572 | BUG_ON(!l3); | 2616 | BUG_ON(!l3); |
2573 | 2617 | ||
2574 | retry: | 2618 | retry: |
2575 | spin_lock(&l3->list_lock); | 2619 | spin_lock(&l3->list_lock); |
2576 | entry = l3->slabs_partial.next; | 2620 | entry = l3->slabs_partial.next; |
2577 | if (entry == &l3->slabs_partial) { | 2621 | if (entry == &l3->slabs_partial) { |
2578 | l3->free_touched = 1; | 2622 | l3->free_touched = 1; |
2579 | entry = l3->slabs_free.next; | 2623 | entry = l3->slabs_free.next; |
2580 | if (entry == &l3->slabs_free) | 2624 | if (entry == &l3->slabs_free) |
2581 | goto must_grow; | 2625 | goto must_grow; |
2582 | } | 2626 | } |
2583 | 2627 | ||
2584 | slabp = list_entry(entry, struct slab, list); | 2628 | slabp = list_entry(entry, struct slab, list); |
2585 | check_spinlock_acquired_node(cachep, nodeid); | 2629 | check_spinlock_acquired_node(cachep, nodeid); |
2586 | check_slabp(cachep, slabp); | 2630 | check_slabp(cachep, slabp); |
2587 | 2631 | ||
2588 | STATS_INC_NODEALLOCS(cachep); | 2632 | STATS_INC_NODEALLOCS(cachep); |
2589 | STATS_INC_ACTIVE(cachep); | 2633 | STATS_INC_ACTIVE(cachep); |
2590 | STATS_SET_HIGH(cachep); | 2634 | STATS_SET_HIGH(cachep); |
2591 | 2635 | ||
2592 | BUG_ON(slabp->inuse == cachep->num); | 2636 | BUG_ON(slabp->inuse == cachep->num); |
2593 | 2637 | ||
2594 | /* get obj pointer */ | 2638 | /* get obj pointer */ |
2595 | obj = slabp->s_mem + slabp->free*cachep->objsize; | 2639 | obj = slabp->s_mem + slabp->free * cachep->objsize; |
2596 | slabp->inuse++; | 2640 | slabp->inuse++; |
2597 | next = slab_bufctl(slabp)[slabp->free]; | 2641 | next = slab_bufctl(slabp)[slabp->free]; |
2598 | #if DEBUG | 2642 | #if DEBUG |
2599 | slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; | 2643 | slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; |
2600 | #endif | 2644 | #endif |
2601 | slabp->free = next; | 2645 | slabp->free = next; |
2602 | check_slabp(cachep, slabp); | 2646 | check_slabp(cachep, slabp); |
2603 | l3->free_objects--; | 2647 | l3->free_objects--; |
2604 | /* move slabp to correct slabp list: */ | 2648 | /* move slabp to correct slabp list: */ |
2605 | list_del(&slabp->list); | 2649 | list_del(&slabp->list); |
2606 | 2650 | ||
2607 | if (slabp->free == BUFCTL_END) { | 2651 | if (slabp->free == BUFCTL_END) { |
2608 | list_add(&slabp->list, &l3->slabs_full); | 2652 | list_add(&slabp->list, &l3->slabs_full); |
2609 | } else { | 2653 | } else { |
2610 | list_add(&slabp->list, &l3->slabs_partial); | 2654 | list_add(&slabp->list, &l3->slabs_partial); |
2611 | } | 2655 | } |
2612 | 2656 | ||
2613 | spin_unlock(&l3->list_lock); | 2657 | spin_unlock(&l3->list_lock); |
2614 | goto done; | 2658 | goto done; |
2615 | 2659 | ||
2616 | must_grow: | 2660 | must_grow: |
2617 | spin_unlock(&l3->list_lock); | 2661 | spin_unlock(&l3->list_lock); |
2618 | x = cache_grow(cachep, flags, nodeid); | 2662 | x = cache_grow(cachep, flags, nodeid); |
2619 | 2663 | ||
2620 | if (!x) | 2664 | if (!x) |
2621 | return NULL; | 2665 | return NULL; |
2622 | 2666 | ||
2623 | goto retry; | 2667 | goto retry; |
2624 | done: | 2668 | done: |
2625 | return obj; | 2669 | return obj; |
2626 | } | 2670 | } |
2627 | #endif | 2671 | #endif |
2628 | 2672 | ||
2629 | /* | 2673 | /* |
2630 | * Caller needs to acquire correct kmem_list's list_lock | 2674 | * Caller needs to acquire correct kmem_list's list_lock |
2631 | */ | 2675 | */ |
2632 | static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node) | 2676 | static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, |
2677 | int node) | ||
2633 | { | 2678 | { |
2634 | int i; | 2679 | int i; |
2635 | struct kmem_list3 *l3; | 2680 | struct kmem_list3 *l3; |
@@ -2652,7 +2697,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n | |||
2652 | 2697 | ||
2653 | if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) { | 2698 | if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) { |
2654 | printk(KERN_ERR "slab: double free detected in cache " | 2699 | printk(KERN_ERR "slab: double free detected in cache " |
2655 | "'%s', objp %p\n", cachep->name, objp); | 2700 | "'%s', objp %p\n", cachep->name, objp); |
2656 | BUG(); | 2701 | BUG(); |
2657 | } | 2702 | } |
2658 | #endif | 2703 | #endif |
@@ -2696,20 +2741,19 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) | |||
2696 | spin_lock(&l3->list_lock); | 2741 | spin_lock(&l3->list_lock); |
2697 | if (l3->shared) { | 2742 | if (l3->shared) { |
2698 | struct array_cache *shared_array = l3->shared; | 2743 | struct array_cache *shared_array = l3->shared; |
2699 | int max = shared_array->limit-shared_array->avail; | 2744 | int max = shared_array->limit - shared_array->avail; |
2700 | if (max) { | 2745 | if (max) { |
2701 | if (batchcount > max) | 2746 | if (batchcount > max) |
2702 | batchcount = max; | 2747 | batchcount = max; |
2703 | memcpy(&(shared_array->entry[shared_array->avail]), | 2748 | memcpy(&(shared_array->entry[shared_array->avail]), |
2704 | ac->entry, | 2749 | ac->entry, sizeof(void *) * batchcount); |
2705 | sizeof(void*)*batchcount); | ||
2706 | shared_array->avail += batchcount; | 2750 | shared_array->avail += batchcount; |
2707 | goto free_done; | 2751 | goto free_done; |
2708 | } | 2752 | } |
2709 | } | 2753 | } |
2710 | 2754 | ||
2711 | free_block(cachep, ac->entry, batchcount, node); | 2755 | free_block(cachep, ac->entry, batchcount, node); |
2712 | free_done: | 2756 | free_done: |
2713 | #if STATS | 2757 | #if STATS |
2714 | { | 2758 | { |
2715 | int i = 0; | 2759 | int i = 0; |
@@ -2731,10 +2775,9 @@ free_done: | |||
2731 | spin_unlock(&l3->list_lock); | 2775 | spin_unlock(&l3->list_lock); |
2732 | ac->avail -= batchcount; | 2776 | ac->avail -= batchcount; |
2733 | memmove(ac->entry, &(ac->entry[batchcount]), | 2777 | memmove(ac->entry, &(ac->entry[batchcount]), |
2734 | sizeof(void*)*ac->avail); | 2778 | sizeof(void *) * ac->avail); |
2735 | } | 2779 | } |
2736 | 2780 | ||
2737 | |||
2738 | /* | 2781 | /* |
2739 | * __cache_free | 2782 | * __cache_free |
2740 | * Release an obj back to its cache. If the obj has a constructed | 2783 | * Release an obj back to its cache. If the obj has a constructed |
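
cache_flusharray() is the mirror image of that refill: when the per-CPU array
overflows it pushes up to batchcount pointers into the shared array (or hands
them to free_block()), and the memmove reformatted just above slides the
surviving entries to the front, so the oldest pointers are the ones that leave
the CPU. A short standalone model of that compaction step, with made-up
contents:

#include <stdio.h>
#include <string.h>

#define LIMIT	8

static void *ac_entry[LIMIT];
static unsigned int ac_avail;

/* After the first 'batchcount' entries have been flushed to the shared
 * array or freed, compact the rest exactly like the memmove above. */
static void compact(unsigned int batchcount)
{
	ac_avail -= batchcount;
	memmove(ac_entry, &ac_entry[batchcount], sizeof(void *) * ac_avail);
}

int main(void)
{
	static int objs[6];
	unsigned int i;

	for (i = 0; i < 6; i++)
		ac_entry[ac_avail++] = &objs[i];

	compact(4);			/* pretend the 4 oldest were flushed */
	printf("%u entries remain; the front one is now objs[%td]\n",
	       ac_avail, (int *)ac_entry[0] - objs);
	return 0;
}
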
@@ -2759,7 +2802,8 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) | |||
2759 | if (unlikely(slabp->nodeid != numa_node_id())) { | 2802 | if (unlikely(slabp->nodeid != numa_node_id())) { |
2760 | struct array_cache *alien = NULL; | 2803 | struct array_cache *alien = NULL; |
2761 | int nodeid = slabp->nodeid; | 2804 | int nodeid = slabp->nodeid; |
2762 | struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()]; | 2805 | struct kmem_list3 *l3 = |
2806 | cachep->nodelists[numa_node_id()]; | ||
2763 | 2807 | ||
2764 | STATS_INC_NODEFREES(cachep); | 2808 | STATS_INC_NODEFREES(cachep); |
2765 | if (l3->alien && l3->alien[nodeid]) { | 2809 | if (l3->alien && l3->alien[nodeid]) { |
@@ -2767,15 +2811,15 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) | |||
2767 | spin_lock(&alien->lock); | 2811 | spin_lock(&alien->lock); |
2768 | if (unlikely(alien->avail == alien->limit)) | 2812 | if (unlikely(alien->avail == alien->limit)) |
2769 | __drain_alien_cache(cachep, | 2813 | __drain_alien_cache(cachep, |
2770 | alien, nodeid); | 2814 | alien, nodeid); |
2771 | alien->entry[alien->avail++] = objp; | 2815 | alien->entry[alien->avail++] = objp; |
2772 | spin_unlock(&alien->lock); | 2816 | spin_unlock(&alien->lock); |
2773 | } else { | 2817 | } else { |
2774 | spin_lock(&(cachep->nodelists[nodeid])-> | 2818 | spin_lock(&(cachep->nodelists[nodeid])-> |
2775 | list_lock); | 2819 | list_lock); |
2776 | free_block(cachep, &objp, 1, nodeid); | 2820 | free_block(cachep, &objp, 1, nodeid); |
2777 | spin_unlock(&(cachep->nodelists[nodeid])-> | 2821 | spin_unlock(&(cachep->nodelists[nodeid])-> |
2778 | list_lock); | 2822 | list_lock); |
2779 | } | 2823 | } |
2780 | return; | 2824 | return; |
2781 | } | 2825 | } |
@@ -2822,9 +2866,9 @@ EXPORT_SYMBOL(kmem_cache_alloc); | |||
2822 | */ | 2866 | */ |
2823 | int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) | 2867 | int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) |
2824 | { | 2868 | { |
2825 | unsigned long addr = (unsigned long) ptr; | 2869 | unsigned long addr = (unsigned long)ptr; |
2826 | unsigned long min_addr = PAGE_OFFSET; | 2870 | unsigned long min_addr = PAGE_OFFSET; |
2827 | unsigned long align_mask = BYTES_PER_WORD-1; | 2871 | unsigned long align_mask = BYTES_PER_WORD - 1; |
2828 | unsigned long size = cachep->objsize; | 2872 | unsigned long size = cachep->objsize; |
2829 | struct page *page; | 2873 | struct page *page; |
2830 | 2874 | ||
@@ -2844,7 +2888,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) | |||
2844 | if (unlikely(page_get_cache(page) != cachep)) | 2888 | if (unlikely(page_get_cache(page) != cachep)) |
2845 | goto out; | 2889 | goto out; |
2846 | return 1; | 2890 | return 1; |
2847 | out: | 2891 | out: |
2848 | return 0; | 2892 | return 0; |
2849 | } | 2893 | } |
2850 | 2894 | ||
@@ -2871,8 +2915,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) | |||
2871 | 2915 | ||
2872 | if (unlikely(!cachep->nodelists[nodeid])) { | 2916 | if (unlikely(!cachep->nodelists[nodeid])) { |
2873 | /* Fall back to __cache_alloc if we run into trouble */ | 2917 | /* Fall back to __cache_alloc if we run into trouble */ |
2874 | printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name); | 2918 | printk(KERN_WARNING |
2875 | return __cache_alloc(cachep,flags); | 2919 | "slab: not allocating in inactive node %d for cache %s\n", |
2920 | nodeid, cachep->name); | ||
2921 | return __cache_alloc(cachep, flags); | ||
2876 | } | 2922 | } |
2877 | 2923 | ||
2878 | cache_alloc_debugcheck_before(cachep, flags); | 2924 | cache_alloc_debugcheck_before(cachep, flags); |
@@ -2882,7 +2928,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) | |||
2882 | else | 2928 | else |
2883 | ptr = __cache_alloc_node(cachep, flags, nodeid); | 2929 | ptr = __cache_alloc_node(cachep, flags, nodeid); |
2884 | local_irq_restore(save_flags); | 2930 | local_irq_restore(save_flags); |
2885 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0)); | 2931 | ptr = |
2932 | cache_alloc_debugcheck_after(cachep, flags, ptr, | ||
2933 | __builtin_return_address(0)); | ||
2886 | 2934 | ||
2887 | return ptr; | 2935 | return ptr; |
2888 | } | 2936 | } |
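
kmem_cache_alloc_node(), whose body this hunk rewraps, gives callers an
explicit NUMA node to allocate from and quietly falls back to the ordinary
__cache_alloc() path when that node has no nodelist yet. The fragment below is
a hedged usage sketch rather than code from this patch: the struct, cache name
and helpers are invented, and kmem_cache_create() is shown in the six-argument
form (with NULL constructor and destructor) that this generation of the kernel
expects.

#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int payload;
};

static kmem_cache_t *foo_cache;

static int __init foo_cache_init(void)
{
	/* SLAB_PANIC: creation failure is fatal, so no NULL check needed. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_PANIC, NULL, NULL);
	return 0;
}

static struct foo *foo_alloc_on(int nodeid)
{
	/* Prefer memory on 'nodeid'; may still fall back as noted above. */
	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nodeid);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cache, f);
}
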
@@ -2944,12 +2992,11 @@ EXPORT_SYMBOL(__kmalloc); | |||
2944 | * Objects should be dereferenced using the per_cpu_ptr macro only. | 2992 | * Objects should be dereferenced using the per_cpu_ptr macro only. |
2945 | * | 2993 | * |
2946 | * @size: how many bytes of memory are required. | 2994 | * @size: how many bytes of memory are required. |
2947 | * @align: the alignment, which can't be greater than SMP_CACHE_BYTES. | ||
2948 | */ | 2995 | */ |
2949 | void *__alloc_percpu(size_t size, size_t align) | 2996 | void *__alloc_percpu(size_t size) |
2950 | { | 2997 | { |
2951 | int i; | 2998 | int i; |
2952 | struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL); | 2999 | struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); |
2953 | 3000 | ||
2954 | if (!pdata) | 3001 | if (!pdata) |
2955 | return NULL; | 3002 | return NULL; |
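
The other change in this hunk is an interface one: __alloc_percpu() loses its
unused align parameter, so it now takes only the object size. As the
surrounding code shows, the returned handle is complemented and must only be
dereferenced through per_cpu_ptr(), and the whole set is released with
free_percpu(), which appears further down. A hedged usage sketch with the
post-patch one-argument form (the counter type and helpers are invented):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct hit_counter {
	unsigned long hits;
};

static struct hit_counter *counters;	/* complemented per-CPU handle */

static int counters_init(void)
{
	/* One hit_counter per possible CPU, post-patch signature. */
	counters = __alloc_percpu(sizeof(struct hit_counter));
	return counters ? 0 : -ENOMEM;
}

static void count_hit(void)
{
	int cpu = get_cpu();	/* stay on one CPU while touching our copy */

	per_cpu_ptr(counters, cpu)->hits++;
	put_cpu();
}

static void counters_exit(void)
{
	free_percpu(counters);
}
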
@@ -2973,9 +3020,9 @@ void *__alloc_percpu(size_t size, size_t align) | |||
2973 | } | 3020 | } |
2974 | 3021 | ||
2975 | /* Catch derefs w/o wrappers */ | 3022 | /* Catch derefs w/o wrappers */ |
2976 | return (void *) (~(unsigned long) pdata); | 3023 | return (void *)(~(unsigned long)pdata); |
2977 | 3024 | ||
2978 | unwind_oom: | 3025 | unwind_oom: |
2979 | while (--i >= 0) { | 3026 | while (--i >= 0) { |
2980 | if (!cpu_possible(i)) | 3027 | if (!cpu_possible(i)) |
2981 | continue; | 3028 | continue; |
@@ -3006,20 +3053,6 @@ void kmem_cache_free(kmem_cache_t *cachep, void *objp) | |||
3006 | EXPORT_SYMBOL(kmem_cache_free); | 3053 | EXPORT_SYMBOL(kmem_cache_free); |
3007 | 3054 | ||
3008 | /** | 3055 | /** |
3009 | * kzalloc - allocate memory. The memory is set to zero. | ||
3010 | * @size: how many bytes of memory are required. | ||
3011 | * @flags: the type of memory to allocate. | ||
3012 | */ | ||
3013 | void *kzalloc(size_t size, gfp_t flags) | ||
3014 | { | ||
3015 | void *ret = kmalloc(size, flags); | ||
3016 | if (ret) | ||
3017 | memset(ret, 0, size); | ||
3018 | return ret; | ||
3019 | } | ||
3020 | EXPORT_SYMBOL(kzalloc); | ||
3021 | |||
3022 | /** | ||
3023 | * kfree - free previously allocated memory | 3056 | * kfree - free previously allocated memory |
3024 | * @objp: pointer returned by kmalloc. | 3057 | * @objp: pointer returned by kmalloc. |
3025 | * | 3058 | * |
@@ -3038,7 +3071,7 @@ void kfree(const void *objp) | |||
3038 | local_irq_save(flags); | 3071 | local_irq_save(flags); |
3039 | kfree_debugcheck(objp); | 3072 | kfree_debugcheck(objp); |
3040 | c = page_get_cache(virt_to_page(objp)); | 3073 | c = page_get_cache(virt_to_page(objp)); |
3041 | __cache_free(c, (void*)objp); | 3074 | __cache_free(c, (void *)objp); |
3042 | local_irq_restore(flags); | 3075 | local_irq_restore(flags); |
3043 | } | 3076 | } |
3044 | EXPORT_SYMBOL(kfree); | 3077 | EXPORT_SYMBOL(kfree); |
@@ -3051,17 +3084,16 @@ EXPORT_SYMBOL(kfree); | |||
3051 | * Don't free memory not originally allocated by alloc_percpu() | 3084 | * Don't free memory not originally allocated by alloc_percpu() |
3052 | * The complemented objp is to check for that. | 3085 | * The complemented objp is to check for that. |
3053 | */ | 3086 | */ |
3054 | void | 3087 | void free_percpu(const void *objp) |
3055 | free_percpu(const void *objp) | ||
3056 | { | 3088 | { |
3057 | int i; | 3089 | int i; |
3058 | struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp); | 3090 | struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp); |
3059 | 3091 | ||
3060 | /* | 3092 | /* |
3061 | * We allocate for all cpus so we cannot use for online cpu here. | 3093 | * We allocate for all cpus so we cannot use for online cpu here. |
3062 | */ | 3094 | */ |
3063 | for_each_cpu(i) | 3095 | for_each_cpu(i) |
3064 | kfree(p->ptrs[i]); | 3096 | kfree(p->ptrs[i]); |
3065 | kfree(p); | 3097 | kfree(p); |
3066 | } | 3098 | } |
3067 | EXPORT_SYMBOL(free_percpu); | 3099 | EXPORT_SYMBOL(free_percpu); |
@@ -3095,44 +3127,44 @@ static int alloc_kmemlist(kmem_cache_t *cachep) | |||
3095 | if (!(new_alien = alloc_alien_cache(node, cachep->limit))) | 3127 | if (!(new_alien = alloc_alien_cache(node, cachep->limit))) |
3096 | goto fail; | 3128 | goto fail; |
3097 | #endif | 3129 | #endif |
3098 | if (!(new = alloc_arraycache(node, (cachep->shared* | 3130 | if (!(new = alloc_arraycache(node, (cachep->shared * |
3099 | cachep->batchcount), 0xbaadf00d))) | 3131 | cachep->batchcount), |
3132 | 0xbaadf00d))) | ||
3100 | goto fail; | 3133 | goto fail; |
3101 | if ((l3 = cachep->nodelists[node])) { | 3134 | if ((l3 = cachep->nodelists[node])) { |
3102 | 3135 | ||
3103 | spin_lock_irq(&l3->list_lock); | 3136 | spin_lock_irq(&l3->list_lock); |
3104 | 3137 | ||
3105 | if ((nc = cachep->nodelists[node]->shared)) | 3138 | if ((nc = cachep->nodelists[node]->shared)) |
3106 | free_block(cachep, nc->entry, | 3139 | free_block(cachep, nc->entry, nc->avail, node); |
3107 | nc->avail, node); | ||
3108 | 3140 | ||
3109 | l3->shared = new; | 3141 | l3->shared = new; |
3110 | if (!cachep->nodelists[node]->alien) { | 3142 | if (!cachep->nodelists[node]->alien) { |
3111 | l3->alien = new_alien; | 3143 | l3->alien = new_alien; |
3112 | new_alien = NULL; | 3144 | new_alien = NULL; |
3113 | } | 3145 | } |
3114 | l3->free_limit = (1 + nr_cpus_node(node))* | 3146 | l3->free_limit = (1 + nr_cpus_node(node)) * |
3115 | cachep->batchcount + cachep->num; | 3147 | cachep->batchcount + cachep->num; |
3116 | spin_unlock_irq(&l3->list_lock); | 3148 | spin_unlock_irq(&l3->list_lock); |
3117 | kfree(nc); | 3149 | kfree(nc); |
3118 | free_alien_cache(new_alien); | 3150 | free_alien_cache(new_alien); |
3119 | continue; | 3151 | continue; |
3120 | } | 3152 | } |
3121 | if (!(l3 = kmalloc_node(sizeof(struct kmem_list3), | 3153 | if (!(l3 = kmalloc_node(sizeof(struct kmem_list3), |
3122 | GFP_KERNEL, node))) | 3154 | GFP_KERNEL, node))) |
3123 | goto fail; | 3155 | goto fail; |
3124 | 3156 | ||
3125 | kmem_list3_init(l3); | 3157 | kmem_list3_init(l3); |
3126 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 3158 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + |
3127 | ((unsigned long)cachep)%REAPTIMEOUT_LIST3; | 3159 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; |
3128 | l3->shared = new; | 3160 | l3->shared = new; |
3129 | l3->alien = new_alien; | 3161 | l3->alien = new_alien; |
3130 | l3->free_limit = (1 + nr_cpus_node(node))* | 3162 | l3->free_limit = (1 + nr_cpus_node(node)) * |
3131 | cachep->batchcount + cachep->num; | 3163 | cachep->batchcount + cachep->num; |
3132 | cachep->nodelists[node] = l3; | 3164 | cachep->nodelists[node] = l3; |
3133 | } | 3165 | } |
3134 | return err; | 3166 | return err; |
3135 | fail: | 3167 | fail: |
3136 | err = -ENOMEM; | 3168 | err = -ENOMEM; |
3137 | return err; | 3169 | return err; |
3138 | } | 3170 | } |
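
Both branches of alloc_kmemlist() end by setting the same ceiling, now
rewrapped: l3->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
cachep->num, i.e. one batch of objects for each CPU on the node plus one extra
batch, plus a full slab's worth, may sit idle on the node's lists before
surplus slabs start being destroyed. A throwaway calculation with invented
numbers makes the magnitude concrete:

#include <stdio.h>

int main(void)
{
	/* Invented cache: 4 CPUs on the node, batchcount 60, 30 objs/slab. */
	unsigned int cpus_on_node = 4, batchcount = 60, objs_per_slab = 30;

	unsigned long free_limit =
	    (1 + cpus_on_node) * batchcount + objs_per_slab;

	printf("up to %lu free objects may idle on this node\n", free_limit);
	return 0;
}
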
@@ -3154,18 +3186,19 @@ static void do_ccupdate_local(void *info) | |||
3154 | new->new[smp_processor_id()] = old; | 3186 | new->new[smp_processor_id()] = old; |
3155 | } | 3187 | } |
3156 | 3188 | ||
3157 | |||
3158 | static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, | 3189 | static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, |
3159 | int shared) | 3190 | int shared) |
3160 | { | 3191 | { |
3161 | struct ccupdate_struct new; | 3192 | struct ccupdate_struct new; |
3162 | int i, err; | 3193 | int i, err; |
3163 | 3194 | ||
3164 | memset(&new.new,0,sizeof(new.new)); | 3195 | memset(&new.new, 0, sizeof(new.new)); |
3165 | for_each_online_cpu(i) { | 3196 | for_each_online_cpu(i) { |
3166 | new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount); | 3197 | new.new[i] = |
3198 | alloc_arraycache(cpu_to_node(i), limit, batchcount); | ||
3167 | if (!new.new[i]) { | 3199 | if (!new.new[i]) { |
3168 | for (i--; i >= 0; i--) kfree(new.new[i]); | 3200 | for (i--; i >= 0; i--) |
3201 | kfree(new.new[i]); | ||
3169 | return -ENOMEM; | 3202 | return -ENOMEM; |
3170 | } | 3203 | } |
3171 | } | 3204 | } |
@@ -3193,13 +3226,12 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, | |||
3193 | err = alloc_kmemlist(cachep); | 3226 | err = alloc_kmemlist(cachep); |
3194 | if (err) { | 3227 | if (err) { |
3195 | printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n", | 3228 | printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n", |
3196 | cachep->name, -err); | 3229 | cachep->name, -err); |
3197 | BUG(); | 3230 | BUG(); |
3198 | } | 3231 | } |
3199 | return 0; | 3232 | return 0; |
3200 | } | 3233 | } |
3201 | 3234 | ||
3202 | |||
3203 | static void enable_cpucache(kmem_cache_t *cachep) | 3235 | static void enable_cpucache(kmem_cache_t *cachep) |
3204 | { | 3236 | { |
3205 | int err; | 3237 | int err; |
@@ -3246,14 +3278,14 @@ static void enable_cpucache(kmem_cache_t *cachep) | |||
3246 | if (limit > 32) | 3278 | if (limit > 32) |
3247 | limit = 32; | 3279 | limit = 32; |
3248 | #endif | 3280 | #endif |
3249 | err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared); | 3281 | err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared); |
3250 | if (err) | 3282 | if (err) |
3251 | printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", | 3283 | printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", |
3252 | cachep->name, -err); | 3284 | cachep->name, -err); |
3253 | } | 3285 | } |
3254 | 3286 | ||
3255 | static void drain_array_locked(kmem_cache_t *cachep, | 3287 | static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac, |
3256 | struct array_cache *ac, int force, int node) | 3288 | int force, int node) |
3257 | { | 3289 | { |
3258 | int tofree; | 3290 | int tofree; |
3259 | 3291 | ||
@@ -3261,14 +3293,14 @@ static void drain_array_locked(kmem_cache_t *cachep, | |||
3261 | if (ac->touched && !force) { | 3293 | if (ac->touched && !force) { |
3262 | ac->touched = 0; | 3294 | ac->touched = 0; |
3263 | } else if (ac->avail) { | 3295 | } else if (ac->avail) { |
3264 | tofree = force ? ac->avail : (ac->limit+4)/5; | 3296 | tofree = force ? ac->avail : (ac->limit + 4) / 5; |
3265 | if (tofree > ac->avail) { | 3297 | if (tofree > ac->avail) { |
3266 | tofree = (ac->avail+1)/2; | 3298 | tofree = (ac->avail + 1) / 2; |
3267 | } | 3299 | } |
3268 | free_block(cachep, ac->entry, tofree, node); | 3300 | free_block(cachep, ac->entry, tofree, node); |
3269 | ac->avail -= tofree; | 3301 | ac->avail -= tofree; |
3270 | memmove(ac->entry, &(ac->entry[tofree]), | 3302 | memmove(ac->entry, &(ac->entry[tofree]), |
3271 | sizeof(void*)*ac->avail); | 3303 | sizeof(void *) * ac->avail); |
3272 | } | 3304 | } |
3273 | } | 3305 | } |
3274 | 3306 | ||
@@ -3291,13 +3323,14 @@ static void cache_reap(void *unused) | |||
3291 | 3323 | ||
3292 | if (down_trylock(&cache_chain_sem)) { | 3324 | if (down_trylock(&cache_chain_sem)) { |
3293 | /* Give up. Setup the next iteration. */ | 3325 | /* Give up. Setup the next iteration. */ |
3294 | schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); | 3326 | schedule_delayed_work(&__get_cpu_var(reap_work), |
3327 | REAPTIMEOUT_CPUC); | ||
3295 | return; | 3328 | return; |
3296 | } | 3329 | } |
3297 | 3330 | ||
3298 | list_for_each(walk, &cache_chain) { | 3331 | list_for_each(walk, &cache_chain) { |
3299 | kmem_cache_t *searchp; | 3332 | kmem_cache_t *searchp; |
3300 | struct list_head* p; | 3333 | struct list_head *p; |
3301 | int tofree; | 3334 | int tofree; |
3302 | struct slab *slabp; | 3335 | struct slab *slabp; |
3303 | 3336 | ||
@@ -3314,7 +3347,7 @@ static void cache_reap(void *unused) | |||
3314 | spin_lock_irq(&l3->list_lock); | 3347 | spin_lock_irq(&l3->list_lock); |
3315 | 3348 | ||
3316 | drain_array_locked(searchp, ac_data(searchp), 0, | 3349 | drain_array_locked(searchp, ac_data(searchp), 0, |
3317 | numa_node_id()); | 3350 | numa_node_id()); |
3318 | 3351 | ||
3319 | if (time_after(l3->next_reap, jiffies)) | 3352 | if (time_after(l3->next_reap, jiffies)) |
3320 | goto next_unlock; | 3353 | goto next_unlock; |
@@ -3323,14 +3356,16 @@ static void cache_reap(void *unused) | |||
3323 | 3356 | ||
3324 | if (l3->shared) | 3357 | if (l3->shared) |
3325 | drain_array_locked(searchp, l3->shared, 0, | 3358 | drain_array_locked(searchp, l3->shared, 0, |
3326 | numa_node_id()); | 3359 | numa_node_id()); |
3327 | 3360 | ||
3328 | if (l3->free_touched) { | 3361 | if (l3->free_touched) { |
3329 | l3->free_touched = 0; | 3362 | l3->free_touched = 0; |
3330 | goto next_unlock; | 3363 | goto next_unlock; |
3331 | } | 3364 | } |
3332 | 3365 | ||
3333 | tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num); | 3366 | tofree = |
3367 | (l3->free_limit + 5 * searchp->num - | ||
3368 | 1) / (5 * searchp->num); | ||
3334 | do { | 3369 | do { |
3335 | p = l3->slabs_free.next; | 3370 | p = l3->slabs_free.next; |
3336 | if (p == &(l3->slabs_free)) | 3371 | if (p == &(l3->slabs_free)) |
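
The expression now wrapped across three lines above is a ceiling division:
tofree = (l3->free_limit + 5*num - 1) / (5*num) converts one fifth of the
node's free_limit into a whole number of slabs (each holding num objects),
rounding up, and the do/while loop then destroys at most that many completely
free slabs per reap pass, stopping early when slabs_free runs out. A small
standalone check using the same invented figures as before:

#include <stdio.h>

int main(void)
{
	unsigned int free_limit = 330, num = 30;	/* invented figures */

	/* Ceiling of free_limit / (5 * num): a fifth of the limit per pass,
	 * rounded up to whole slabs. */
	unsigned int tofree = (free_limit + 5 * num - 1) / (5 * num);

	printf("reap at most %u free slab(s) (%u objects) this pass\n",
	       tofree, tofree * num);
	return 0;
}
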
@@ -3350,10 +3385,10 @@ static void cache_reap(void *unused) | |||
3350 | spin_unlock_irq(&l3->list_lock); | 3385 | spin_unlock_irq(&l3->list_lock); |
3351 | slab_destroy(searchp, slabp); | 3386 | slab_destroy(searchp, slabp); |
3352 | spin_lock_irq(&l3->list_lock); | 3387 | spin_lock_irq(&l3->list_lock); |
3353 | } while(--tofree > 0); | 3388 | } while (--tofree > 0); |
3354 | next_unlock: | 3389 | next_unlock: |
3355 | spin_unlock_irq(&l3->list_lock); | 3390 | spin_unlock_irq(&l3->list_lock); |
3356 | next: | 3391 | next: |
3357 | cond_resched(); | 3392 | cond_resched(); |
3358 | } | 3393 | } |
3359 | check_irq_on(); | 3394 | check_irq_on(); |
@@ -3365,32 +3400,37 @@ next: | |||
3365 | 3400 | ||
3366 | #ifdef CONFIG_PROC_FS | 3401 | #ifdef CONFIG_PROC_FS |
3367 | 3402 | ||
3368 | static void *s_start(struct seq_file *m, loff_t *pos) | 3403 | static void print_slabinfo_header(struct seq_file *m) |
3369 | { | 3404 | { |
3370 | loff_t n = *pos; | 3405 | /* |
3371 | struct list_head *p; | 3406 | * Output format version, so at least we can change it |
3372 | 3407 | * without _too_ many complaints. | |
3373 | down(&cache_chain_sem); | 3408 | */ |
3374 | if (!n) { | ||
3375 | /* | ||
3376 | * Output format version, so at least we can change it | ||
3377 | * without _too_ many complaints. | ||
3378 | */ | ||
3379 | #if STATS | 3409 | #if STATS |
3380 | seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); | 3410 | seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); |
3381 | #else | 3411 | #else |
3382 | seq_puts(m, "slabinfo - version: 2.1\n"); | 3412 | seq_puts(m, "slabinfo - version: 2.1\n"); |
3383 | #endif | 3413 | #endif |
3384 | seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>"); | 3414 | seq_puts(m, "# name <active_objs> <num_objs> <objsize> " |
3385 | seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); | 3415 | "<objperslab> <pagesperslab>"); |
3386 | seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); | 3416 | seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); |
3417 | seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); | ||
3387 | #if STATS | 3418 | #if STATS |
3388 | seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>" | 3419 | seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " |
3389 | " <error> <maxfreeable> <nodeallocs> <remotefrees>"); | 3420 | "<error> <maxfreeable> <nodeallocs> <remotefrees>"); |
3390 | seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); | 3421 | seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); |
3391 | #endif | 3422 | #endif |
3392 | seq_putc(m, '\n'); | 3423 | seq_putc(m, '\n'); |
3393 | } | 3424 | } |
3425 | |||
3426 | static void *s_start(struct seq_file *m, loff_t *pos) | ||
3427 | { | ||
3428 | loff_t n = *pos; | ||
3429 | struct list_head *p; | ||
3430 | |||
3431 | down(&cache_chain_sem); | ||
3432 | if (!n) | ||
3433 | print_slabinfo_header(m); | ||
3394 | p = cache_chain.next; | 3434 | p = cache_chain.next; |
3395 | while (n--) { | 3435 | while (n--) { |
3396 | p = p->next; | 3436 | p = p->next; |
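
The hunk above hoists the header output out of s_start() into print_slabinfo_header(), so s_start() only emits the header when iteration begins at position zero and otherwise just skips forward along the cache chain. A simplified userspace sketch of that control flow (cache_entry, start and print_header are illustrative names, not the kernel seq_file API) is:

    /* Sketch: print the header only for position 0, then advance to entry n. */
    #include <stdio.h>

    struct cache_entry { const char *name; struct cache_entry *next; };

    static void print_header(FILE *m)
    {
            fputs("# name <active_objs> <num_objs> ...\n", m);
    }

    static struct cache_entry *start(FILE *m, struct cache_entry *head, long pos)
    {
            long n = pos;

            if (!n)
                    print_header(m);        /* only once, for the first record */
            while (n-- && head)
                    head = head->next;
            return head;                    /* NULL: no more caches to show */
    }

    int main(void)
    {
            struct cache_entry b = { "size-64", NULL }, a = { "size-32", &b };

            for (long pos = 0; ; pos++) {
                    struct cache_entry *c = start(stdout, &a, pos);
                    if (!c)
                            break;
                    printf("%s\n", c->name);
            }
            return 0;
    }
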
@@ -3405,7 +3445,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos) | |||
3405 | kmem_cache_t *cachep = p; | 3445 | kmem_cache_t *cachep = p; |
3406 | ++*pos; | 3446 | ++*pos; |
3407 | return cachep->next.next == &cache_chain ? NULL | 3447 | return cachep->next.next == &cache_chain ? NULL |
3408 | : list_entry(cachep->next.next, kmem_cache_t, next); | 3448 | : list_entry(cachep->next.next, kmem_cache_t, next); |
3409 | } | 3449 | } |
3410 | 3450 | ||
3411 | static void s_stop(struct seq_file *m, void *p) | 3451 | static void s_stop(struct seq_file *m, void *p) |
@@ -3417,11 +3457,11 @@ static int s_show(struct seq_file *m, void *p) | |||
3417 | { | 3457 | { |
3418 | kmem_cache_t *cachep = p; | 3458 | kmem_cache_t *cachep = p; |
3419 | struct list_head *q; | 3459 | struct list_head *q; |
3420 | struct slab *slabp; | 3460 | struct slab *slabp; |
3421 | unsigned long active_objs; | 3461 | unsigned long active_objs; |
3422 | unsigned long num_objs; | 3462 | unsigned long num_objs; |
3423 | unsigned long active_slabs = 0; | 3463 | unsigned long active_slabs = 0; |
3424 | unsigned long num_slabs, free_objects = 0, shared_avail = 0; | 3464 | unsigned long num_slabs, free_objects = 0, shared_avail = 0; |
3425 | const char *name; | 3465 | const char *name; |
3426 | char *error = NULL; | 3466 | char *error = NULL; |
3427 | int node; | 3467 | int node; |
@@ -3438,14 +3478,14 @@ static int s_show(struct seq_file *m, void *p) | |||
3438 | 3478 | ||
3439 | spin_lock(&l3->list_lock); | 3479 | spin_lock(&l3->list_lock); |
3440 | 3480 | ||
3441 | list_for_each(q,&l3->slabs_full) { | 3481 | list_for_each(q, &l3->slabs_full) { |
3442 | slabp = list_entry(q, struct slab, list); | 3482 | slabp = list_entry(q, struct slab, list); |
3443 | if (slabp->inuse != cachep->num && !error) | 3483 | if (slabp->inuse != cachep->num && !error) |
3444 | error = "slabs_full accounting error"; | 3484 | error = "slabs_full accounting error"; |
3445 | active_objs += cachep->num; | 3485 | active_objs += cachep->num; |
3446 | active_slabs++; | 3486 | active_slabs++; |
3447 | } | 3487 | } |
3448 | list_for_each(q,&l3->slabs_partial) { | 3488 | list_for_each(q, &l3->slabs_partial) { |
3449 | slabp = list_entry(q, struct slab, list); | 3489 | slabp = list_entry(q, struct slab, list); |
3450 | if (slabp->inuse == cachep->num && !error) | 3490 | if (slabp->inuse == cachep->num && !error) |
3451 | error = "slabs_partial inuse accounting error"; | 3491 | error = "slabs_partial inuse accounting error"; |
@@ -3454,7 +3494,7 @@ static int s_show(struct seq_file *m, void *p) | |||
3454 | active_objs += slabp->inuse; | 3494 | active_objs += slabp->inuse; |
3455 | active_slabs++; | 3495 | active_slabs++; |
3456 | } | 3496 | } |
3457 | list_for_each(q,&l3->slabs_free) { | 3497 | list_for_each(q, &l3->slabs_free) { |
3458 | slabp = list_entry(q, struct slab, list); | 3498 | slabp = list_entry(q, struct slab, list); |
3459 | if (slabp->inuse && !error) | 3499 | if (slabp->inuse && !error) |
3460 | error = "slabs_free/inuse accounting error"; | 3500 | error = "slabs_free/inuse accounting error"; |
@@ -3465,25 +3505,24 @@ static int s_show(struct seq_file *m, void *p) | |||
3465 | 3505 | ||
3466 | spin_unlock(&l3->list_lock); | 3506 | spin_unlock(&l3->list_lock); |
3467 | } | 3507 | } |
3468 | num_slabs+=active_slabs; | 3508 | num_slabs += active_slabs; |
3469 | num_objs = num_slabs*cachep->num; | 3509 | num_objs = num_slabs * cachep->num; |
3470 | if (num_objs - active_objs != free_objects && !error) | 3510 | if (num_objs - active_objs != free_objects && !error) |
3471 | error = "free_objects accounting error"; | 3511 | error = "free_objects accounting error"; |
3472 | 3512 | ||
3473 | name = cachep->name; | 3513 | name = cachep->name; |
3474 | if (error) | 3514 | if (error) |
3475 | printk(KERN_ERR "slab: cache %s error: %s\n", name, error); | 3515 | printk(KERN_ERR "slab: cache %s error: %s\n", name, error); |
3476 | 3516 | ||
3477 | seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", | 3517 | seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", |
3478 | name, active_objs, num_objs, cachep->objsize, | 3518 | name, active_objs, num_objs, cachep->objsize, |
3479 | cachep->num, (1<<cachep->gfporder)); | 3519 | cachep->num, (1 << cachep->gfporder)); |
3480 | seq_printf(m, " : tunables %4u %4u %4u", | 3520 | seq_printf(m, " : tunables %4u %4u %4u", |
3481 | cachep->limit, cachep->batchcount, | 3521 | cachep->limit, cachep->batchcount, cachep->shared); |
3482 | cachep->shared); | ||
3483 | seq_printf(m, " : slabdata %6lu %6lu %6lu", | 3522 | seq_printf(m, " : slabdata %6lu %6lu %6lu", |
3484 | active_slabs, num_slabs, shared_avail); | 3523 | active_slabs, num_slabs, shared_avail); |
3485 | #if STATS | 3524 | #if STATS |
3486 | { /* list3 stats */ | 3525 | { /* list3 stats */ |
3487 | unsigned long high = cachep->high_mark; | 3526 | unsigned long high = cachep->high_mark; |
3488 | unsigned long allocs = cachep->num_allocations; | 3527 | unsigned long allocs = cachep->num_allocations; |
3489 | unsigned long grown = cachep->grown; | 3528 | unsigned long grown = cachep->grown; |
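
The s_show() code above enforces two invariants before printing a record: num_objs must equal num_slabs times objects-per-slab, and the free objects counted on the lists must equal num_objs minus active_objs. A small sketch with made-up per-cache values (only the format strings mirror the seq_printf calls; the cache name and numbers are illustrative) is:

    /* Illustration: one /proc/slabinfo 2.1 record and its accounting check. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long active_objs = 300, num_slabs = 3;
            unsigned int objsize = 228, num = 113;
            unsigned long num_objs = num_slabs * num;       /* 339 */
            unsigned long free_objects = 39;                /* as if counted from the lists */

            if (num_objs - active_objs != free_objects)
                    fprintf(stderr, "free_objects accounting error\n");

            printf("%-17s %6lu %6lu %6u %4u %4d", "dentry_cache",
                   active_objs, num_objs, objsize, num, 1 << 0);
            printf(" : tunables %4u %4u %4u", 120u, 60u, 8u);
            printf(" : slabdata %6lu %6lu %6lu\n", 2UL, num_slabs, 0UL);
            return 0;
    }
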
@@ -3494,9 +3533,7 @@ static int s_show(struct seq_file *m, void *p) | |||
3494 | unsigned long node_frees = cachep->node_frees; | 3533 | unsigned long node_frees = cachep->node_frees; |
3495 | 3534 | ||
3496 | seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ | 3535 | seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ |
3497 | %4lu %4lu %4lu %4lu", | 3536 | %4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees); |
3498 | allocs, high, grown, reaped, errors, | ||
3499 | max_freeable, node_allocs, node_frees); | ||
3500 | } | 3537 | } |
3501 | /* cpu stats */ | 3538 | /* cpu stats */ |
3502 | { | 3539 | { |
@@ -3506,7 +3543,7 @@ static int s_show(struct seq_file *m, void *p) | |||
3506 | unsigned long freemiss = atomic_read(&cachep->freemiss); | 3543 | unsigned long freemiss = atomic_read(&cachep->freemiss); |
3507 | 3544 | ||
3508 | seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", | 3545 | seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", |
3509 | allochit, allocmiss, freehit, freemiss); | 3546 | allochit, allocmiss, freehit, freemiss); |
3510 | } | 3547 | } |
3511 | #endif | 3548 | #endif |
3512 | seq_putc(m, '\n'); | 3549 | seq_putc(m, '\n'); |
@@ -3529,10 +3566,10 @@ static int s_show(struct seq_file *m, void *p) | |||
3529 | */ | 3566 | */ |
3530 | 3567 | ||
3531 | struct seq_operations slabinfo_op = { | 3568 | struct seq_operations slabinfo_op = { |
3532 | .start = s_start, | 3569 | .start = s_start, |
3533 | .next = s_next, | 3570 | .next = s_next, |
3534 | .stop = s_stop, | 3571 | .stop = s_stop, |
3535 | .show = s_show, | 3572 | .show = s_show, |
3536 | }; | 3573 | }; |
3537 | 3574 | ||
3538 | #define MAX_SLABINFO_WRITE 128 | 3575 | #define MAX_SLABINFO_WRITE 128 |
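
slabinfo_op wires the four iterator callbacks into a seq_file, and the result is what userspace sees when it reads /proc/slabinfo. A minimal reader, assuming a Linux system that exposes /proc/slabinfo (reading it may require root on some configurations), is:

    /* Userspace example: dump the first few lines produced by the seq_file above. */
    #include <stdio.h>

    int main(void)
    {
            char line[512];
            FILE *f = fopen("/proc/slabinfo", "r");
            int n = 0;

            if (!f) {
                    perror("/proc/slabinfo");
                    return 1;
            }
            while (n++ < 5 && fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
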
@@ -3543,18 +3580,18 @@ struct seq_operations slabinfo_op = { | |||
3543 | * @count: data length | 3580 | * @count: data length |
3544 | * @ppos: unused | 3581 | * @ppos: unused |
3545 | */ | 3582 | */ |
3546 | ssize_t slabinfo_write(struct file *file, const char __user *buffer, | 3583 | ssize_t slabinfo_write(struct file *file, const char __user * buffer, |
3547 | size_t count, loff_t *ppos) | 3584 | size_t count, loff_t *ppos) |
3548 | { | 3585 | { |
3549 | char kbuf[MAX_SLABINFO_WRITE+1], *tmp; | 3586 | char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; |
3550 | int limit, batchcount, shared, res; | 3587 | int limit, batchcount, shared, res; |
3551 | struct list_head *p; | 3588 | struct list_head *p; |
3552 | 3589 | ||
3553 | if (count > MAX_SLABINFO_WRITE) | 3590 | if (count > MAX_SLABINFO_WRITE) |
3554 | return -EINVAL; | 3591 | return -EINVAL; |
3555 | if (copy_from_user(&kbuf, buffer, count)) | 3592 | if (copy_from_user(&kbuf, buffer, count)) |
3556 | return -EFAULT; | 3593 | return -EFAULT; |
3557 | kbuf[MAX_SLABINFO_WRITE] = '\0'; | 3594 | kbuf[MAX_SLABINFO_WRITE] = '\0'; |
3558 | 3595 | ||
3559 | tmp = strchr(kbuf, ' '); | 3596 | tmp = strchr(kbuf, ' '); |
3560 | if (!tmp) | 3597 | if (!tmp) |
@@ -3567,18 +3604,17 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |||
3567 | /* Find the cache in the chain of caches. */ | 3604 | /* Find the cache in the chain of caches. */ |
3568 | down(&cache_chain_sem); | 3605 | down(&cache_chain_sem); |
3569 | res = -EINVAL; | 3606 | res = -EINVAL; |
3570 | list_for_each(p,&cache_chain) { | 3607 | list_for_each(p, &cache_chain) { |
3571 | kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next); | 3608 | kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next); |
3572 | 3609 | ||
3573 | if (!strcmp(cachep->name, kbuf)) { | 3610 | if (!strcmp(cachep->name, kbuf)) { |
3574 | if (limit < 1 || | 3611 | if (limit < 1 || |
3575 | batchcount < 1 || | 3612 | batchcount < 1 || |
3576 | batchcount > limit || | 3613 | batchcount > limit || shared < 0) { |
3577 | shared < 0) { | ||
3578 | res = 0; | 3614 | res = 0; |
3579 | } else { | 3615 | } else { |
3580 | res = do_tune_cpucache(cachep, limit, | 3616 | res = do_tune_cpucache(cachep, limit, |
3581 | batchcount, shared); | 3617 | batchcount, shared); |
3582 | } | 3618 | } |
3583 | break; | 3619 | break; |
3584 | } | 3620 | } |
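
slabinfo_write() above parses a line of the form "name limit batchcount shared" and rejects limit < 1, batchcount < 1, batchcount > limit, or shared < 0 before calling do_tune_cpucache(). A userspace example of driving it (the cache name and numbers are only illustrative and must match an existing cache; writing /proc/slabinfo requires root) is:

    /* Userspace example: retune a cache's per-cpu arrays via /proc/slabinfo. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/slabinfo", "w");

            if (!f) {
                    perror("/proc/slabinfo");
                    return 1;
            }
            /* limit >= 1, 1 <= batchcount <= limit, shared >= 0 */
            if (fprintf(f, "dentry_cache 120 60 8\n") < 0)
                    perror("write");
            fclose(f);
            return 0;
    }
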
@@ -3609,26 +3645,3 @@ unsigned int ksize(const void *objp) | |||
3609 | 3645 | ||
3610 | return obj_reallen(page_get_cache(virt_to_page(objp))); | 3646 | return obj_reallen(page_get_cache(virt_to_page(objp))); |
3611 | } | 3647 | } |
3612 | |||
3613 | |||
3614 | /* | ||
3615 | * kstrdup - allocate space for and copy an existing string | ||
3616 | * | ||
3617 | * @s: the string to duplicate | ||
3618 | * @gfp: the GFP mask used in the kmalloc() call when allocating memory | ||
3619 | */ | ||
3620 | char *kstrdup(const char *s, gfp_t gfp) | ||
3621 | { | ||
3622 | size_t len; | ||
3623 | char *buf; | ||
3624 | |||
3625 | if (!s) | ||
3626 | return NULL; | ||
3627 | |||
3628 | len = strlen(s) + 1; | ||
3629 | buf = kmalloc(len, gfp); | ||
3630 | if (buf) | ||
3631 | memcpy(buf, s, len); | ||
3632 | return buf; | ||
3633 | } | ||
3634 | EXPORT_SYMBOL(kstrdup); | ||
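
The kstrdup() removed from mm/slab.c above allocates strlen(s) + 1 bytes, so the copy includes the terminating NUL, and returns NULL either for a NULL input or when the allocation fails; this hunk only drops the definition from this file. A userspace sketch of the same semantics, with malloc standing in for kmalloc(len, gfp) and my_strdup as an illustrative name, is:

    /* Sketch of kstrdup()'s copy semantics in plain C. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *my_strdup(const char *s)
    {
            size_t len;
            char *buf;

            if (!s)
                    return NULL;
            len = strlen(s) + 1;            /* include the trailing '\0' */
            buf = malloc(len);
            if (buf)
                    memcpy(buf, s, len);
            return buf;
    }

    int main(void)
    {
            char *copy = my_strdup("slab");

            printf("%s\n", copy ? copy : "(alloc failed)");
            free(copy);
            return 0;
    }
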