author     Christoph Lameter <clameter@sgi.com>                   2007-07-17 07:03:19 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-17 13:23:01 -0400
commit     5b95a4acf157eee552e013795b54eaa2ab1ee4a1
tree       8222033f1db996fd6ce28ce3685edc8b17793b0b
parent     2492268472e7d326a6fe10f92f9211c4578f2482
SLUB: use list_for_each_entry for loops over all slabs
Use list_for_each_entry() instead of list_for_each(), which avoids the open-coded
container_of() in each loop body.

Get rid of for_all_slabs(): it had only one user, so fold it into the callback.
This also gets rid of cpu_slab_flush().

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/slub.c | 51 +++++++++++--------------------------------------
 1 file changed, 13 insertions(+), 38 deletions(-)
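For readers unfamiliar with the two iteration helpers named in the commit message,
here is a minimal userspace sketch (not kernel code) of the pattern this patch
applies throughout mm/slub.c. The simplified list macros and the struct cache type
are illustrative stand-ins for the kernel's <linux/list.h> and struct kmem_cache.

/* Minimal sketch: why list_for_each_entry() is shorter than
 * list_for_each() + container_of().  Builds with gcc (uses typeof). */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

#define list_for_each_entry(pos, head, member)                          \
	for (pos = container_of((head)->next, typeof(*pos), member);    \
	     &pos->member != (head);                                     \
	     pos = container_of(pos->member.next, typeof(*pos), member))

/* Stand-in for struct kmem_cache: a name plus its list linkage. */
struct cache {
	const char *name;
	struct list_head list;
};

static struct list_head caches = LIST_HEAD_INIT(caches);

int main(void)
{
	struct cache a = { .name = "cache-a" }, b = { .name = "cache-b" };
	struct list_head *h;
	struct cache *s;

	list_add_tail(&a.list, &caches);
	list_add_tail(&b.list, &caches);

	/* Old style: walk the raw list_head nodes, then recover the
	 * containing structure by hand with container_of(). */
	list_for_each(h, &caches) {
		struct cache *c = container_of(h, struct cache, list);
		printf("old: %s\n", c->name);
	}

	/* New style: the macro hands back the containing structure
	 * directly, so the temporary local and container_of() go away. */
	list_for_each_entry(s, &caches, list) {
		printf("new: %s\n", s->name);
	}
	return 0;
}

list_for_each_entry() folds the container_of() step into the loop macro itself,
which is why every converted loop in the diff below loses its temporary
struct kmem_cache * local and declares the cursor up front instead.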
diff --git a/mm/slub.c b/mm/slub.c
index 2b9e656f1cb3..3c9e98f0999a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2573,7 +2573,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
-	struct list_head *h;
+	struct kmem_cache *s;
 
 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
 		return NULL;
@@ -2585,10 +2585,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 	align = calculate_alignment(flags, align, size);
 	size = ALIGN(size, align);
 
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
+	list_for_each_entry(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
 			continue;
 
@@ -2670,33 +2667,6 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
 #ifdef CONFIG_SMP
-static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
-{
-	struct list_head *h;
-
-	down_read(&slub_lock);
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
-		func(s, cpu);
-	}
-	up_read(&slub_lock);
-}
-
-/*
- * Version of __flush_cpu_slab for the case that interrupts
- * are enabled.
- */
-static void cpu_slab_flush(struct kmem_cache *s, int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__flush_cpu_slab(s, cpu);
-	local_irq_restore(flags);
-}
-
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
@@ -2705,13 +2675,21 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	struct kmem_cache *s;
+	unsigned long flags;
 
 	switch (action) {
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		for_all_slabs(cpu_slab_flush, cpu);
+		down_read(&slub_lock);
+		list_for_each_entry(s, &slab_caches, list) {
+			local_irq_save(flags);
+			__flush_cpu_slab(s, cpu);
+			local_irq_restore(flags);
+		}
+		up_read(&slub_lock);
 		break;
 	default:
 		break;
@@ -3736,7 +3714,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 
 static int __init slab_sysfs_init(void)
 {
-	struct list_head *h;
+	struct kmem_cache *s;
 	int err;
 
 	err = subsystem_register(&slab_subsys);
@@ -3747,10 +3725,7 @@ static int __init slab_sysfs_init(void)
 
 	slab_state = SYSFS;
 
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
+	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);
 		BUG_ON(err);
 	}