author     Christoph Hellwig <hch@lst.de>            2006-06-23 05:03:17 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-06-23 10:42:48 -0400
commit     7a7c381d25067b9a2bfe025dfcb16459daec0373 (patch)
tree       4c6cbd121a74e55e6147f0ac0541981d271b8371 /mm/slab.c
parent     e1b6aa6f1404f162697650df2cdb6c374b1d6a5b (diff)
[PATCH] slab: stop using list_for_each
Use the _entry variant everywhere to clean the code up a tiny bit.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
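
For context, the pattern replaced throughout this patch is the open-coded walk over raw list_head pointers followed by a manual list_entry() conversion; list_for_each_entry() folds that conversion into the iterator itself. A minimal sketch of the two styles (struct foo and foo_list are hypothetical illustrations, not taken from mm/slab.c):

        #include <linux/kernel.h>
        #include <linux/list.h>

        /* Hypothetical example type: any struct that embeds a list_head. */
        struct foo {
                int data;
                struct list_head next;  /* link in foo_list */
        };

        static LIST_HEAD(foo_list);

        static void walk_old_style(void)
        {
                struct list_head *p;

                /* Before: iterate raw list_head pointers, convert by hand. */
                list_for_each(p, &foo_list) {
                        struct foo *f = list_entry(p, struct foo, next);
                        printk(KERN_INFO "data=%d\n", f->data);
                }
        }

        static void walk_entry_style(void)
        {
                struct foo *f;

                /* After: the iterator hands back the containing struct directly. */
                list_for_each_entry(f, &foo_list, next)
                        printk(KERN_INFO "data=%d\n", f->data);
        }

Both walks visit the same nodes; the _entry form simply drops the scratch list_head pointer and the per-iteration list_entry() call, which is exactly the shape of every hunk below.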
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 38 +++++++++++---------------------------
1 file changed, 11 insertions(+), 27 deletions(-)
@@ -1950,8 +1950,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         void (*dtor)(void*, struct kmem_cache *, unsigned long))
 {
         size_t left_over, slab_size, ralign;
-        struct kmem_cache *cachep = NULL;
-        struct list_head *p;
+        struct kmem_cache *cachep = NULL, *pc;
 
         /*
          * Sanity checks... these are all serious usage bugs.
@@ -1971,8 +1970,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
         mutex_lock(&cache_chain_mutex);
 
-        list_for_each(p, &cache_chain) {
-                struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
+        list_for_each_entry(pc, &cache_chain, next) {
                 mm_segment_t old_fs = get_fs();
                 char tmp;
                 int res;
@@ -3690,7 +3688,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  */
 static void cache_reap(void *unused)
 {
-        struct list_head *walk;
+        struct kmem_cache *searchp;
         struct kmem_list3 *l3;
         int node = numa_node_id();
 
@@ -3701,13 +3699,11 @@ static void cache_reap(void *unused)
                 return;
         }
 
-        list_for_each(walk, &cache_chain) {
-                struct kmem_cache *searchp;
+        list_for_each_entry(searchp, &cache_chain, next) {
                 struct list_head *p;
                 int tofree;
                 struct slab *slabp;
 
-                searchp = list_entry(walk, struct kmem_cache, next);
                 check_irq_on();
 
                 /*
@@ -3835,7 +3831,6 @@ static void s_stop(struct seq_file *m, void *p)
 static int s_show(struct seq_file *m, void *p)
 {
         struct kmem_cache *cachep = p;
-        struct list_head *q;
         struct slab *slabp;
         unsigned long active_objs;
         unsigned long num_objs;
@@ -3856,15 +3851,13 @@ static int s_show(struct seq_file *m, void *p)
                 check_irq_on();
                 spin_lock_irq(&l3->list_lock);
 
-                list_for_each(q, &l3->slabs_full) {
-                        slabp = list_entry(q, struct slab, list);
+                list_for_each_entry(slabp, &l3->slabs_full, list) {
                         if (slabp->inuse != cachep->num && !error)
                                 error = "slabs_full accounting error";
                         active_objs += cachep->num;
                         active_slabs++;
                 }
-                list_for_each(q, &l3->slabs_partial) {
-                        slabp = list_entry(q, struct slab, list);
+                list_for_each_entry(slabp, &l3->slabs_partial, list) {
                         if (slabp->inuse == cachep->num && !error)
                                 error = "slabs_partial inuse accounting error";
                         if (!slabp->inuse && !error)
@@ -3872,8 +3865,7 @@ static int s_show(struct seq_file *m, void *p)
                         active_objs += slabp->inuse;
                         active_slabs++;
                 }
-                list_for_each(q, &l3->slabs_free) {
-                        slabp = list_entry(q, struct slab, list);
+                list_for_each_entry(slabp, &l3->slabs_free, list) {
                         if (slabp->inuse && !error)
                                 error = "slabs_free/inuse accounting error";
                         num_slabs++;
@@ -3966,7 +3958,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 {
         char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
         int limit, batchcount, shared, res;
-        struct list_head *p;
+        struct kmem_cache *cachep;
 
         if (count > MAX_SLABINFO_WRITE)
                 return -EINVAL;
@@ -3985,10 +3977,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
         /* Find the cache in the chain of caches. */
         mutex_lock(&cache_chain_mutex);
         res = -EINVAL;
-        list_for_each(p, &cache_chain) {
-                struct kmem_cache *cachep;
-
-                cachep = list_entry(p, struct kmem_cache, next);
+        list_for_each_entry(cachep, &cache_chain, next) {
                 if (!strcmp(cachep->name, kbuf)) {
                         if (limit < 1 || batchcount < 1 ||
                                         batchcount > limit || shared < 0) {
@@ -4090,7 +4079,6 @@ static void show_symbol(struct seq_file *m, unsigned long address)
 static int leaks_show(struct seq_file *m, void *p)
 {
         struct kmem_cache *cachep = p;
-        struct list_head *q;
         struct slab *slabp;
         struct kmem_list3 *l3;
         const char *name;
@@ -4115,14 +4103,10 @@ static int leaks_show(struct seq_file *m, void *p)
                 check_irq_on();
                 spin_lock_irq(&l3->list_lock);
 
-                list_for_each(q, &l3->slabs_full) {
-                        slabp = list_entry(q, struct slab, list);
+                list_for_each_entry(slabp, &l3->slabs_full, list)
                         handle_slab(n, cachep, slabp);
-                }
-                list_for_each(q, &l3->slabs_partial) {
-                        slabp = list_entry(q, struct slab, list);
+                list_for_each_entry(slabp, &l3->slabs_partial, list)
                         handle_slab(n, cachep, slabp);
-                }
                 spin_unlock_irq(&l3->list_lock);
         }
         name = cachep->name;