author     Christoph Lameter <clameter@sgi.com>     2008-04-14 12:11:31 -0400
committer  Pekka Enberg <penberg@cs.helsinki.fi>    2008-04-27 11:28:17 -0400
commit     224a88be40c45c0da5bdc45a8118004a37c60e8a
tree       9489ff039b8124a25af0e7f0f12c25c5911c6e1e /mm/slub.c
parent     39b264641a0c3b5e0e742e2046b49e92d1f3be88
slub: for_each_object must be passed the number of objects in a slab
Pass the number of objects to the for_each_object macro. Most of the affected call sites are debug related.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
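For context, for_each_object() walks a slab's memory linearly in object-size steps; with this change the caller passes the object count explicitly (page->objects at the call sites below) rather than the macro reading a single cache-wide field, which allows the per-slab object count to vary. A minimal userspace sketch of that iteration pattern, assuming a stripped-down kmem_cache with only a size field and a plain heap buffer standing in for the slab page (illustrative only, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct kmem_cache: only the object size matters here. */
struct kmem_cache { size_t size; };

/* Same shape as the updated macro: the object count is a parameter. */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size; \
			__p += (__s)->size)

int main(void)
{
	struct kmem_cache s = { .size = 64 };	/* hypothetical object size */
	unsigned long objects = 8;		/* would be page->objects in SLUB */
	char *start = calloc(objects, s.size);	/* stands in for the slab's memory */
	char *p;

	if (!start)
		return 1;

	for_each_object(p, &s, start, objects)
		printf("object at offset %ld\n", (long)(p - start));

	free(start);
	return 0;
}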
Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 6641025c597f..67f7d6068934 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -327,8 +327,8 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 }
 
 /* Loop over all objects in a slab */
-#define for_each_object(__p, __s, __addr) \
-	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
+#define for_each_object(__p, __s, __addr, __objects) \
+	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
 			__p += (__s)->size)
 
 /* Scan freelist */
@@ -774,6 +774,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	int nr = 0;
 	void *fp = page->freelist;
 	void *object = NULL;
+	unsigned long max_objects;
 
 	while (fp && nr <= page->objects) {
 		if (fp == search)
@@ -798,6 +799,16 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 		nr++;
 	}
 
+	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+	if (max_objects > 65535)
+		max_objects = 65535;
+
+	if (page->objects != max_objects) {
+		slab_err(s, page, "Wrong number of objects. Found %d but "
+			"should be %d", page->objects, max_objects);
+		page->objects = max_objects;
+		slab_fix(s, "Number of objects adjusted.");
+	}
 	if (page->inuse != page->objects - nr) {
 		slab_err(s, page, "Wrong object count. Counter is %d but "
 			"counted were %d", page->inuse, page->objects - nr);
@@ -1135,7 +1146,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
 
 	last = start;
-	for_each_object(p, s, start) {
+	for_each_object(p, s, start, page->objects) {
 		setup_object(s, page, last);
 		set_freepointer(s, last, p);
 		last = p;
@@ -1157,7 +1168,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page))
+		for_each_object(p, s, page_address(page),
+			page->objects)
 			check_object(s, page, p, 0);
 		ClearSlabDebug(page);
 	}
@@ -3273,7 +3285,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 		return 0;
 	}
 
-	for_each_object(p, s, addr)
+	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))
 			if (!check_object(s, page, p, 1))
 				return 0;
@@ -3549,7 +3561,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
 	for_each_free_object(p, s, page->freelist)
 		set_bit(slab_index(p, s, addr), map);
 
-	for_each_object(p, s, addr)
+	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))
 			add_location(t, s, get_track(s, p, alloc));
 }