path: root/mm/slub.c
author	Christoph Lameter <clameter@sgi.com>	2007-05-06 17:49:47 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:54 -0400
commit	70d71228af9360cc4a0198ecd6351a1b34fa6d01
tree	ad85cc79ad684062cbb3cf2e56fafa7d3df65ee1	/mm/slub.c
parent	2086d26a05a4b5bda4a2f677bc143933bbdfa9f8
slub: remove object activities out of checking functions
Make sure that the check functions really only check things and do not perform activities. Extract the tracing and the object seeding out of the two check functions and place them into slab_alloc and slab_free.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	108
1 file changed, 47 insertions, 61 deletions
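As a reading aid (not part of the patch), here is a minimal sketch of the division of labour the commit message describes: the check routine only validates and reports, while the debug path of the allocator performs the tracing and the object seeding. The caller debug_alloc() below is a simplified, hypothetical stand-in for the debug: label in slab_alloc(); the helpers it uses (check_slab, check_valid_pointer, init_object, SLAB_TRACE) are the ones touched by this patch.

/* Sketch only -- simplified from the functions changed below. */
static int alloc_object_checks(struct kmem_cache *s, struct page *page,
					void *object)
{
	/* Check functions only validate and report; no side effects. */
	if (!check_slab(s, page))
		return 0;
	if (object && !check_valid_pointer(s, page, object))
		return 0;
	return 1;
}

/* Hypothetical caller standing in for the debug path of slab_alloc(). */
static void *debug_alloc(struct kmem_cache *s, struct page *page, void *object)
{
	if (!alloc_object_checks(s, page, object))
		return NULL;
	if (s->flags & SLAB_TRACE) {		/* tracing moved to the caller */
		printk(KERN_INFO "TRACE %s alloc 0x%p inuse=%d fp=0x%p\n",
			s->name, object, page->inuse, page->freelist);
		dump_stack();
	}
	init_object(s, object, 1);		/* object seeding moved to the caller */
	return object;
}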
diff --git a/mm/slub.c b/mm/slub.c
index ed2846240f96..3904002bdb35 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -459,7 +459,7 @@ static int check_valid_pointer(struct kmem_cache *s, struct page *page,
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 						void *from, void *to)
 {
-	printk(KERN_ERR "@@@ SLUB: %s Restoring %s (0x%x) from 0x%p-0x%p\n",
+	printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
 		s->name, message, data, from, to - 1);
 	memset(from, data, to - from);
 }
@@ -506,9 +506,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	if (!check_bytes(p + length, POISON_INUSE, remainder)) {
-		printk(KERN_ERR "SLUB: %s slab 0x%p: Padding fails check\n",
-			s->name, p);
-		dump_stack();
+		slab_err(s, page, "Padding check failed");
 		restore_bytes(s, "slab padding", POISON_INUSE, p + length,
 			p + length + remainder);
 		return 0;
@@ -594,30 +592,25 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 	VM_BUG_ON(!irqs_disabled());
 
 	if (!PageSlab(page)) {
-		printk(KERN_ERR "SLUB: %s Not a valid slab page @0x%p "
-			"flags=%lx mapping=0x%p count=%d \n",
-			s->name, page, page->flags, page->mapping,
+		slab_err(s, page, "Not a valid slab page flags=%lx "
+			"mapping=0x%p count=%d", page->flags, page->mapping,
 			page_count(page));
 		return 0;
 	}
 	if (page->offset * sizeof(void *) != s->offset) {
-		printk(KERN_ERR "SLUB: %s Corrupted offset %lu in slab @0x%p"
-			" flags=0x%lx mapping=0x%p count=%d\n",
-			s->name,
+		slab_err(s, page, "Corrupted offset %lu flags=0x%lx "
+			"mapping=0x%p count=%d",
 			(unsigned long)(page->offset * sizeof(void *)),
-			page,
 			page->flags,
 			page->mapping,
 			page_count(page));
-		dump_stack();
 		return 0;
 	}
 	if (page->inuse > s->objects) {
-		printk(KERN_ERR "SLUB: %s Inuse %u > max %u in slab "
-			"page @0x%p flags=%lx mapping=0x%p count=%d\n",
-			s->name, page->inuse, s->objects, page, page->flags,
+		slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx "
+			"mapping=0x%p count=%d",
+			s->name, page->inuse, s->objects, page->flags,
 			page->mapping, page_count(page));
-		dump_stack();
 		return 0;
 	}
 	/* Slab_pad_check fixes things up after itself */
@@ -646,12 +639,13 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 			set_freepointer(s, object, NULL);
 			break;
 		} else {
-			printk(KERN_ERR "SLUB: %s slab 0x%p "
-				"freepointer 0x%p corrupted.\n",
-				s->name, page, fp);
-			dump_stack();
+			slab_err(s, page, "Freepointer 0x%p corrupt",
+				fp);
 			page->freelist = NULL;
 			page->inuse = s->objects;
+			printk(KERN_ERR "@@@ SLUB %s: Freelist "
+				"cleared. Slab 0x%p\n",
+				s->name, page);
 			return 0;
 		}
 		break;
@@ -662,11 +656,12 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	}
 
 	if (page->inuse != s->objects - nr) {
-		printk(KERN_ERR "slab %s: page 0x%p wrong object count."
-			" counter is %d but counted were %d\n",
-			s->name, page, page->inuse,
-			s->objects - nr);
+		slab_err(s, page, "Wrong object count. Counter is %d but "
+			"counted were %d", s, page, page->inuse,
+			s->objects - nr);
 		page->inuse = s->objects - nr;
+		printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
+			"Slab @0x%p\n", s->name, page);
 	}
 	return search == NULL;
 }
@@ -702,15 +697,13 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 		goto bad;
 
 	if (object && !on_freelist(s, page, object)) {
-		printk(KERN_ERR "SLUB: %s Object 0x%p@0x%p "
-			"already allocated.\n",
-			s->name, object, page);
-		goto dump;
+		slab_err(s, page, "Object 0x%p already allocated", object);
+		goto bad;
 	}
 
 	if (!check_valid_pointer(s, page, object)) {
 		object_err(s, page, object, "Freelist Pointer check fails");
-		goto dump;
+		goto bad;
 	}
 
 	if (!object)
@@ -718,17 +711,8 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 
 	if (!check_object(s, page, object, 0))
 		goto bad;
-	init_object(s, object, 1);
 
-	if (s->flags & SLAB_TRACE) {
-		printk(KERN_INFO "TRACE %s alloc 0x%p inuse=%d fp=0x%p\n",
-			s->name, object, page->inuse,
-			page->freelist);
-		dump_stack();
-	}
 	return 1;
-dump:
-	dump_stack();
 bad:
 	if (PageSlab(page)) {
 		/*
@@ -753,15 +737,12 @@ static int free_object_checks(struct kmem_cache *s, struct page *page,
 		goto fail;
 
 	if (!check_valid_pointer(s, page, object)) {
-		printk(KERN_ERR "SLUB: %s slab 0x%p invalid "
-			"object pointer 0x%p\n",
-			s->name, page, object);
+		slab_err(s, page, "Invalid object pointer 0x%p", object);
 		goto fail;
 	}
 
 	if (on_freelist(s, page, object)) {
-		printk(KERN_ERR "SLUB: %s slab 0x%p object "
-			"0x%p already free.\n", s->name, page, object);
+		slab_err(s, page, "Object 0x%p already free", object);
 		goto fail;
 	}
 
@@ -770,32 +751,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page,
 
 	if (unlikely(s != page->slab)) {
 		if (!PageSlab(page))
-			printk(KERN_ERR "slab_free %s size %d: attempt to"
-				"free object(0x%p) outside of slab.\n",
-				s->name, s->size, object);
+			slab_err(s, page, "Attempt to free object(0x%p) "
+				"outside of slab", object);
 		else
-		if (!page->slab)
+		if (!page->slab) {
 			printk(KERN_ERR
-				"slab_free : no slab(NULL) for object 0x%p.\n",
+				"SLUB <none>: no slab for object 0x%p.\n",
 				object);
+			dump_stack();
+		}
 		else
-			printk(KERN_ERR "slab_free %s(%d): object at 0x%p"
-				" belongs to slab %s(%d)\n",
-				s->name, s->size, object,
-				page->slab->name, page->slab->size);
+			slab_err(s, page, "object at 0x%p belongs "
+				"to slab %s", object, page->slab->name);
 		goto fail;
 	}
-	if (s->flags & SLAB_TRACE) {
-		printk(KERN_INFO "TRACE %s free 0x%p inuse=%d fp=0x%p\n",
-			s->name, object, page->inuse,
-			page->freelist);
-		print_section("Object", object, s->objsize);
-		dump_stack();
-	}
-	init_object(s, object, 0);
 	return 1;
 fail:
-	dump_stack();
 	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
 		s->name, page, object);
 	return 0;
@@ -1294,6 +1265,13 @@ debug:
 		goto another_slab;
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_ALLOC, addr);
+	if (s->flags & SLAB_TRACE) {
+		printk(KERN_INFO "TRACE %s alloc 0x%p inuse=%d fp=0x%p\n",
+			s->name, object, page->inuse,
+			page->freelist);
+		dump_stack();
+	}
+	init_object(s, object, 1);
 	goto have_object;
 }
 
@@ -1376,6 +1354,14 @@ debug:
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
+	if (s->flags & SLAB_TRACE) {
+		printk(KERN_INFO "TRACE %s free 0x%p inuse=%d fp=0x%p\n",
+			s->name, object, page->inuse,
+			page->freelist);
+		print_section("Object", (void *)object, s->objsize);
+		dump_stack();
+	}
+	init_object(s, object, 0);
 	goto checks_ok;
 }
 
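For reference, a hypothetical sketch of a slab_err()-style helper, inferred only from its call sites in the hunks above (slab_err(s, page, fmt, ...)); the actual helper in mm/slub.c may format its report differently.

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
	va_list args;
	char buf[100];

	/* Format the caller-supplied reason, then report cache and slab. */
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "*** SLUB %s: %s. Slab 0x%p\n", s->name, buf, page);
	dump_stack();
}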