Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c  277
1 file changed, 154 insertions(+), 123 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 6aea48942c29..2b9e656f1cb3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -344,7 +344,7 @@ static void print_section(char *text, u8 *addr, unsigned int length)
 
 	for (i = 0; i < length; i++) {
 		if (newline) {
-			printk(KERN_ERR "%10s 0x%p: ", text, addr + i);
+			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
 		printk(" %02x", addr[i]);
@@ -401,10 +401,11 @@ static void set_track(struct kmem_cache *s, void *object,
 
 static void init_tracking(struct kmem_cache *s, void *object)
 {
-	if (s->flags & SLAB_STORE_USER) {
-		set_track(s, object, TRACK_FREE, NULL);
-		set_track(s, object, TRACK_ALLOC, NULL);
-	}
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	set_track(s, object, TRACK_FREE, NULL);
+	set_track(s, object, TRACK_ALLOC, NULL);
 }
@@ -412,65 +413,106 @@ static void print_track(const char *s, struct track *t)
 	if (!t->addr)
 		return;
 
-	printk(KERN_ERR "%s: ", s);
+	printk(KERN_ERR "INFO: %s in ", s);
 	__print_symbol("%s", (unsigned long)t->addr);
-	printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+}
+
+static void print_tracking(struct kmem_cache *s, void *object)
+{
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
+	print_track("Freed", get_track(s, object, TRACK_FREE));
 }
 
-static void print_trailer(struct kmem_cache *s, u8 *p)
+static void print_page_info(struct page *page)
+{
+	printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
+		page, page->inuse, page->freelist, page->flags);
+
+}
+
+static void slab_bug(struct kmem_cache *s, char *fmt, ...)
+{
+	va_list args;
+	char buf[100];
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	printk(KERN_ERR "========================================"
+			"=====================================\n");
+	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+	printk(KERN_ERR "----------------------------------------"
+			"-------------------------------------\n\n");
+}
+
+static void slab_fix(struct kmem_cache *s, char *fmt, ...)
+{
+	va_list args;
+	char buf[100];
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
+}
+
+static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
+	u8 *addr = page_address(page);
+
+	print_tracking(s, p);
+
+	print_page_info(page);
+
+	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
+			p, p - addr, get_freepointer(s, p));
+
+	if (p > addr + 16)
+		print_section("Bytes b4", p - 16, 16);
+
+	print_section("Object", p, min(s->objsize, 128));
 
 	if (s->flags & SLAB_RED_ZONE)
 		print_section("Redzone", p + s->objsize,
 			s->inuse - s->objsize);
 
-	printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n",
-		p + s->offset,
-		get_freepointer(s, p));
-
 	if (s->offset)
 		off = s->offset + sizeof(void *);
 	else
 		off = s->inuse;
 
-	if (s->flags & SLAB_STORE_USER) {
-		print_track("Last alloc", get_track(s, p, TRACK_ALLOC));
-		print_track("Last free ", get_track(s, p, TRACK_FREE));
+	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
-	}
 
 	if (off != s->size)
 		/* Beginning of the filler is the free pointer */
-		print_section("Filler", p + off, s->size - off);
+		print_section("Padding", p + off, s->size - off);
+
+	dump_stack();
 }
 
 static void object_err(struct kmem_cache *s, struct page *page,
 			u8 *object, char *reason)
 {
-	u8 *addr = page_address(page);
-
-	printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
-		s->name, reason, object, page);
-	printk(KERN_ERR " offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
-		object - addr, page->flags, page->inuse, page->freelist);
-	if (object > addr + 16)
-		print_section("Bytes b4", object - 16, 16);
-	print_section("Object", object, min(s->objsize, 128));
-	print_trailer(s, object);
-	dump_stack();
+	slab_bug(s, reason);
+	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
 
-	va_start(args, reason);
-	vsnprintf(buf, sizeof(buf), reason, args);
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
-	printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf,
-		page);
+	slab_bug(s, fmt);
+	print_page_info(page);
 	dump_stack();
 }
 
@@ -489,15 +531,46 @@ static void init_object(struct kmem_cache *s, void *object, int active)
 			s->inuse - s->objsize);
 }
 
-static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
+static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
 {
 	while (bytes) {
 		if (*start != (u8)value)
-			return 0;
+			return start;
 		start++;
 		bytes--;
 	}
-	return 1;
+	return NULL;
+}
+
+static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
+						void *from, void *to)
+{
+	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
+	memset(from, data, to - from);
+}
+
+static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
+			u8 *object, char *what,
+			u8* start, unsigned int value, unsigned int bytes)
+{
+	u8 *fault;
+	u8 *end;
+
+	fault = check_bytes(start, value, bytes);
+	if (!fault)
+		return 1;
+
+	end = start + bytes;
+	while (end > fault && end[-1] == value)
+		end--;
+
+	slab_bug(s, "%s overwritten", what);
+	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
+			fault, end - 1, fault[0], value);
+	print_trailer(s, page, object);
+
+	restore_bytes(s, what, value, fault, end);
+	return 0;
 }
 
 /*
@@ -538,14 +611,6 @@ static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
  * may be used with merged slabcaches.
  */
 
-static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
-						void *from, void *to)
-{
-	printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
-		s->name, message, data, from, to - 1);
-	memset(from, data, to - from);
-}
-
 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned long off = s->inuse;	/* The end of info */
@@ -561,39 +626,39 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 	if (s->size == off)
 		return 1;
 
-	if (check_bytes(p + off, POISON_INUSE, s->size - off))
-		return 1;
-
-	object_err(s, page, p, "Object padding check fails");
-
-	/*
-	 * Restore padding
-	 */
-	restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size);
-	return 0;
+	return check_bytes_and_report(s, page, p, "Object padding",
+				p + off, POISON_INUSE, s->size - off);
 }
 
 static int slab_pad_check(struct kmem_cache *s, struct page *page)
 {
-	u8 *p;
-	int length, remainder;
+	u8 *start;
+	u8 *fault;
+	u8 *end;
+	int length;
+	int remainder;
 
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	p = page_address(page);
+	start = page_address(page);
+	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
-	remainder = (PAGE_SIZE << s->order) - length;
+	remainder = end - (start + length);
 	if (!remainder)
 		return 1;
 
-	if (!check_bytes(p + length, POISON_INUSE, remainder)) {
-		slab_err(s, page, "Padding check failed");
-		restore_bytes(s, "slab padding", POISON_INUSE, p + length,
-			p + length + remainder);
-		return 0;
-	}
-	return 1;
+	fault = check_bytes(start + length, POISON_INUSE, remainder);
+	if (!fault)
+		return 1;
+	while (end > fault && end[-1] == POISON_INUSE)
+		end--;
+
+	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
+	print_section("Padding", start, length);
+
+	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+	return 0;
 }
 
 static int check_object(struct kmem_cache *s, struct page *page,
@@ -606,41 +671,22 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		unsigned int red =
 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
 
-		if (!check_bytes(endobject, red, s->inuse - s->objsize)) {
-			object_err(s, page, object,
-			active ? "Redzone Active" : "Redzone Inactive");
-			restore_bytes(s, "redzone", red,
-				endobject, object + s->inuse);
+		if (!check_bytes_and_report(s, page, object, "Redzone",
+			endobject, red, s->inuse - s->objsize))
 			return 0;
-		}
 	} else {
-		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse &&
-			!check_bytes(endobject, POISON_INUSE,
-				s->inuse - s->objsize)) {
-			object_err(s, page, p, "Alignment padding check fails");
-			/*
-			 * Fix it so that there will not be another report.
-			 *
-			 * Hmmm... We may be corrupting an object that now expects
-			 * to be longer than allowed.
-			 */
-			restore_bytes(s, "alignment padding", POISON_INUSE,
-				endobject, object + s->inuse);
-		}
+		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
+			check_bytes_and_report(s, page, p, "Alignment padding", endobject,
+				POISON_INUSE, s->inuse - s->objsize);
 	}
 
 	if (s->flags & SLAB_POISON) {
 		if (!active && (s->flags & __OBJECT_POISON) &&
-			(!check_bytes(p, POISON_FREE, s->objsize - 1) ||
-			p[s->objsize - 1] != POISON_END)) {
-
-			object_err(s, page, p, "Poison check failed");
-			restore_bytes(s, "Poison", POISON_FREE,
-				p, p + s->objsize -1);
-			restore_bytes(s, "Poison", POISON_END,
-				p + s->objsize - 1, p + s->objsize);
+			(!check_bytes_and_report(s, page, p, "Poison", p,
+			POISON_FREE, s->objsize - 1) ||
+			!check_bytes_and_report(s, page, p, "Poison",
+				p + s->objsize -1, POISON_END, 1)))
 			return 0;
-		}
 		/*
 		 * check_pad_bytes cleans up on its own.
 		 */
@@ -673,25 +719,17 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 	VM_BUG_ON(!irqs_disabled());
 
 	if (!PageSlab(page)) {
-		slab_err(s, page, "Not a valid slab page flags=%lx "
-			"mapping=0x%p count=%d", page->flags, page->mapping,
-			page_count(page));
+		slab_err(s, page, "Not a valid slab page");
 		return 0;
 	}
 	if (page->offset * sizeof(void *) != s->offset) {
-		slab_err(s, page, "Corrupted offset %lu flags=0x%lx "
-			"mapping=0x%p count=%d",
-			(unsigned long)(page->offset * sizeof(void *)),
-			page->flags,
-			page->mapping,
-			page_count(page));
+		slab_err(s, page, "Corrupted offset %lu",
+			(unsigned long)(page->offset * sizeof(void *)));
 		return 0;
 	}
 	if (page->inuse > s->objects) {
-		slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx "
-			"mapping=0x%p count=%d",
-			s->name, page->inuse, s->objects, page->flags,
-			page->mapping, page_count(page));
+		slab_err(s, page, "inuse %u > max %u",
+			s->name, page->inuse, s->objects);
 		return 0;
 	}
 	/* Slab_pad_check fixes things up after itself */
@@ -719,13 +757,10 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 			set_freepointer(s, object, NULL);
 			break;
 		} else {
-			slab_err(s, page, "Freepointer 0x%p corrupt",
-				fp);
+			slab_err(s, page, "Freepointer corrupt");
 			page->freelist = NULL;
 			page->inuse = s->objects;
-			printk(KERN_ERR "@@@ SLUB %s: Freelist "
-				"cleared. Slab 0x%p\n",
-				s->name, page);
+			slab_fix(s, "Freelist cleared");
 			return 0;
 		}
 		break;
@@ -737,11 +772,9 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 
 	if (page->inuse != s->objects - nr) {
-		slab_err(s, page, "Wrong object count. Counter is %d but "
-			"counted were %d", s, page, page->inuse,
-			s->objects - nr);
+		slab_err(s, page, "Wrong object count. Counter is %d but "
+			"counted were %d", page->inuse, s->objects - nr);
 		page->inuse = s->objects - nr;
-		printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
-			"Slab @0x%p\n", s->name, page);
+		slab_fix(s, "Object count adjusted.");
 	}
 	return search == NULL;
 }
@@ -803,7 +836,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
 		goto bad;
 
 	if (object && !on_freelist(s, page, object)) {
-		slab_err(s, page, "Object 0x%p already allocated", object);
+		object_err(s, page, object, "Object already allocated");
 		goto bad;
 	}
 
@@ -829,8 +862,7 @@ bad:
 	 * to avoid issues in the future. Marking all objects
 	 * as used avoids touching the remaining objects.
 	 */
-	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
-		s->name, page);
+	slab_fix(s, "Marking all objects used");
 	page->inuse = s->objects;
 	page->freelist = NULL;
 	/* Fix up fields that may be corrupted */
@@ -851,7 +883,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	}
 
 	if (on_freelist(s, page, object)) {
-		slab_err(s, page, "Object 0x%p already free", object);
+		object_err(s, page, object, "Object already free");
 		goto fail;
 	}
 
@@ -870,8 +902,8 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 			dump_stack();
 		}
 		else
-			slab_err(s, page, "object at 0x%p belongs "
-				"to slab %s", object, page->slab->name);
+			object_err(s, page, object,
+				"page slab pointer corrupt.");
 		goto fail;
 	}
 
@@ -885,8 +917,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	return 1;
 
 fail:
-	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
-		s->name, page, object);
+	slab_fix(s, "Object at 0x%p not freed", object);
 	return 0;
 }
 