author	Christoph Lameter <clameter@sgi.com>	2007-07-17 07:03:18 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-17 13:23:01 -0400
commit	2492268472e7d326a6fe10f92f9211c4578f2482 (patch)
tree	5f668469190b96bc0db13f836d774ae73cf385ca
parent	8e1f936b73150f5095448a0fee6d4f30a1f9001d (diff)
SLUB: change error reporting format to follow lockdep loosely
Changes the error reporting format to loosely follow lockdep.

If data corruption is detected then we generate the following lines:

============================================
BUG <slab-cache>: <problem>
--------------------------------------------
INFO: <more information> [possibly multiple times]
<object dump>
FIX <slab-cache>: <remedial action>

This also adds some more intelligence to the data corruption detection. It is
now capable of figuring out the start and end of the corrupted area.

Add a comment on how to configure SLUB so that a production system may
continue to operate even though occasional slab corruption occurs through
a misbehaving kernel component. See "Emergency operations" in
Documentation/vm/slub.txt.

[akpm@linux-foundation.org: build fix]

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
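The start/end detection that the message above refers to is implemented by
check_bytes() and check_bytes_and_report() in the mm/slub.c diff below: scan
forward to the first deviating byte, then trim still-intact bytes off the
tail to bound the corrupted range before reporting and restoring it. As an
illustration only, here is a minimal user-space C sketch of the same scanning
logic (the names mirror the patch; the printk() reporting and struct
kmem_cache plumbing are replaced by fprintf() and a plain buffer):

	#include <stdio.h>
	#include <string.h>

	/* Return the first byte deviating from the expected value, or NULL. */
	static unsigned char *check_bytes(unsigned char *start,
					  unsigned char value, unsigned int bytes)
	{
		while (bytes) {
			if (*start != value)
				return start;
			start++;
			bytes--;
		}
		return NULL;
	}

	/* Report the corrupted range [fault, end) and restore it. */
	static int check_bytes_and_report(const char *what, unsigned char *start,
					  unsigned char value, unsigned int bytes)
	{
		unsigned char *fault = check_bytes(start, value, bytes);
		unsigned char *end = start + bytes;

		if (!fault)
			return 1;

		/* Trim intact trailing bytes to find the end of the corruption. */
		while (end > fault && end[-1] == value)
			end--;

		fprintf(stderr, "BUG: %s overwritten\n", what);
		fprintf(stderr, "INFO: %p-%p. First byte 0x%x instead of 0x%x\n",
			(void *)fault, (void *)(end - 1), fault[0], value);

		/* Remedial action, as restore_bytes() does in the patch. */
		memset(fault, value, end - fault);
		fprintf(stderr, "FIX: Restoring %p-%p=0x%x\n",
			(void *)fault, (void *)(end - 1), value);
		return 0;
	}

	int main(void)
	{
		unsigned char redzone[8];

		memset(redzone, 0xcc, sizeof(redzone));
		redzone[0] = 0x00;	/* simulate an off-by-one string write */
		check_bytes_and_report("Redzone", redzone, 0xcc, sizeof(redzone));
		return 0;
	}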
-rw-r--r--	Documentation/vm/slub.txt	137
-rw-r--r--	mm/slub.c	277
2 files changed, 243 insertions, 171 deletions
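The new format is emitted through two small helpers added in mm/slub.c below:
slab_bug() for the lockdep-style header and slab_fix() for the one-line
remedial-action trailer, both built on the usual va_list/vsnprintf() pattern.
For illustration only, a minimal user-space sketch of that pattern (names
follow the patch; printk() and the struct kmem_cache argument are replaced
here by fprintf() and a cache-name string):

	#include <stdarg.h>
	#include <stdio.h>

	/* Print the report header: =====... / BUG <cache>: <problem> / -----... */
	static void slab_bug(const char *name, const char *fmt, ...)
	{
		va_list args;
		char buf[100];

		va_start(args, fmt);
		vsnprintf(buf, sizeof(buf), fmt, args);
		va_end(args);
		fprintf(stderr, "=============================================\n");
		fprintf(stderr, "BUG %s: %s\n", name, buf);
		fprintf(stderr, "---------------------------------------------\n\n");
	}

	/* Print the remedial-action trailer: FIX <cache>: <action> */
	static void slab_fix(const char *name, const char *fmt, ...)
	{
		va_list args;
		char buf[100];

		va_start(args, fmt);
		vsnprintf(buf, sizeof(buf), fmt, args);
		va_end(args);
		fprintf(stderr, "FIX %s: %s\n", name, buf);
	}

	int main(void)
	{
		slab_bug("kmalloc-8", "%s overwritten", "Redzone");
		slab_fix("kmalloc-8", "Restoring Redzone 0x%x-0x%x=0x%x",
			 0x28, 0x2b, 0xcc);
		return 0;
	}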
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index df812b03b65d..d17f324db9f5 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -127,13 +127,20 @@ SLUB Debug output
 
 Here is a sample of slub debug output:
 
-*** SLUB kmalloc-8: Redzone Active@0xc90f6d20 slab 0xc528c530 offset=3360 flags=0x400000c3 inuse=61 freelist=0xc90f6d58
-  Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
-   Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
-  Redzone 0xc90f6d28:  00 cc cc cc                                     .
-FreePointer 0xc90f6d2c -> 0xc90f6d58
-Last alloc: get_modalias+0x61/0xf5 jiffies_ago=53 cpu=1 pid=554
-Filler 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+====================================================================
+BUG kmalloc-8: Redzone overwritten
+--------------------------------------------------------------------
+
+INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
+INFO: Slab 0xc528c530 flags=0x400000c3 inuse=61 fp=0xc90f6d58
+INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
+INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
+
+Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
+Redzone 0xc90f6d28:  00 cc cc cc                                     .
+Padding 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+
   [<c010523d>] dump_trace+0x63/0x1eb
   [<c01053df>] show_trace_log_lvl+0x1a/0x2f
   [<c010601d>] show_trace+0x12/0x14
@@ -155,74 +162,108 @@ Filler 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
   [<c0104112>] sysenter_past_esp+0x5f/0x99
   [<b7f7b410>] 0xb7f7b410
   =======================
-@@@ SLUB kmalloc-8: Restoring redzone (0xcc) from 0xc90f6d28-0xc90f6d2b
 
+FIX kmalloc-8: Restoring Redzone 0xc90f6d28-0xc90f6d2b=0xcc
 
+If SLUB encounters a corrupted object (full detection requires the kernel
+to be booted with slub_debug) then the following output will be dumped
+into the syslog:
 
-If SLUB encounters a corrupted object then it will perform the following
-actions:
-
-1. Isolation and report of the issue
+1. Description of the problem encountered
 
 This will be a message in the system log starting with
 
-*** SLUB <slab cache affected>: <What went wrong>@<object address>
-offset=<offset of object into slab> flags=<slabflags>
-inuse=<objects in use in this slab> freelist=<first free object in slab>
+===============================================
+BUG <slab cache affected>: <What went wrong>
+-----------------------------------------------
 
-2. Report on how the problem was dealt with in order to ensure the continued
-operation of the system.
+INFO: <corruption start>-<corruption_end> <more info>
+INFO: Slab <address> <slab information>
+INFO: Object <address> <object information>
+INFO: Allocated in <kernel function> age=<jiffies since alloc> cpu=<allocated by
+	cpu> pid=<pid of the process>
+INFO: Freed in <kernel function> age=<jiffies since free> cpu=<freed by cpu>
+	pid=<pid of the process>
 
-These are messages in the system log beginning with
-
-@@@ SLUB <slab cache affected>: <corrective action taken>
+(Object allocation / free information is only available if SLAB_STORE_USER is
+set for the slab. slub_debug sets that option)
 
+2. The object contents if an object was involved.
 
-In the above sample SLUB found that the Redzone of an active object has
-been overwritten. Here a string of 8 characters was written into a slab that
-has the length of 8 characters. However, a 8 character string needs a
-terminating 0. That zero has overwritten the first byte of the Redzone field.
-After reporting the details of the issue encountered the @@@ SLUB message
-tell us that SLUB has restored the redzone to its proper value and then
-system operations continue.
-
-Various types of lines can follow the @@@ SLUB line:
+Various types of lines can follow the BUG SLUB line:
 
 Bytes b4 <address> : <bytes>
-	Show a few bytes before the object where the problem was detected.
+	Shows a few bytes before the object where the problem was detected.
 	Can be useful if the corruption does not stop with the start of the
 	object.
 
 Object <address> : <bytes>
 	The bytes of the object. If the object is inactive then the bytes
-	typically contain poisoning values. Any non-poison value shows a
+	typically contain poison values. Any non-poison value shows a
 	corruption by a write after free.
 
 Redzone <address> : <bytes>
-	The redzone following the object. The redzone is used to detect
+	The Redzone following the object. The Redzone is used to detect
 	writes after the object. All bytes should always have the same
 	value. If there is any deviation then it is due to a write after
 	the object boundary.
 
-Freepointer
-	The pointer to the next free object in the slab. May become
-	corrupted if overwriting continues after the red zone.
-
-Last alloc:
-Last free:
-	Shows the address from which the object was allocated/freed last.
-	We note the pid, the time and the CPU that did so. This is usually
-	the most useful information to figure out where things went wrong.
-	Here get_modalias() did an kmalloc(8) instead of a kmalloc(9).
+	(Redzone information is only available if SLAB_RED_ZONE is set.
+	slub_debug sets that option)
 
-Filler <address> : <bytes>
+Padding <address> : <bytes>
 	Unused data to fill up the space in order to get the next object
 	properly aligned. In the debug case we make sure that there are
-	at least 4 bytes of filler. This allow for the detection of writes
+	at least 4 bytes of padding. This allows the detection of writes
 	before the object.
 
-Following the filler will be a stackdump. That stackdump describes the
-location where the error was detected. The cause of the corruption is more
-likely to be found by looking at the information about the last alloc / free.
+3. A stackdump
+
+The stackdump describes the location where the error was detected. The cause
+of the corruption is more likely to be found by looking at the function that
+allocated or freed the object.
+
+4. Report on how the problem was dealt with in order to ensure the continued
+operation of the system.
+
+These are messages in the system log beginning with
+
+FIX <slab cache affected>: <corrective action taken>
+
+In the above sample SLUB found that the Redzone of an active object has
+been overwritten. Here a string of 8 characters was written into a slab that
+has the length of 8 characters. However, an 8 character string needs a
+terminating 0. That zero has overwritten the first byte of the Redzone field.
+After reporting the details of the issue encountered, the FIX SLUB message
+tells us that SLUB has restored the Redzone to its proper value and then
+system operations continue.
+
+Emergency operations:
+---------------------
+
+Minimal debugging (sanity checks alone) can be enabled by booting with
+
+	slub_debug=F
+
+This will generally be enough to enable the resiliency features of slub,
+which will keep the system running even if a bad kernel component keeps
+corrupting objects. This may be important for production systems.
+Performance will be impacted by the sanity checks and there will be a
+continual stream of error messages to the syslog but no additional memory
+will be used (unlike full debugging).
+
+No guarantees. The kernel component still needs to be fixed. Performance
+may be optimized further by locating the slab that experiences corruption
+and enabling debugging only for that cache.
+
+E.g.
+
+	slub_debug=F,dentry
+
+If the corruption occurs by writing after the end of the object then it
+may be advisable to enable a Redzone to avoid corrupting the beginning
+of other objects.
+
+	slub_debug=FZ,dentry
 
-Christoph Lameter, <clameter@sgi.com>, May 23, 2007
+Christoph Lameter, <clameter@sgi.com>, May 30, 2007
diff --git a/mm/slub.c b/mm/slub.c
index 6aea48942c29..2b9e656f1cb3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -344,7 +344,7 @@ static void print_section(char *text, u8 *addr, unsigned int length)
 
 	for (i = 0; i < length; i++) {
 		if (newline) {
-			printk(KERN_ERR "%10s 0x%p: ", text, addr + i);
+			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
 		printk(" %02x", addr[i]);
@@ -401,10 +401,11 @@ static void set_track(struct kmem_cache *s, void *object,
 
 static void init_tracking(struct kmem_cache *s, void *object)
 {
-	if (s->flags & SLAB_STORE_USER) {
-		set_track(s, object, TRACK_FREE, NULL);
-		set_track(s, object, TRACK_ALLOC, NULL);
-	}
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	set_track(s, object, TRACK_FREE, NULL);
+	set_track(s, object, TRACK_ALLOC, NULL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -412,65 +413,106 @@ static void print_track(const char *s, struct track *t)
 	if (!t->addr)
 		return;
 
-	printk(KERN_ERR "%s: ", s);
+	printk(KERN_ERR "INFO: %s in ", s);
 	__print_symbol("%s", (unsigned long)t->addr);
-	printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+}
+
+static void print_tracking(struct kmem_cache *s, void *object)
+{
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
+	print_track("Freed", get_track(s, object, TRACK_FREE));
 }
 
-static void print_trailer(struct kmem_cache *s, u8 *p)
+static void print_page_info(struct page *page)
+{
+	printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
+		page, page->inuse, page->freelist, page->flags);
+
+}
+
+static void slab_bug(struct kmem_cache *s, char *fmt, ...)
+{
+	va_list args;
+	char buf[100];
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	printk(KERN_ERR "========================================"
+			"=====================================\n");
+	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+	printk(KERN_ERR "----------------------------------------"
+			"-------------------------------------\n\n");
+}
+
+static void slab_fix(struct kmem_cache *s, char *fmt, ...)
+{
+	va_list args;
+	char buf[100];
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
+}
+
+static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
+	u8 *addr = page_address(page);
+
+	print_tracking(s, p);
+
+	print_page_info(page);
+
+	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
+			p, p - addr, get_freepointer(s, p));
+
+	if (p > addr + 16)
+		print_section("Bytes b4", p - 16, 16);
+
+	print_section("Object", p, min(s->objsize, 128));
 
 	if (s->flags & SLAB_RED_ZONE)
 		print_section("Redzone", p + s->objsize,
 			s->inuse - s->objsize);
 
-	printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n",
-			p + s->offset,
-			get_freepointer(s, p));
-
 	if (s->offset)
 		off = s->offset + sizeof(void *);
 	else
 		off = s->inuse;
 
-	if (s->flags & SLAB_STORE_USER) {
-		print_track("Last alloc", get_track(s, p, TRACK_ALLOC));
-		print_track("Last free ", get_track(s, p, TRACK_FREE));
+	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
-	}
 
 	if (off != s->size)
 		/* Beginning of the filler is the free pointer */
-		print_section("Filler", p + off, s->size - off);
+		print_section("Padding", p + off, s->size - off);
+
+	dump_stack();
 }
 
 static void object_err(struct kmem_cache *s, struct page *page,
 			u8 *object, char *reason)
 {
-	u8 *addr = page_address(page);
-
-	printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
-			s->name, reason, object, page);
-	printk(KERN_ERR "    offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
-		object - addr, page->flags, page->inuse, page->freelist);
-	if (object > addr + 16)
-		print_section("Bytes b4", object - 16, 16);
-	print_section("Object", object, min(s->objsize, 128));
-	print_trailer(s, object);
-	dump_stack();
+	slab_bug(s, reason);
+	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
 
-	va_start(args, reason);
-	vsnprintf(buf, sizeof(buf), reason, args);
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
-	printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf,
-		page);
+	slab_bug(s, fmt);
+	print_page_info(page);
 	dump_stack();
 }
 
@@ -489,15 +531,46 @@ static void init_object(struct kmem_cache *s, void *object, int active)
 			s->inuse - s->objsize);
 }
 
-static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
+static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
 {
 	while (bytes) {
 		if (*start != (u8)value)
-			return 0;
+			return start;
 		start++;
 		bytes--;
 	}
-	return 1;
+	return NULL;
+}
+
+static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
+						void *from, void *to)
+{
+	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
+	memset(from, data, to - from);
+}
+
+static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
+			u8 *object, char *what,
+			u8* start, unsigned int value, unsigned int bytes)
+{
+	u8 *fault;
+	u8 *end;
+
+	fault = check_bytes(start, value, bytes);
+	if (!fault)
+		return 1;
+
+	end = start + bytes;
+	while (end > fault && end[-1] == value)
+		end--;
+
+	slab_bug(s, "%s overwritten", what);
+	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
+					fault, end - 1, fault[0], value);
+	print_trailer(s, page, object);
+
+	restore_bytes(s, what, value, fault, end);
+	return 0;
 }
 
 /*
@@ -538,14 +611,6 @@ static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
  * may be used with merged slabcaches.
  */
 
-static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
-						void *from, void *to)
-{
-	printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
-		s->name, message, data, from, to - 1);
-	memset(from, data, to - from);
-}
-
 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned long off = s->inuse;	/* The end of info */
@@ -561,39 +626,39 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 	if (s->size == off)
 		return 1;
 
-	if (check_bytes(p + off, POISON_INUSE, s->size - off))
-		return 1;
-
-	object_err(s, page, p, "Object padding check fails");
-
-	/*
-	 * Restore padding
-	 */
-	restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size);
-	return 0;
+	return check_bytes_and_report(s, page, p, "Object padding",
+				p + off, POISON_INUSE, s->size - off);
 }
 
 static int slab_pad_check(struct kmem_cache *s, struct page *page)
 {
-	u8 *p;
-	int length, remainder;
+	u8 *start;
+	u8 *fault;
+	u8 *end;
+	int length;
+	int remainder;
 
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	p = page_address(page);
+	start = page_address(page);
+	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
-	remainder = (PAGE_SIZE << s->order) - length;
+	remainder = end - (start + length);
 	if (!remainder)
 		return 1;
 
-	if (!check_bytes(p + length, POISON_INUSE, remainder)) {
-		slab_err(s, page, "Padding check failed");
-		restore_bytes(s, "slab padding", POISON_INUSE, p + length,
-			p + length + remainder);
-		return 0;
-	}
-	return 1;
+	fault = check_bytes(start + length, POISON_INUSE, remainder);
+	if (!fault)
+		return 1;
+	while (end > fault && end[-1] == POISON_INUSE)
+		end--;
+
+	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
+	print_section("Padding", start, length);
+
+	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+	return 0;
 }
 
 static int check_object(struct kmem_cache *s, struct page *page,
@@ -606,41 +671,22 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		unsigned int red =
 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
 
-		if (!check_bytes(endobject, red, s->inuse - s->objsize)) {
-			object_err(s, page, object,
-			active ? "Redzone Active" : "Redzone Inactive");
-			restore_bytes(s, "redzone", red,
-				endobject, object + s->inuse);
+		if (!check_bytes_and_report(s, page, object, "Redzone",
+			endobject, red, s->inuse - s->objsize))
 			return 0;
-		}
 	} else {
-		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse &&
-			!check_bytes(endobject, POISON_INUSE,
-					s->inuse - s->objsize)) {
-			object_err(s, page, p, "Alignment padding check fails");
-			/*
-			 * Fix it so that there will not be another report.
-			 *
-			 * Hmmm... We may be corrupting an object that now expects
-			 * to be longer than allowed.
-			 */
-			restore_bytes(s, "alignment padding", POISON_INUSE,
-				endobject, object + s->inuse);
-		}
+		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
+			check_bytes_and_report(s, page, p, "Alignment padding", endobject,
+				POISON_INUSE, s->inuse - s->objsize);
 	}
 
 	if (s->flags & SLAB_POISON) {
 		if (!active && (s->flags & __OBJECT_POISON) &&
-			(!check_bytes(p, POISON_FREE, s->objsize - 1) ||
-			 p[s->objsize - 1] != POISON_END)) {
-
-			object_err(s, page, p, "Poison check failed");
-			restore_bytes(s, "Poison", POISON_FREE,
-						p, p + s->objsize -1);
-			restore_bytes(s, "Poison", POISON_END,
-					p + s->objsize - 1, p + s->objsize);
+			(!check_bytes_and_report(s, page, p, "Poison", p,
+					POISON_FREE, s->objsize - 1) ||
+			 !check_bytes_and_report(s, page, p, "Poison",
+				p + s->objsize -1, POISON_END, 1)))
 			return 0;
-		}
 		/*
 		 * check_pad_bytes cleans up on its own.
 		 */
@@ -673,25 +719,17 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 	VM_BUG_ON(!irqs_disabled());
 
 	if (!PageSlab(page)) {
-		slab_err(s, page, "Not a valid slab page flags=%lx "
-			"mapping=0x%p count=%d", page->flags, page->mapping,
-			page_count(page));
+		slab_err(s, page, "Not a valid slab page");
 		return 0;
 	}
 	if (page->offset * sizeof(void *) != s->offset) {
-		slab_err(s, page, "Corrupted offset %lu flags=0x%lx "
-			"mapping=0x%p count=%d",
-			(unsigned long)(page->offset * sizeof(void *)),
-			page->flags,
-			page->mapping,
-			page_count(page));
+		slab_err(s, page, "Corrupted offset %lu",
+			(unsigned long)(page->offset * sizeof(void *)));
 		return 0;
 	}
 	if (page->inuse > s->objects) {
-		slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx "
-			"mapping=0x%p count=%d",
-			s->name, page->inuse, s->objects, page->flags,
-			page->mapping, page_count(page));
+		slab_err(s, page, "inuse %u > max %u",
+			s->name, page->inuse, s->objects);
 		return 0;
 	}
 	/* Slab_pad_check fixes things up after itself */
@@ -719,13 +757,10 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 			set_freepointer(s, object, NULL);
 			break;
 		} else {
-			slab_err(s, page, "Freepointer 0x%p corrupt",
-									fp);
+			slab_err(s, page, "Freepointer corrupt");
 			page->freelist = NULL;
 			page->inuse = s->objects;
-			printk(KERN_ERR "@@@ SLUB %s: Freelist "
-				"cleared. Slab 0x%p\n",
-				s->name, page);
+			slab_fix(s, "Freelist cleared");
 			return 0;
 		}
 		break;
@@ -737,11 +772,9 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 
 	if (page->inuse != s->objects - nr) {
-		slab_err(s, page, "Wrong object count. Counter is %d but "
-			"counted were %d", s, page, page->inuse,
-			s->objects - nr);
+		slab_err(s, page, "Wrong object count. Counter is %d but "
+			"counted were %d", page->inuse, s->objects - nr);
 		page->inuse = s->objects - nr;
-		printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
-			"Slab @0x%p\n", s->name, page);
+		slab_fix(s, "Object count adjusted.");
 	}
 	return search == NULL;
 }
@@ -803,7 +836,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
 		goto bad;
 
 	if (object && !on_freelist(s, page, object)) {
-		slab_err(s, page, "Object 0x%p already allocated", object);
+		object_err(s, page, object, "Object already allocated");
 		goto bad;
 	}
 
@@ -829,8 +862,7 @@ bad:
 		 * to avoid issues in the future. Marking all objects
 		 * as used avoids touching the remaining objects.
 		 */
-		printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
-			s->name, page);
+		slab_fix(s, "Marking all objects used");
 		page->inuse = s->objects;
 		page->freelist = NULL;
 		/* Fix up fields that may be corrupted */
@@ -851,7 +883,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	}
 
 	if (on_freelist(s, page, object)) {
-		slab_err(s, page, "Object 0x%p already free", object);
+		object_err(s, page, object, "Object already free");
 		goto fail;
 	}
 
@@ -870,8 +902,8 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 			dump_stack();
 		}
 		else
-			slab_err(s, page, "object at 0x%p belongs "
-				"to slab %s", object, page->slab->name);
+			object_err(s, page, object,
+					"page slab pointer corrupt.");
 		goto fail;
 	}
 
@@ -885,8 +917,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	return 1;
 
 fail:
-	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
-			s->name, page, object);
+	slab_fix(s, "Object at 0x%p not freed", object);
 	return 0;
 }
 