author      Fabian Frederick <fabf@skynet.be>  2014-06-04 19:06:34 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2014-06-04 19:53:55 -0400
commit      f9f58285947d9c88079bfb7b7666c987011e3377 (patch)
tree        ef92a7d5f483aca57edef85160ff6734690fe886 /mm/slub.c
parent      982792c782ef337381e982fd2047391886f89693 (diff)
mm/slub.c: convert printk to pr_foo()
All printk(KERN_foo ...) calls converted to pr_foo()
Default (no KERN_ level) printk calls converted to pr_warn()
Coalesce split format-string fragments
Signed-off-by: Fabian Frederick <fabf@skynet.be>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Joe Perches <joe@perches.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
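
For context, the conversion follows a fixed pattern; the sketch below is illustrative only (the message text, variable, and function are hypothetical, not taken from mm/slub.c). The pr_<level>() helpers in <linux/printk.h> expand to printk(KERN_<level> pr_fmt(fmt), ...), so the logged output is unchanged, while the coalesced format strings keep messages greppable:

	/* Before: explicit level, format string split across two lines
	 * (hypothetical example, not a line from this diff) */
	printk(KERN_ERR "SLUB: example failure on "
			"node %d\n", node);

	/* After: pr_err() with the format fragments coalesced */
	pr_err("SLUB: example failure on node %d\n", node);

	/* A printk() with no KERN_ level logs at the default message
	 * loglevel (warning on most configs), so such calls map to
	 * pr_warn() without changing behavior. */
	printk("example message\n");	/* before */
	pr_warn("example message\n");	/* after */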
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  129
1 file changed, 57 insertions(+), 72 deletions(-)
@@ -403,7 +403,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 	stat(s, CMPXCHG_DOUBLE_FAIL);

 #ifdef SLUB_DEBUG_CMPXCHG
-	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 #endif

 	return 0;
@@ -444,7 +444,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 	stat(s, CMPXCHG_DOUBLE_FAIL);

 #ifdef SLUB_DEBUG_CMPXCHG
-	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 #endif

 	return 0;
@@ -546,14 +546,14 @@ static void print_track(const char *s, struct track *t)
 	if (!t->addr)
 		return;

-	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
+	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 #ifdef CONFIG_STACKTRACE
 	{
 		int i;
 		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
 			if (t->addrs[i])
-				printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
+				pr_err("\t%pS\n", (void *)t->addrs[i]);
 			else
 				break;
 	}
@@ -571,8 +571,7 @@ static void print_tracking(struct kmem_cache *s, void *object)

 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR
-		"INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
 		page, page->objects, page->inuse, page->freelist, page->flags);

 }
@@ -585,11 +584,9 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 	va_start(args, fmt);
 	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
-	printk(KERN_ERR "========================================"
-			"=====================================\n");
-	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
-	printk(KERN_ERR "----------------------------------------"
-			"-------------------------------------\n\n");
+	pr_err("=============================================================================\n");
+	pr_err("BUG %s (%s): %s\n", s->name, print_tainted(), buf);
+	pr_err("-----------------------------------------------------------------------------\n\n");

 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
@@ -602,7 +599,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 	va_start(args, fmt);
 	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
-	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
+	pr_err("FIX %s: %s\n", s->name, buf);
 }

 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
@@ -614,8 +611,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)

 	print_page_info(page);

-	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
+	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 		p, p - addr, get_freepointer(s, p));

 	if (p > addr + 16)
 		print_section("Bytes b4 ", p - 16, 16);
@@ -698,7 +695,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 	end--;

 	slab_bug(s, "%s overwritten", what);
-	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
+	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
 		fault, end - 1, fault[0], value);
 	print_trailer(s, page, object);
@@ -931,7 +928,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 								int alloc)
 {
 	if (s->flags & SLAB_TRACE) {
-		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
 			s->name,
 			alloc ? "alloc" : "free",
 			object, page->inuse,
@@ -1134,9 +1131,8 @@ static noinline struct kmem_cache_node *free_debug_processing(
 		slab_err(s, page, "Attempt to free object(0x%p) "
 				"outside of slab", object);
 	} else if (!page->slab_cache) {
-		printk(KERN_ERR
-			"SLUB <none>: no slab for object 0x%p.\n",
-			object);
+		pr_err("SLUB <none>: no slab for object 0x%p.\n",
+		       object);
 		dump_stack();
 	} else
 		object_err(s, page, object,
@@ -1219,8 +1215,8 @@ static int __init setup_slub_debug(char *str)
 			slub_debug |= SLAB_FAILSLAB;
 			break;
 		default:
-			printk(KERN_ERR "slub_debug option '%c' "
-				"unknown. skipped\n", *str);
+			pr_err("slub_debug option '%c' unknown. skipped\n",
+			       *str);
 		}
 	}

@@ -1770,19 +1766,19 @@ static inline void note_cmpxchg_failure(const char *n,
 #ifdef SLUB_DEBUG_CMPXCHG
 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);

-	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
+	pr_info("%s %s: cmpxchg redo ", n, s->name);

 #ifdef CONFIG_PREEMPT
 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
-		printk("due to cpu change %d -> %d\n",
+		pr_warn("due to cpu change %d -> %d\n",
 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
 	else
 #endif
 	if (tid_to_event(tid) != tid_to_event(actual_tid))
-		printk("due to cpu running other code. Event %ld->%ld\n",
+		pr_warn("due to cpu running other code. Event %ld->%ld\n",
 			tid_to_event(tid), tid_to_event(actual_tid));
 	else
-		printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
+		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
 			actual_tid, tid, next_tid(tid));
 #endif
 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
@@ -2154,16 +2150,15 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
 	int node;

-	printk(KERN_WARNING
-		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+	pr_warn("SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
 		nid, gfpflags);
-	printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
-		"default order: %d, min order: %d\n", s->name, s->object_size,
-		s->size, oo_order(s->oo), oo_order(s->min));
+	pr_warn(" cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
+		s->name, s->object_size, s->size, oo_order(s->oo),
+		oo_order(s->min));

 	if (oo_order(s->min) > get_order(s->object_size))
-		printk(KERN_WARNING " %s debugging increased min order, use "
-			"slub_debug=O to disable.\n", s->name);
+		pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
+			s->name);

 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -2178,8 +2173,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		nr_slabs = node_nr_slabs(n);
 		nr_objs = node_nr_objs(n);

-		printk(KERN_WARNING
-			" node %d: slabs: %ld, objs: %ld, free: %ld\n",
+		pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
 			node, nr_slabs, nr_objs, nr_free);
 	}
 }
@@ -2894,10 +2888,8 @@ static void early_kmem_cache_node_alloc(int node)

 	BUG_ON(!page);
 	if (page_to_nid(page) != node) {
-		printk(KERN_ERR "SLUB: Unable to allocate memory from "
-				"node %d\n", node);
-		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
-				"in order to be able to continue\n");
+		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
+		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
 	}

 	n = page->freelist;
@@ -3182,8 +3174,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 	for_each_object(p, s, addr, page->objects) {

 		if (!test_bit(slab_index(p, s, addr), map)) {
-			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
-							p, p - addr);
+			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
 			print_tracking(s, p);
 		}
 	}
@@ -3650,9 +3641,7 @@ void __init kmem_cache_init(void)
 	register_cpu_notifier(&slab_notifier);
 #endif

-	printk(KERN_INFO
-		"SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
-		" CPUs=%d, Nodes=%d\n",
+	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
 		cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
@@ -3934,8 +3923,8 @@ static int validate_slab_node(struct kmem_cache *s,
 		count++;
 	}
 	if (count != n->nr_partial)
-		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
-			"counter=%ld\n", s->name, count, n->nr_partial);
+		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
+		       s->name, count, n->nr_partial);

 	if (!(s->flags & SLAB_STORE_USER))
 		goto out;
@@ -3945,9 +3934,8 @@ static int validate_slab_node(struct kmem_cache *s,
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs))
-		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
-			"counter=%ld\n", s->name, count,
-			atomic_long_read(&n->nr_slabs));
+		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
+		       s->name, count, atomic_long_read(&n->nr_slabs));

 out:
 	spin_unlock_irqrestore(&n->list_lock, flags);
@@ -4211,53 +4199,50 @@ static void resiliency_test(void)

 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);

-	printk(KERN_ERR "SLUB resiliency testing\n");
-	printk(KERN_ERR "-----------------------\n");
-	printk(KERN_ERR "A. Corruption after allocation\n");
+	pr_err("SLUB resiliency testing\n");
+	pr_err("-----------------------\n");
+	pr_err("A. Corruption after allocation\n");

 	p = kzalloc(16, GFP_KERNEL);
 	p[16] = 0x12;
-	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
-			" 0x12->0x%p\n\n", p + 16);
+	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
+	       p + 16);

 	validate_slab_cache(kmalloc_caches[4]);

 	/* Hmmm... The next two are dangerous */
 	p = kzalloc(32, GFP_KERNEL);
 	p[32 + sizeof(void *)] = 0x34;
-	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
-			" 0x34 -> -0x%p\n", p);
-	printk(KERN_ERR
-		"If allocated object is overwritten then not detectable\n\n");
+	pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
+	       p);
+	pr_err("If allocated object is overwritten then not detectable\n\n");

 	validate_slab_cache(kmalloc_caches[5]);
 	p = kzalloc(64, GFP_KERNEL);
 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
 	*p = 0x56;
-	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
-			p);
-	printk(KERN_ERR
-		"If allocated object is overwritten then not detectable\n\n");
+	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
+	       p);
+	pr_err("If allocated object is overwritten then not detectable\n\n");
 	validate_slab_cache(kmalloc_caches[6]);

-	printk(KERN_ERR "\nB. Corruption after free\n");
+	pr_err("\nB. Corruption after free\n");
 	p = kzalloc(128, GFP_KERNEL);
 	kfree(p);
 	*p = 0x78;
-	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
+	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
 	validate_slab_cache(kmalloc_caches[7]);

 	p = kzalloc(256, GFP_KERNEL);
 	kfree(p);
 	p[50] = 0x9a;
-	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
-			p);
+	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
 	validate_slab_cache(kmalloc_caches[8]);

 	p = kzalloc(512, GFP_KERNEL);
 	kfree(p);
 	p[512] = 0xab;
-	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
+	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
 	validate_slab_cache(kmalloc_caches[9]);
 }
 #else
@@ -5303,7 +5288,7 @@ static int __init slab_sysfs_init(void)
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
 		mutex_unlock(&slab_mutex);
-		printk(KERN_ERR "Cannot register slab subsystem.\n");
+		pr_err("Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}

@@ -5312,8 +5297,8 @@ static int __init slab_sysfs_init(void)
 	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);
 		if (err)
-			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
-				" to sysfs\n", s->name);
+			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
+			       s->name);
 	}

 	while (alias_list) {
@@ -5322,8 +5307,8 @@ static int __init slab_sysfs_init(void)
 		alias_list = alias_list->next;
 		err = sysfs_slab_alias(al->s, al->name);
 		if (err)
-			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
-				" %s to sysfs\n", al->name);
+			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
+			       al->name);
 		kfree(al);
 	}
